Ubuntu committed
Commit 8bb2147
1 Parent(s): 857379d

modified: README.md
new file: pipelines.ts
new file: widget-example.ts

Files changed (3):
  1. README.md +1 -3
  2. pipelines.ts +675 -0
  3. widget-example.ts +125 -0
README.md CHANGED
@@ -9,8 +9,6 @@ model-index:
 results: []
 ---
 
-<!-- This model card has been generated automatically according to the information the Trainer had access to. You
-should probably proofread and complete it, then remove this comment. -->
 
 [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
 <details><summary>See axolotl config</summary>
@@ -152,4 +150,4 @@ The following hyperparameters were used during training:
 - Transformers 4.39.0.dev0
 - Pytorch 2.1.2+cu118
 - Datasets 2.18.0
-- Tokenizers 0.15.0
+- Tokenizers 0.15.0
pipelines.ts ADDED
@@ -0,0 +1,675 @@
export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const;

export type Modality = (typeof MODALITIES)[number];

export const MODALITY_LABELS = {
  multimodal: "Multimodal",
  nlp: "Natural Language Processing",
  audio: "Audio",
  cv: "Computer Vision",
  rl: "Reinforcement Learning",
  tabular: "Tabular",
  other: "Other",
} satisfies Record<Modality, string>;

/**
 * Public interface for a sub task.
 *
 * This can be used in a model card's `model-index` metadata
 * and is a more granular classification that can grow significantly
 * over time as new tasks are added.
 */
export interface SubTask {
  /**
   * type of the task (e.g. audio-source-separation)
   */
  type: string;
  /**
   * displayed name of the task (e.g. Audio Source Separation)
   */
  name: string;
}

/**
 * Public interface for a PipelineData.
 *
 * This information corresponds to a pipeline type (aka task)
 * in the Hub.
 */
export interface PipelineData {
  /**
   * displayed name of the task (e.g. Text Classification)
   */
  name: string;
  subtasks?: SubTask[];
  modality: Modality;
  /**
   * color for the tag icon.
   */
  color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow";
  /**
   * whether to hide in /models filters
   */
  hideInModels?: boolean;
  /**
   * whether to hide in /datasets filters
   */
  hideInDatasets?: boolean;
}

/// Coarse-grained taxonomy of tasks
///
/// This type is used in multiple places in the Hugging Face
/// ecosystem:
/// - To determine which widget to show.
/// - To determine which endpoint of Inference Endpoints to use.
/// - As filters on the left of the models and datasets pages.
///
/// Note that this is sensitive to order.
/// For each domain, the order should be of decreasing specificity.
/// This will impact the default pipeline tag of a model when not
/// specified.
export const PIPELINE_DATA = {
  "text-classification": {
    name: "Text Classification",
    subtasks: [
      {
        type: "acceptability-classification",
        name: "Acceptability Classification",
      },
      {
        type: "entity-linking-classification",
        name: "Entity Linking Classification",
      },
      {
        type: "fact-checking",
        name: "Fact Checking",
      },
      {
        type: "intent-classification",
        name: "Intent Classification",
      },
      {
        type: "language-identification",
        name: "Language Identification",
      },
      {
        type: "multi-class-classification",
        name: "Multi Class Classification",
      },
      {
        type: "multi-label-classification",
        name: "Multi Label Classification",
      },
      {
        type: "multi-input-text-classification",
        name: "Multi-input Text Classification",
      },
      {
        type: "natural-language-inference",
        name: "Natural Language Inference",
      },
      {
        type: "semantic-similarity-classification",
        name: "Semantic Similarity Classification",
      },
      {
        type: "sentiment-classification",
        name: "Sentiment Classification",
      },
      {
        type: "topic-classification",
        name: "Topic Classification",
      },
      {
        type: "semantic-similarity-scoring",
        name: "Semantic Similarity Scoring",
      },
      {
        type: "sentiment-scoring",
        name: "Sentiment Scoring",
      },
      {
        type: "sentiment-analysis",
        name: "Sentiment Analysis",
      },
      {
        type: "hate-speech-detection",
        name: "Hate Speech Detection",
      },
      {
        type: "text-scoring",
        name: "Text Scoring",
      },
    ],
    modality: "nlp",
    color: "orange",
  },
  "token-classification": {
    name: "Token Classification",
    subtasks: [
      {
        type: "named-entity-recognition",
        name: "Named Entity Recognition",
      },
      {
        type: "part-of-speech",
        name: "Part of Speech",
      },
      {
        type: "parsing",
        name: "Parsing",
      },
      {
        type: "lemmatization",
        name: "Lemmatization",
      },
      {
        type: "word-sense-disambiguation",
        name: "Word Sense Disambiguation",
      },
      {
        type: "coreference-resolution",
        name: "Coreference Resolution",
      },
    ],
    modality: "nlp",
    color: "blue",
  },
  "table-question-answering": {
    name: "Table Question Answering",
    modality: "nlp",
    color: "green",
  },
  "question-answering": {
    name: "Question Answering",
    subtasks: [
      {
        type: "extractive-qa",
        name: "Extractive QA",
      },
      {
        type: "open-domain-qa",
        name: "Open Domain QA",
      },
      {
        type: "closed-domain-qa",
        name: "Closed Domain QA",
      },
    ],
    modality: "nlp",
    color: "blue",
  },
  "zero-shot-classification": {
    name: "Zero-Shot Classification",
    modality: "nlp",
    color: "yellow",
  },
  translation: {
    name: "Translation",
    modality: "nlp",
    color: "green",
  },
  summarization: {
    name: "Summarization",
    subtasks: [
      {
        type: "news-articles-summarization",
        name: "News Articles Summarization",
      },
      {
        type: "news-articles-headline-generation",
        name: "News Articles Headline Generation",
      },
    ],
    modality: "nlp",
    color: "indigo",
  },
  "feature-extraction": {
    name: "Feature Extraction",
    modality: "nlp",
    color: "red",
  },
  "text-generation": {
    name: "Text Generation",
    subtasks: [
      {
        type: "dialogue-modeling",
        name: "Dialogue Modeling",
      },
      {
        type: "dialogue-generation",
        name: "Dialogue Generation",
      },
      {
        type: "conversational",
        name: "Conversational",
      },
      {
        type: "language-modeling",
        name: "Language Modeling",
      },
    ],
    modality: "nlp",
    color: "indigo",
  },
  "text2text-generation": {
    name: "Text2Text Generation",
    subtasks: [
      {
        type: "text-simplification",
        name: "Text simplification",
      },
      {
        type: "explanation-generation",
        name: "Explanation Generation",
      },
      {
        type: "abstractive-qa",
        name: "Abstractive QA",
      },
      {
        type: "open-domain-abstractive-qa",
        name: "Open Domain Abstractive QA",
      },
      {
        type: "closed-domain-qa",
        name: "Closed Domain QA",
      },
      {
        type: "open-book-qa",
        name: "Open Book QA",
      },
      {
        type: "closed-book-qa",
        name: "Closed Book QA",
      },
    ],
    modality: "nlp",
    color: "indigo",
  },
  "fill-mask": {
    name: "Fill-Mask",
    subtasks: [
      {
        type: "slot-filling",
        name: "Slot Filling",
      },
      {
        type: "masked-language-modeling",
        name: "Masked Language Modeling",
      },
    ],
    modality: "nlp",
    color: "red",
  },
  "sentence-similarity": {
    name: "Sentence Similarity",
    modality: "nlp",
    color: "yellow",
  },
  "text-to-speech": {
    name: "Text-to-Speech",
    modality: "audio",
    color: "yellow",
  },
  "text-to-audio": {
    name: "Text-to-Audio",
    modality: "audio",
    color: "yellow",
  },
  "automatic-speech-recognition": {
    name: "Automatic Speech Recognition",
    modality: "audio",
    color: "yellow",
  },
  "audio-to-audio": {
    name: "Audio-to-Audio",
    modality: "audio",
    color: "blue",
  },
  "audio-classification": {
    name: "Audio Classification",
    subtasks: [
      {
        type: "keyword-spotting",
        name: "Keyword Spotting",
      },
      {
        type: "speaker-identification",
        name: "Speaker Identification",
      },
      {
        type: "audio-intent-classification",
        name: "Audio Intent Classification",
      },
      {
        type: "audio-emotion-recognition",
        name: "Audio Emotion Recognition",
      },
      {
        type: "audio-language-identification",
        name: "Audio Language Identification",
      },
    ],
    modality: "audio",
    color: "green",
  },
  "voice-activity-detection": {
    name: "Voice Activity Detection",
    modality: "audio",
    color: "red",
  },
  "depth-estimation": {
    name: "Depth Estimation",
    modality: "cv",
    color: "yellow",
  },
  "image-classification": {
    name: "Image Classification",
    subtasks: [
      {
        type: "multi-label-image-classification",
        name: "Multi Label Image Classification",
      },
      {
        type: "multi-class-image-classification",
        name: "Multi Class Image Classification",
      },
    ],
    modality: "cv",
    color: "blue",
  },
  "object-detection": {
    name: "Object Detection",
    subtasks: [
      {
        type: "face-detection",
        name: "Face Detection",
      },
      {
        type: "vehicle-detection",
        name: "Vehicle Detection",
      },
    ],
    modality: "cv",
    color: "yellow",
  },
  "image-segmentation": {
    name: "Image Segmentation",
    subtasks: [
      {
        type: "instance-segmentation",
        name: "Instance Segmentation",
      },
      {
        type: "semantic-segmentation",
        name: "Semantic Segmentation",
      },
      {
        type: "panoptic-segmentation",
        name: "Panoptic Segmentation",
      },
    ],
    modality: "cv",
    color: "green",
  },
  "text-to-image": {
    name: "Text-to-Image",
    modality: "cv",
    color: "yellow",
  },
  "image-to-text": {
    name: "Image-to-Text",
    subtasks: [
      {
        type: "image-captioning",
        name: "Image Captioning",
      },
    ],
    modality: "cv",
    color: "red",
  },
  "image-to-image": {
    name: "Image-to-Image",
    subtasks: [
      {
        type: "image-inpainting",
        name: "Image Inpainting",
      },
      {
        type: "image-colorization",
        name: "Image Colorization",
      },
      {
        type: "super-resolution",
        name: "Super Resolution",
      },
    ],
    modality: "cv",
    color: "indigo",
  },
  "image-to-video": {
    name: "Image-to-Video",
    modality: "cv",
    color: "indigo",
  },
  "unconditional-image-generation": {
    name: "Unconditional Image Generation",
    modality: "cv",
    color: "green",
  },
  "video-classification": {
    name: "Video Classification",
    modality: "cv",
    color: "blue",
  },
  "reinforcement-learning": {
    name: "Reinforcement Learning",
    modality: "rl",
    color: "red",
  },
  robotics: {
    name: "Robotics",
    modality: "rl",
    subtasks: [
      {
        type: "grasping",
        name: "Grasping",
      },
      {
        type: "task-planning",
        name: "Task Planning",
      },
    ],
    color: "blue",
  },
  "tabular-classification": {
    name: "Tabular Classification",
    modality: "tabular",
    subtasks: [
      {
        type: "tabular-multi-class-classification",
        name: "Tabular Multi Class Classification",
      },
      {
        type: "tabular-multi-label-classification",
        name: "Tabular Multi Label Classification",
      },
    ],
    color: "blue",
  },
  "tabular-regression": {
    name: "Tabular Regression",
    modality: "tabular",
    subtasks: [
      {
        type: "tabular-single-column-regression",
        name: "Tabular Single Column Regression",
      },
    ],
    color: "blue",
  },
  "tabular-to-text": {
    name: "Tabular to Text",
    modality: "tabular",
    subtasks: [
      {
        type: "rdf-to-text",
        name: "RDF to text",
      },
    ],
    color: "blue",
    hideInModels: true,
  },
  "table-to-text": {
    name: "Table to Text",
    modality: "nlp",
    color: "blue",
    hideInModels: true,
  },
  "multiple-choice": {
    name: "Multiple Choice",
    subtasks: [
      {
        type: "multiple-choice-qa",
        name: "Multiple Choice QA",
      },
      {
        type: "multiple-choice-coreference-resolution",
        name: "Multiple Choice Coreference Resolution",
      },
    ],
    modality: "nlp",
    color: "blue",
    hideInModels: true,
  },
  "text-retrieval": {
    name: "Text Retrieval",
    subtasks: [
      {
        type: "document-retrieval",
        name: "Document Retrieval",
      },
      {
        type: "utterance-retrieval",
        name: "Utterance Retrieval",
      },
      {
        type: "entity-linking-retrieval",
        name: "Entity Linking Retrieval",
      },
      {
        type: "fact-checking-retrieval",
        name: "Fact Checking Retrieval",
      },
    ],
    modality: "nlp",
    color: "indigo",
    hideInModels: true,
  },
  "time-series-forecasting": {
    name: "Time Series Forecasting",
    modality: "tabular",
    subtasks: [
      {
        type: "univariate-time-series-forecasting",
        name: "Univariate Time Series Forecasting",
      },
      {
        type: "multivariate-time-series-forecasting",
        name: "Multivariate Time Series Forecasting",
      },
    ],
    color: "blue",
    hideInModels: true,
  },
  "text-to-video": {
    name: "Text-to-Video",
    modality: "cv",
    color: "green",
  },
  "image-text-to-text": {
    name: "Image-Text-to-Text",
    modality: "multimodal",
    color: "red",
    hideInDatasets: true,
  },
  "visual-question-answering": {
    name: "Visual Question Answering",
    subtasks: [
      {
        type: "visual-question-answering",
        name: "Visual Question Answering",
      },
    ],
    modality: "multimodal",
    color: "red",
  },
  "document-question-answering": {
    name: "Document Question Answering",
    subtasks: [
      {
        type: "document-question-answering",
        name: "Document Question Answering",
      },
    ],
    modality: "multimodal",
    color: "blue",
    hideInDatasets: true,
  },
  "zero-shot-image-classification": {
    name: "Zero-Shot Image Classification",
    modality: "cv",
    color: "yellow",
  },
  "graph-ml": {
    name: "Graph Machine Learning",
    modality: "other",
    color: "green",
  },
  "mask-generation": {
    name: "Mask Generation",
    modality: "cv",
    color: "indigo",
  },
  "zero-shot-object-detection": {
    name: "Zero-Shot Object Detection",
    modality: "cv",
    color: "yellow",
  },
  "text-to-3d": {
    name: "Text-to-3D",
    modality: "cv",
    color: "yellow",
  },
  "image-to-3d": {
    name: "Image-to-3D",
    modality: "cv",
    color: "green",
  },
  "image-feature-extraction": {
    name: "Image Feature Extraction",
    modality: "cv",
    color: "indigo",
  },
  other: {
    name: "Other",
    modality: "other",
    color: "blue",
    hideInModels: true,
    hideInDatasets: true,
  },
} satisfies Record<string, PipelineData>;

export type PipelineType = keyof typeof PIPELINE_DATA;

export type WidgetType = PipelineType | "conversational";

export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[];

export const SUBTASK_TYPES = Object.values(PIPELINE_DATA)
  .flatMap((data) => ("subtasks" in data ? data.subtasks : []))
  .map((s) => s.type);

export const PIPELINE_TYPES_SET = new Set(PIPELINE_TYPES);
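
Since the taxonomy above is order-sensitive and drives widget and endpoint selection, here is a minimal, hypothetical sketch of how these exports might be consumed (the import path and helper names are illustrative, not part of this commit):

import { MODALITY_LABELS, PIPELINE_DATA, PIPELINE_TYPES_SET, type Modality, type PipelineType } from "./pipelines";

// Look up display information for a known task tag.
const tag: PipelineType = "text-classification";
console.log(PIPELINE_DATA[tag].name); // "Text Classification"
console.log(MODALITY_LABELS[PIPELINE_DATA[tag].modality]); // "Natural Language Processing"

// Narrow an untrusted string before treating it as a PipelineType.
function isPipelineType(value: string): value is PipelineType {
  return PIPELINE_TYPES_SET.has(value as PipelineType);
}

// Keys are ordered by decreasing specificity within each domain, so taking the
// first key of a modality is one plausible way to derive a default tag.
function firstTaskOfModality(modality: Modality): PipelineType | undefined {
  return (Object.keys(PIPELINE_DATA) as PipelineType[]).find((t) => PIPELINE_DATA[t].modality === modality);
}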
widget-example.ts ADDED
@@ -0,0 +1,125 @@
type TableData = Record<string, (string | number)[]>;

//#region outputs
export type WidgetExampleOutputLabels = Array<{ label: string; score: number }>;
export interface WidgetExampleOutputAnswerScore {
  answer: string;
  score: number;
}
export interface WidgetExampleOutputText {
  text: string;
}
export interface WidgetExampleOutputUrl {
  url: string;
}

export type WidgetExampleOutput =
  | WidgetExampleOutputLabels
  | WidgetExampleOutputAnswerScore
  | WidgetExampleOutputText
  | WidgetExampleOutputUrl;
//#endregion

export interface WidgetExampleBase<TOutput> {
  example_title?: string;
  group?: string;
  /**
   * Potential overrides to API parameters for this specific example
   * (takes precedence over the model card metadata's inference.parameters)
   */
  parameters?: {
    /// token-classification
    aggregation_strategy?: string;
    /// text-generation
    top_k?: number;
    top_p?: number;
    temperature?: number;
    max_new_tokens?: number;
    do_sample?: boolean;
    /// text-to-image
    negative_prompt?: string;
    guidance_scale?: number;
    num_inference_steps?: number;
  };
  /**
   * Optional output
   */
  output?: TOutput;
}

export interface ChatMessage {
  role: "user" | "assistant" | "system";
  content: string;
}

export interface WidgetExampleChatInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
  messages: ChatMessage[];
}

export interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
  text: string;
}

export interface WidgetExampleTextAndContextInput<TOutput = WidgetExampleOutput>
  extends WidgetExampleTextInput<TOutput> {
  context: string;
}

export interface WidgetExampleTextAndTableInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
  table: TableData;
}

export interface WidgetExampleAssetInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
  src: string;
}
export interface WidgetExampleAssetAndPromptInput<TOutput = WidgetExampleOutput>
  extends WidgetExampleAssetInput<TOutput> {
  prompt: string;
}

export type WidgetExampleAssetAndTextInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
  WidgetExampleTextInput<TOutput>;

export type WidgetExampleAssetAndZeroShotInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
  WidgetExampleZeroShotTextInput<TOutput>;

export interface WidgetExampleStructuredDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
  structured_data: TableData;
}

export interface WidgetExampleTableDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
  table: TableData;
}

export interface WidgetExampleZeroShotTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
  text: string;
  candidate_labels: string;
  multi_class: boolean;
}

export interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOutput>
  extends WidgetExampleBase<TOutput> {
  source_sentence: string;
  sentences: string[];
}

//#endregion

export type WidgetExample<TOutput = WidgetExampleOutput> =
  | WidgetExampleChatInput<TOutput>
  | WidgetExampleTextInput<TOutput>
  | WidgetExampleTextAndContextInput<TOutput>
  | WidgetExampleTextAndTableInput<TOutput>
  | WidgetExampleAssetInput<TOutput>
  | WidgetExampleAssetAndPromptInput<TOutput>
  | WidgetExampleAssetAndTextInput<TOutput>
  | WidgetExampleAssetAndZeroShotInput<TOutput>
  | WidgetExampleStructuredDataInput<TOutput>
  | WidgetExampleTableDataInput<TOutput>
  | WidgetExampleZeroShotTextInput<TOutput>
  | WidgetExampleSentenceSimilarityInput<TOutput>;

type KeysOfUnion<T> = T extends unknown ? keyof T : never;

export type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;
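
To make the shape of these inputs concrete, here is a short hypothetical sketch of authoring widget examples against these types (the import path and example values are illustrative, not part of this commit):

import type { WidgetExample, WidgetExampleChatInput, WidgetExampleZeroShotTextInput } from "./widget-example";

// A chat-style example, as a conversational text-generation widget might declare it.
const chatExample: WidgetExampleChatInput = {
  example_title: "Small talk",
  messages: [
    { role: "user", content: "Hello! How are you?" },
    { role: "assistant", content: "Doing well, thanks. How can I help?" },
  ],
  // Per the doc comment above, these override the model card's inference.parameters.
  parameters: { temperature: 0.7, max_new_tokens: 64 },
};

// A zero-shot classification example; candidate_labels is a single string,
// presumably comma-separated.
const zeroShotExample: WidgetExampleZeroShotTextInput = {
  text: "I love this movie",
  candidate_labels: "positive, negative",
  multi_class: false,
};

// Both fit the WidgetExample union that widgets consume.
const examples: WidgetExample[] = [chatExample, zeroShotExample];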