Update with commit 281eeef1bbe7442646892059f2d13ff27c03f4a0
Browse files. See: https://github.com/huggingface/transformers/commit/281eeef1bbe7442646892059f2d13ff27c03f4a0
- frameworks.json +1 -0
- pipeline_tags.json +1 -0
frameworks.json
CHANGED
|
@@ -385,6 +385,7 @@
|
|
| 385 |
{"model_type":"univnet","pytorch":true,"processor":"AutoFeatureExtractor"}
|
| 386 |
{"model_type":"upernet","pytorch":true,"processor":"AutoImageProcessor"}
|
| 387 |
{"model_type":"vaultgemma","pytorch":true,"processor":"AutoTokenizer"}
|
|
|
|
| 388 |
{"model_type":"video_llama_3","pytorch":true,"processor":"AutoImageProcessor"}
|
| 389 |
{"model_type":"video_llama_3_vision","pytorch":true,"processor":"AutoTokenizer"}
|
| 390 |
{"model_type":"video_llava","pytorch":true,"processor":"AutoProcessor"}
|
|
|
|
| 385 |
{"model_type":"univnet","pytorch":true,"processor":"AutoFeatureExtractor"}
|
| 386 |
{"model_type":"upernet","pytorch":true,"processor":"AutoImageProcessor"}
|
| 387 |
{"model_type":"vaultgemma","pytorch":true,"processor":"AutoTokenizer"}
|
| 388 |
+
{"model_type":"vibevoice_acoustic_tokenizer","pytorch":true,"processor":"AutoFeatureExtractor"}
|
| 389 |
{"model_type":"video_llama_3","pytorch":true,"processor":"AutoImageProcessor"}
|
| 390 |
{"model_type":"video_llama_3_vision","pytorch":true,"processor":"AutoTokenizer"}
|
| 391 |
{"model_type":"video_llava","pytorch":true,"processor":"AutoProcessor"}
|
pipeline_tags.json
CHANGED
|
@@ -1471,6 +1471,7 @@
|
|
| 1471 |
{"model_class":"ViTMSNForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
| 1472 |
{"model_class":"ViTMSNModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
| 1473 |
{"model_class":"ViTModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
|
|
|
| 1474 |
{"model_class":"VideoLlama3ForConditionalGeneration","pipeline_tag":"image-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 1475 |
{"model_class":"VideoLlama3Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 1476 |
{"model_class":"VideoLlama3VisionModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
|
| 1471 |
{"model_class":"ViTMSNForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
| 1472 |
{"model_class":"ViTMSNModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
| 1473 |
{"model_class":"ViTModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
| 1474 |
+
{"model_class":"VibeVoiceAcousticTokenizerModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 1475 |
{"model_class":"VideoLlama3ForConditionalGeneration","pipeline_tag":"image-to-text","auto_class":"AutoModelForImageTextToText"}
|
| 1476 |
{"model_class":"VideoLlama3Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
| 1477 |
{"model_class":"VideoLlama3VisionModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|