Update with commit 8d73a38606bc342b370afe1f42718b4828d95aaa
Browse files. See: https://github.com/huggingface/transformers/commit/8d73a38606bc342b370afe1f42718b4828d95aaa
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
@@ -44,6 +44,7 @@
|
|
44 |
{"model_type":"cpmant","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
45 |
{"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
46 |
{"model_type":"cvt","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
|
|
|
47 |
{"model_type":"dac","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
48 |
{"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
49 |
{"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
44 |
{"model_type":"cpmant","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
45 |
{"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
46 |
{"model_type":"cvt","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoImageProcessor"}
|
47 |
+
{"model_type":"dab-detr","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
48 |
{"model_type":"dac","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
49 |
{"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
50 |
{"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
@@ -126,6 +126,8 @@
|
|
126 |
{"model_class":"DPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
127 |
{"model_class":"DPTForDepthEstimation","pipeline_tag":"depth-estimation","auto_class":"AutoModelForDepthEstimation"}
|
128 |
{"model_class":"DPTModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
|
|
|
|
129 |
{"model_class":"DacModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
130 |
{"model_class":"Data2VecAudioForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
|
131 |
{"model_class":"Data2VecAudioForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
|
|
|
126 |
{"model_class":"DPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
127 |
{"model_class":"DPTForDepthEstimation","pipeline_tag":"depth-estimation","auto_class":"AutoModelForDepthEstimation"}
|
128 |
{"model_class":"DPTModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
129 |
+
{"model_class":"DabDetrForObjectDetection","pipeline_tag":"object-detection","auto_class":"AutoModelForObjectDetection"}
|
130 |
+
{"model_class":"DabDetrModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
131 |
{"model_class":"DacModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
132 |
{"model_class":"Data2VecAudioForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
|
133 |
{"model_class":"Data2VecAudioForCTC","pipeline_tag":"automatic-speech-recognition","auto_class":"AutoModelForCTC"}
|