Upload 2025-03-24/ci_results_run_pipelines_torch_gpu/torch_pipeline_results.json with huggingface_hub
2025-03-24/ci_results_run_pipelines_torch_gpu/torch_pipeline_results.json
ADDED
@@ -0,0 +1,161 @@
+{
+    "failed": {
+        "unclassified": 0,
+        "single": 0,
+        "multi": 36
+    },
+    "success": 232,
+    "time_spent": "0:14:43, ",
+    "error": false,
+    "failures": {
+        "multi": [
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_return_timestamps_and_language_in_preprocess",
+                "trace": "(line 675) AssertionError: {'tex[77 chars]ge': None, 'text': ' Conquered returned to its[23 chars].'}]} != {'tex[77 chars]ge': 'english', 'text': ' Conquered returned t[28 chars].'}]}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_return_timestamps_in_preprocess",
+                "trace": "(line 675) AssertionError: {'tex[379 chars] (2.8, 2.98)}, {'text': ' tents.', 'timestamp': (2.98, None)}]} != {'tex[379 chars] (2.8, 2.98)}, {'text': ' tents.', 'timestamp': (2.98, 3.48)}]}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_return_timestamps_in_preprocess_longform",
+                "trace": "(line 675) AssertionError: Lists differ: [{'te[28 chars]: (0.18, 1.04)}, {'text': ' returned', 'timest[613 chars].5)}] != [{'te[28 chars]: (0.5, 0.94)}, {'text': ' returned', 'timesta[606 chars].4)}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_simple_whisper_asr",
+                "trace": "(line 675) AssertionError: {'tex[136 chars]: (0.0, 1.02)}, {'text': ' Quilter', 'timestam[696 chars]e)}]} != {'tex[136 chars]: (0.38, 1.04)}, {'text': ' Quilter', 'timesta[703 chars]4)}]}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_small_model_pt_seq2seq",
+                "trace": "(line 675) AssertionError: {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} != {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u'}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_speculative_decoding_whisper_distil",
+                "trace": "(line 125) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking argument for argument mat2 in method wrapper_CUDA_mm)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_speculative_decoding_whisper_non_distil",
+                "trace": "(line 338) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument test_elements in method wrapper_CUDA_isin_Tensor_Tensor)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_torch_speech_encoder_decoder",
+                "trace": "(line 675) AssertionError: {'text': 'Ein Mann sagte zum Universum : \" Sir , ich existiert ! \"'} != {'text': 'Ein Mann sagte zum Universum : \" Sir, ich existiert! \"'}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_large_timestamp_prediction",
+                "trace": "(line 1329) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 26.00 MiB. GPU 0 has a total capacity of 14.74 GiB of which 20.12 MiB is free. Process 21630 has 14.72 GiB memory in use. Of the allocated memory 14.11 GiB is allocated by PyTorch, and 485.45 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_large_word_timestamps_batched",
+                "trace": "(line 1329) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 0 has a total capacity of 14.74 GiB of which 20.12 MiB is free. Process 21630 has 14.72 GiB memory in use. Of the allocated memory 14.11 GiB is allocated by PyTorch, and 483.57 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_longform",
+                "trace": "(line 1329) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 76.00 MiB. GPU 0 has a total capacity of 14.74 GiB of which 10.12 MiB is free. Process 21630 has 14.73 GiB memory in use. Of the allocated memory 14.14 GiB is allocated by PyTorch, and 462.20 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_timestamp_prediction",
+                "trace": "(line 675) AssertionError: {'text': \" A man said to the universe, Sir,[1155 chars].'}]} != {'chunks': [{'text': ' A man said to the un[929 chars]ut.\"}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_word_timestamps_batched",
+                "trace": "(line 675) AssertionError: {'tex[81 chars] his to welcome his gospel.', 'chunks': [{'tex[898 chars]e)}]} != {'tex[81 chars] his gospel.', 'chunks': [{'text': ' Mr.', 'ti[747 chars]2)}]}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_common.py::CommonPipelineTest::test_auto_model_pipeline_registration_from_local_dir",
+                "trace": "(line 594) OSError: Looks like you do not have git-lfs installed, please install. You can install from https://git-lfs.github.com/. Then run `git lfs install` (you only have to do this once)."
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_common.py::PipelineUtilsTest::test_load_default_pipelines_pt",
+                "trace": "(line 741) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_common.py::PipelineUtilsTest::test_load_default_pipelines_pt_table_qa",
+                "trace": "(line 741) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_common.py::CustomPipelineTest::test_custom_code_with_string_tokenizer",
+                "trace": "(line 1011) AttributeError: 'str' object has no attribute 'pad_token_id'"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_question_answering.py::QAPipelineTests::test_large_model_issue",
+                "trace": "(line 675) AssertionError: {'score': 0.002, 'start': 261, 'end': 294, [41 chars]ase'} != {'answer': 'an accused in the loan fraud ca[41 chars] 261}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_text_generation.py::TextGenerationPipelineTests::test_return_dict_in_generate",
+                "trace": "(line 675) AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_text_generation.py::TextGenerationPipelineTests::test_small_model_pt",
+                "trace": "(line 675) AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_text_generation.py::TextGenerationPipelineTests::test_small_model_pt_bloom_accelerate",
+                "trace": "(line 675) AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_text_generation.py::TextGenerationPipelineTests::test_stop_sequence_stopping_criteria",
+                "trace": "(line 675) AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_image_to_text.py::ImageToTextPipelineTests::test_conditional_generation_llava",
+                "trace": "(line 1329) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 224.00 MiB. GPU 0 has a total capacity of 14.74 GiB of which 144.12 MiB is free. Process 21630 has 14.60 GiB memory in use. Of the allocated memory 14.19 GiB is allocated by PyTorch, and 279.76 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_image_to_text.py::ImageToTextPipelineTests::test_nougat",
+                "trace": "(line 675) AssertionError: Lists differ: [{'ge[35 chars]tical Understanding for Academic Documents\\n\\n Lukas Blecher'}] != [{'ge[35 chars]tical Understanding for Academic Documents\\n\\n Lukas Blec'}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_image_to_text.py::ImageToTextPipelineTests::test_small_model_pt",
+                "trace": "(line 675) AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_visual_question_answering.py::VisualQuestionAnsweringPipelineTests::test_large_model_pt_blip2",
+                "trace": "(line 2910) RuntimeError: expected scalar type Float but found Half"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_visual_question_answering.py::VisualQuestionAnsweringPipelineTests::test_small_model_pt_blip2",
+                "trace": "(line 2336) RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_fill_mask.py::FillMaskPipelineTests::test_large_model_pt",
+                "trace": "(line 675) AssertionError: Lists differ: [{'score': 0.007, 'token': 1573, 'token_str':[123 chars]hn'}] != [{'sequence': 'My name is John', 'score': 0.0[123 chars]is'}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_image_text_to_text.py::ImageTextToTextPipelineTests::test_model_pt_chat_template",
+                "trace": "(line 675) AssertionError: Lists differ: [{'in[166 chars]ted_text': 'The first image shows a statue of the Statue of'}] != [{'in[166 chars]ted_text': 'The first image shows a statue of Liberty in the'}]"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_zero_shot_image_classification.py::ZeroShotImageClassificationPipelineTests::test_small_model_pt",
+                "trace": "(line 3024) AttributeError: 'list' object has no attribute 'keys'"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_zero_shot_image_classification.py::ZeroShotImageClassificationPipelineTests::test_small_model_pt_fp16",
+                "trace": "(line 3024) AttributeError: 'list' object has no attribute 'keys'"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_depth_estimation.py::DepthEstimationPipelineTests::test_multiprocess",
+                "trace": "(line 2581) RuntimeError: Error(s) in loading state_dict for DepthAnythingForDepthEstimation:"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_mask_generation.py::MaskGenerationPipelineTests::test_small_model_pt",
+                "trace": "(line 734) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU 0 has a total capacity of 14.74 GiB of which 1.12 GiB is free. Process 21630 has 13.62 GiB memory in use. Of the allocated memory 9.88 GiB is allocated by PyTorch, and 3.60 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_mask_generation.py::MaskGenerationPipelineTests::test_threshold",
+                "trace": "(line 882) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 MiB. GPU 0 has a total capacity of 14.74 GiB of which 112.12 MiB is free. Process 21630 has 14.63 GiB memory in use. Of the allocated memory 14.37 GiB is allocated by PyTorch, and 116.38 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_summarization.py::SummarizationPipelineTests::test_integration_torch_summarization",
+                "trace": "(line 1329) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 198.00 MiB. GPU 0 has a total capacity of 14.74 GiB of which 112.12 MiB is free. Process 21630 has 14.63 GiB memory in use. Of the allocated memory 14.37 GiB is allocated by PyTorch, and 116.38 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_summarization.py::SummarizationPipelineTests::test_small_model_pt",
+                "trace": "(line 675) AssertionError: Lists differ: [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}] != [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}]"
+            }
+        ]
+    },
+    "job_link": {
+        "multi": "https://github.com/huggingface/transformers/actions/runs/14026085871/job/39264898987"
+    }
+}
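As the commit title notes, a report like this is pushed with `huggingface_hub` and can be pulled back down for triage. Below is a minimal sketch of both steps. `HfApi.upload_file` and `hf_hub_download` are the library's real API; the `REPO_ID`, the `repo_type="dataset"` choice, and the trace-parsing heuristic are assumptions, since the hosting repository is not named on this page.

```python
import json
from collections import Counter

from huggingface_hub import HfApi, hf_hub_download

REPO_ID = "your-org/ci-reports"  # hypothetical repo; the actual CI repo is not named here
FILENAME = "2025-03-24/ci_results_run_pipelines_torch_gpu/torch_pipeline_results.json"

# Upload the results file, mirroring the commit above.
api = HfApi()
api.upload_file(
    path_or_fileobj="torch_pipeline_results.json",
    path_in_repo=FILENAME,
    repo_id=REPO_ID,
    repo_type="dataset",  # assumption: reports are stored in a dataset repo
)

# Fetch the report back and summarize it.
path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME, repo_type="dataset")
with open(path) as f:
    report = json.load(f)

print(f"success: {report['success']}, multi-GPU failures: {report['failed']['multi']}")

# Every trace starts with "(line N) SomeError: ..."; bucket by exception class.
kinds = Counter(
    failure["trace"].split(") ", 1)[-1].split(":", 1)[0]
    for failure in report["failures"]["multi"]
)
for kind, count in kinds.most_common():
    print(f"{count:3d}  {kind}")
```

Run on the report above, this grouping yields 18 AssertionError (mostly Whisper timestamp and generated-text mismatches), 7 RuntimeError, 7 torch.OutOfMemoryError on the 14.74 GiB GPU, 3 AttributeError, and 1 OSError.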