hf-transformers-bot commited on
Commit
1cb34ef
·
verified ·
1 Parent(s): 13a0989

Upload 2026-04-10/runs/32770-24249338965/ci_results_run_models_gpu/new_failures.json with huggingface_hub

Browse files
2026-04-10/runs/32770-24249338965/ci_results_run_models_gpu/new_failures.json ADDED
{
    "gemma3": {
        "failures": {
            "multi-gpu": [
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_dynamic_sliding_window_is_default",
                    "trace": "(line 862) AssertionError: 'DynamicSlidingWindowLayer' unexpectedly found in 'DynamicCache(layers=[DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer])'"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_1b_text_only",
                    "trace": "(line 716) AssertionError: Lists differ: ['Wri[48 chars]data streams, a boundless flow,\\nA silent worl[63 chars]ing'] != ['Wri[48 chars]data flows, a silent stream,\\nInto the neural [51 chars],\\n']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch",
                    "trace": "(line 530) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\n\\n[416 chars]”s.'] != ['user\\nYou are a helpful assistant.\\n\\n\\n[389 chars]own\"]"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
                    "trace": "(line 650) AssertionError: Lists differ: ['user\\nYou are a helpful assistant.\\n\\nHe[706 chars]te,'] != [\"user\\nYou are a helpful assistant.\\n\\nHe[674 chars]h a']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_bf16",
                    "trace": "(line 461) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\n\\n[117 chars]me?\"] != ['user\\nYou are a helpful assistant.\\n\\n\\n[178 chars]ike']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_crops",
                    "trace": "(line 577) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\nHe[162 chars]'m'\"] != ['user\\nYou are a helpful assistant.\\n\\nHe[268 chars]the']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_multiimage",
                    "trace": "(line 693) AssertionError: Lists differ: ['user\\nYou are a helpful assistant.\\n\\n\\n[146 chars]\\'d'] != [\"user\\nYou are a helpful assistant.\\n\\n\\n[141 chars]rch\"]"
                }
            ],
            "single-gpu": [
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_dynamic_sliding_window_is_default",
                    "trace": "(line 862) AssertionError: 'DynamicSlidingWindowLayer' unexpectedly found in 'DynamicCache(layers=[DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer, DynamicLayer, DynamicSlidingWindowLayer, DynamicSlidingWindowLayer])'"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_1b_text_only",
                    "trace": "(line 716) AssertionError: Lists differ: ['Wri[48 chars]data streams, a boundless flow,\\nA silent worl[63 chars]ing'] != ['Wri[48 chars]data flows, a silent stream,\\nInto the neural [51 chars],\\n']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch",
                    "trace": "(line 530) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\n\\n[416 chars]”s.'] != ['user\\nYou are a helpful assistant.\\n\\n\\n[389 chars]own\"]"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_batch_crops",
                    "trace": "(line 650) AssertionError: Lists differ: ['user\\nYou are a helpful assistant.\\n\\nHe[706 chars]te,'] != [\"user\\nYou are a helpful assistant.\\n\\nHe[674 chars]h a']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_bf16",
                    "trace": "(line 461) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\n\\n[117 chars]me?\"] != ['user\\nYou are a helpful assistant.\\n\\n\\n[178 chars]ike']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_crops",
                    "trace": "(line 577) AssertionError: Lists differ: [\"user\\nYou are a helpful assistant.\\n\\nHe[162 chars]'m'\"] != ['user\\nYou are a helpful assistant.\\n\\nHe[268 chars]the']"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3IntegrationTest::test_model_4b_multiimage",
                    "trace": "(line 693) AssertionError: Lists differ: ['user\\nYou are a helpful assistant.\\n\\n\\n[146 chars]\\'d'] != [\"user\\nYou are a helpful assistant.\\n\\n\\n[141 chars]rch\"]"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/24249338965/job/70804772220",
            "single": "https://github.com/huggingface/transformers/actions/runs/24249338965/job/70804772280"
        }
    },
    "llava": {
        "failures": {
            "multi-gpu": [
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
                    "trace": "(line 566) AssertionError: Lists differ: [\"\\n [51 chars]TANT:\", '\\nUSER: Describe the image.\\nASSISTAN[139 chars]man'] != [\"\\n [51 chars]TANT: In the two images, the primary differenc[294 chars]ama']"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_generation_no_images",
                    "trace": "(line 528) OSError: llava-hf/llava-1.5-7b-hf does not appear to have a file named model-00001-of-00003.safetensors. Checkout 'https://huggingface.co/llava-hf/llava-1.5-7b-hf/tree/main' for available files."
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_generation_siglip_backbone",
                    "trace": "(line 625) AssertionError: 'user[29 chars]t These are two images of the Microsoft Window[121 chars]shot' != 'user[29 chars]t The image shows two cats, one on the left an[80 chars] cat'"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral",
                    "trace": "(line 4832) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 21.10 GiB. GPU 0 has a total capacity of 22.30 GiB of which 20.14 GiB is free. Process 2453681 has 2.16 GiB memory in use. Of the allocated memory 1.64 GiB is allocated by PyTorch, and 20.66 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://docs.pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_4bit",
                    "trace": "(line 687) AssertionError: False is not true"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_batched",
                    "trace": "(line 724) AssertionError: Lists differ: ['Wha[97 chars]mage?A narrow dirt path is surrounded by grass[74 chars]ue.'] != ['Wha[97 chars]mage?The image depicts a narrow, winding dirt [175 chars]ere']"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched_regression",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_single",
                    "trace": "(line 415) AssertionError"
                }
            ],
            "single-gpu": [
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_batched_generation",
                    "trace": "(line 566) AssertionError: Lists differ: [\"\\n [51 chars]TANT:\", '\\nUSER: Describe the image.\\nASSISTAN[139 chars]man'] != [\"\\n [51 chars]TANT: In the two images, the primary differenc[294 chars]ama']"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_generation_siglip_backbone",
                    "trace": "(line 625) AssertionError: 'user[29 chars]t These are two different types of animals: a [15 chars]key.' != 'user[29 chars]t The image shows two cats, one on the left an[80 chars] cat'"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral",
                    "trace": "(line 4832) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 21.10 GiB. GPU 0 has a total capacity of 22.30 GiB of which 20.28 GiB is free. Process 2453640 has 2.01 GiB memory in use. Of the allocated memory 1.63 GiB is allocated by PyTorch, and 98.50 KiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://docs.pytorch.org/docs/stable/notes/cuda.html#optimizing-memory-usage-with-pytorch-cuda-alloc-conf)"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_4bit",
                    "trace": "(line 687) AssertionError: False is not true"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_pixtral_batched",
                    "trace": "(line 724) AssertionError: Lists differ: ['Wha[97 chars]mage?A narrow dirt path is surrounded by grass[74 chars]ue.'] != ['Wha[97 chars]mage?The image depicts a narrow, winding dirt [175 chars]ere']"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_batched_regression",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava/test_modeling_llava.py::LlavaForConditionalGenerationIntegrationTest::test_small_model_integration_test_llama_single",
                    "trace": "(line 415) AssertionError"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/24249338965/job/70804772227",
            "single": "https://github.com/huggingface/transformers/actions/runs/24249338965/job/70804772216"
        }
    },
    "llava_next_video": {
        "failures": {
            "single-gpu": [
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_different_vision_types",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_matches_single",
                    "trace": "(line 415) AssertionError"
                }
            ],
            "multi-gpu": [
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_different_vision_types",
                    "trace": "(line 415) AssertionError"
                },
                {
                    "line": "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationIntegrationTest::test_small_model_integration_test_batch_matches_single",
                    "trace": "(line 415) AssertionError"
                }
            ]
        },
        "job_link": {
            "single": "https://github.com/huggingface/transformers/actions/runs/24249338965/job/70804772264",
            "multi": "https://github.com/huggingface/transformers/actions/runs/24249338965/job/70804772260"
        }
    }
}