Upload lightonai_LightOnOCR-2-1B_0.txt with huggingface_hub

lightonai_LightOnOCR-2-1B_0.txt CHANGED (+1 -109)

@@ -1,109 +1 @@
-```
-# Use a pipeline as a high-level helper
-from transformers import pipeline
-
-pipe = pipeline("image-text-to-text", model="lightonai/LightOnOCR-2-1B")
-messages = [
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
-            {"type": "text", "text": "What animal is on the candy?"}
-        ]
-    },
-]
-pipe(text=messages)
-```
-
-ERROR:
-Traceback (most recent call last):
-  File "/tmp/lightonai_LightOnOCR-2-1B_07O8H7r.py", line 36, in <module>
-    pipe(text=messages)
-    ~~~~^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 283, in __call__
-    return super().__call__(Chat(text), **kwargs)
-           ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1274, in __call__
-    return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
-           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1281, in run_single
-    model_outputs = self.forward(model_inputs, **forward_params)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/base.py", line 1173, in forward
-    model_outputs = self._forward(model_inputs, **forward_params)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/pipelines/image_text_to_text.py", line 372, in _forward
-    generated_sequence = self.model.generate(**model_inputs, **generate_kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 124, in decorate_context
-    return func(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 2669, in generate
-    result = decoding_method(
-        self,
-        ...<5 lines>...
-        **model_kwargs,
-    )
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 2864, in _sample
-    outputs = self._prefill(input_ids, generation_config, model_kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/generation/utils.py", line 3853, in _prefill
-    return self(**model_inputs, return_dict=True)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
-    return self._call_impl(*args, **kwargs)
-           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
-    return forward_call(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 1002, in wrapper
-    outputs = func(self, *args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 446, in forward
-    outputs = self.model(
-        input_ids=input_ids,
-        ...<11 lines>...
-        **kwargs,
-    )
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
-    return self._call_impl(*args, **kwargs)
-           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
-    return forward_call(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 1002, in wrapper
-    outputs = func(self, *args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 313, in forward
-    image_features = self.get_image_features(
-                     ~~~~~~~~~~~~~~~~~~~~~~~^
-        pixel_values=pixel_values,
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^
-        ...<2 lines>...
-        return_dict=True,
-        ^^^^^^^^^^^^^^^^^
-    ).pooler_output
-    ^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 1002, in wrapper
-    outputs = func(self, *args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/mistral3/modeling_mistral3.py", line 232, in get_image_features
-    image_outputs = self.vision_tower(
-        pixel_values,
-        ...<3 lines>...
-        **kwargs,
-    )
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
-    return self._call_impl(*args, **kwargs)
-           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
-    return forward_call(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/utils/generic.py", line 835, in wrapper
-    output = func(self, *args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/transformers/models/pixtral/modeling_pixtral.py", line 496, in forward
-    patch_embeds = self.patch_conv(pixel_values)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1776, in _wrapped_call_impl
-    return self._call_impl(*args, **kwargs)
-           ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/module.py", line 1787, in _call_impl
-    return forward_call(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/conv.py", line 553, in forward
-    return self._conv_forward(input, self.weight, self.bias)
-           ~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/156a2f51d9a7a48b/lib/python3.13/site-packages/torch/nn/modules/conv.py", line 548, in _conv_forward
-    return F.conv2d(
-           ~~~~~~~~^
-        input, weight, bias, self.stride, self.padding, self.dilation, self.groups
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-    )
-    ^
-RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (CUDABFloat16Type) should be the same
+Everything was good in lightonai_LightOnOCR-2-1B_0.txt
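
Note on the removed error report: the traceback is a dtype mismatch. The Pixtral vision tower's patch-embedding convolution holds bfloat16 weights, while the processor's pixel values reach it as float32 CUDA tensors. Below is a minimal workaround sketch, not the model authors' documented fix: it assumes that loading the pipeline with an explicit torch_dtype, so the weights match the float32 inputs, is enough to clear the error.

```python
# Workaround sketch for the RuntimeError above.
# Assumption: aligning the model dtype with the fp32 pixel values
# resolves the input/weight mismatch in the vision tower.
import torch
from transformers import pipeline

pipe = pipeline(
    "image-text-to-text",
    model="lightonai/LightOnOCR-2-1B",
    torch_dtype=torch.float32,  # fp32 weights to match fp32 pixel_values (~2x memory vs bf16)
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
print(pipe(text=messages))
```

Keeping the weights in bfloat16 instead would halve memory, but then pixel_values would need casting to torch.bfloat16 before the forward pass; whether the pipeline performs that cast automatically depends on the transformers version, which is why the fp32 route is sketched here.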