Tags: Video-Text-to-Text · Transformers · Safetensors · qwen2_vl_bev · text-generation · llama-factory · full · Generated from Trainer · spatial-intelligence · 3d-vision
Instructions to use Spacewanderer8263/Proxy3D-8B with libraries, inference providers, notebooks, and local apps. Follow these links to get started.

- Libraries
  - Transformers

How to use Spacewanderer8263/Proxy3D-8B with Transformers:

```python
# Load model directly
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("Spacewanderer8263/Proxy3D-8B", dtype="auto")
```

- Notebooks
  - Google Colab
  - Kaggle
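Note that the snippet above is the generic auto-generated loader. The config below declares the custom `qwen2_vl_bev` architecture (`Qwen2VLBEVForConditionalGeneration`), which is not part of the standard Transformers release, so loading will most likely require the repository's own modeling code. A minimal sketch under that assumption; the `trust_remote_code` flag and the `AutoModel`/`AutoProcessor` entry points are assumptions, not confirmed by this card:

```python
# Hedged sketch: assumes the repo ships custom qwen2_vl_bev modeling code and
# registers it for trust_remote_code; check the repository README for the
# intended entry point (these class choices are assumptions, not confirmed).
from transformers import AutoModel, AutoProcessor

model_id = "Spacewanderer8263/Proxy3D-8B"

model = AutoModel.from_pretrained(
    model_id,
    torch_dtype="auto",       # config.json records bfloat16 weights
    trust_remote_code=True,   # needed for the custom architecture, if registered
)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
```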
Model configuration (`config.json`):

```json
{
  "architectures": ["Qwen2VLBEVForConditionalGeneration"],
  "attention_dropout": 0.0,
  "bev_config": {
    "anchor_heights": [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5],
    "bev_grid_size": [100, 100, 20],
    "dropout": 0.1,
    "embed_dim": 3584,
    "hidden_size": 3584,
    "level_start_index": [0],
    "model_type": "qwen2_vl_bev",
    "num_feature_levels": 1,
    "num_heads": 4,
    "num_images": 32,
    "num_layers": 1,
    "num_points": 32,
    "orig_img_size": [1296, 968],
    "resized_img_size": [21, 16],
    "spatial_shapes": [[16, 21]],
    "streaming": true,
    "torch_dtype": "float32",
    "use_history": true
  },
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "image_token_id": 151655,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 128000,
  "max_window_layers": 28,
  "model_type": "qwen2_vl_bev",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "mrope_section": [16, 24, 24],
    "rope_type": "default",
    "type": "default"
  },
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "text_config": {
    "architectures": ["Qwen2_5_VLForConditionalGeneration"],
    "attention_dropout": 0.0,
    "bos_token_id": 151643,
    "eos_token_id": 151645,
    "hidden_act": "silu",
    "hidden_size": 3584,
    "image_token_id": null,
    "initializer_range": 0.02,
    "intermediate_size": 18944,
    "layer_types": [
      "full_attention", "full_attention", "full_attention", "full_attention",
      "full_attention", "full_attention", "full_attention", "full_attention",
      "full_attention", "full_attention", "full_attention", "full_attention",
      "full_attention", "full_attention", "full_attention", "full_attention",
      "full_attention", "full_attention", "full_attention", "full_attention",
      "full_attention", "full_attention", "full_attention", "full_attention",
      "full_attention", "full_attention", "full_attention", "full_attention"
    ],
    "max_position_embeddings": 128000,
    "max_window_layers": 28,
    "model_type": "qwen2_vl_bev",
    "num_attention_heads": 28,
    "num_hidden_layers": 28,
    "num_key_value_heads": 4,
    "rms_norm_eps": 1e-06,
    "rope_scaling": {
      "mrope_section": [16, 24, 24],
      "rope_type": "default",
      "type": "default"
    },
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "torch_dtype": "float32",
    "use_cache": false,
    "use_sliding_window": false,
    "video_token_id": null,
    "vision_end_token_id": 151653,
    "vision_start_token_id": 151652,
    "vision_token_id": 151654,
    "vocab_size": 152064
  },
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.55.0",
  "use_cache": false,
  "use_sliding_window": false,
  "video_token_id": 151656,
  "vision_config": {
    "depth": 32,
    "fullatt_block_indexes": [7, 15, 23, 31],
    "hidden_act": "silu",
    "hidden_size": 1280,
    "in_channels": 3,
    "in_chans": 3,
    "initializer_range": 0.02,
    "intermediate_size": 3420,
    "model_type": "qwen2_vl_bev",
    "num_heads": 16,
    "out_hidden_size": 3584,
    "patch_size": 14,
    "spatial_merge_size": 2,
    "spatial_patch_size": 14,
    "temporal_patch_size": 2,
    "tokens_per_second": 2,
    "torch_dtype": "float32",
    "window_size": 112
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
  "vision_token_id": 151654,
  "vocab_size": 152064
}
```
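For programmatic access, the values above can be read back through `AutoConfig`. A minimal sketch, assuming the repository registers its custom `qwen2_vl_bev` config class for `trust_remote_code`; the attribute names simply mirror the keys in the config dump, and whether `bev_config` surfaces as a plain dict or a nested config object depends on that custom class:

```python
from transformers import AutoConfig

# Assumption: the custom qwen2_vl_bev config class is resolvable via
# trust_remote_code=True; otherwise this call will fail.
config = AutoConfig.from_pretrained("Spacewanderer8263/Proxy3D-8B", trust_remote_code=True)

print(config.model_type)         # "qwen2_vl_bev"
print(config.hidden_size)        # 3584 (7B-class Qwen2-VL text backbone)
print(config.num_hidden_layers)  # 28
print(config.vocab_size)         # 152064

# The BEV head settings (grid size, anchor heights, ...) live under bev_config;
# handle both dict and sub-config representations.
bev = config.bev_config
grid = bev["bev_grid_size"] if isinstance(bev, dict) else bev.bev_grid_size
print(grid)                      # [100, 100, 20]
```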