| id (string) | scripts (list) | code_urls (list) | execution_urls (list) | estimated_vram (float64) |
|---|---|---|---|---|
| openbmb/AgentCPM-Report |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('openbmb_AgentCPM-Report_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openbmb_AgentCPM-Report_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openbmb_AgentCPM-Report_0.txt|openbmb_AgentCPM-Report_0.txt>',\n )\n\n with open('openbmb_AgentCPM-Report_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openbmb_AgentCPM-Report_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openbmb_AgentCPM-Report_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 19.82 |
| Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-TTS-12Hz-1.7B-CustomVoice_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-TTS-12Hz-1.7B-CustomVoice_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-TTS-12Hz-1.7B-CustomVoice_0.txt|Qwen_Qwen3-TTS-12Hz-1.7B-CustomVoice_0.txt>',\n )\n\n with open('Qwen_Qwen3-TTS-12Hz-1.7B-CustomVoice_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-TTS-12Hz-1.7B-CustomVoice_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-TTS-12Hz-1.7B-CustomVoice_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 4.64 |
| FlashLabs/Chroma-4B |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('FlashLabs_Chroma-4B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in FlashLabs_Chroma-4B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/FlashLabs_Chroma-4B_0.txt|FlashLabs_Chroma-4B_0.txt>',\n )\n\n with open('FlashLabs_Chroma-4B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='FlashLabs_Chroma-4B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='FlashLabs_Chroma-4B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"FlashLabs/Chroma-4B\", trust_remote_code=True, dtype=\"auto\")\n with open('FlashLabs_Chroma-4B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in FlashLabs_Chroma-4B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/FlashLabs_Chroma-4B_1.txt|FlashLabs_Chroma-4B_1.txt>',\n )\n\n with open('FlashLabs_Chroma-4B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"FlashLabs/Chroma-4B\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='FlashLabs_Chroma-4B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='FlashLabs_Chroma-4B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/FlashLabs_Chroma-4B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/FlashLabs_Chroma-4B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/FlashLabs_Chroma-4B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/FlashLabs_Chroma-4B_1.txt"
] | 28.68 |
| lightonai/LightOnOCR-2-1B |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"lightonai/LightOnOCR-2-1B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('lightonai_LightOnOCR-2-1B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightonai_LightOnOCR-2-1B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightonai_LightOnOCR-2-1B_0.txt|lightonai_LightOnOCR-2-1B_0.txt>',\n )\n\n with open('lightonai_LightOnOCR-2-1B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"lightonai/LightOnOCR-2-1B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightonai_LightOnOCR-2-1B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightonai_LightOnOCR-2-1B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSeq2SeqLM\n \n processor = AutoProcessor.from_pretrained(\"lightonai/LightOnOCR-2-1B\")\n model = AutoModelForSeq2SeqLM.from_pretrained(\"lightonai/LightOnOCR-2-1B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('lightonai_LightOnOCR-2-1B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lightonai_LightOnOCR-2-1B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lightonai_LightOnOCR-2-1B_1.txt|lightonai_LightOnOCR-2-1B_1.txt>',\n )\n\n with open('lightonai_LightOnOCR-2-1B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSeq2SeqLM\n\nprocessor = AutoProcessor.from_pretrained(\"lightonai/LightOnOCR-2-1B\")\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"lightonai/LightOnOCR-2-1B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lightonai_LightOnOCR-2-1B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lightonai_LightOnOCR-2-1B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lightonai_LightOnOCR-2-1B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/lightonai_LightOnOCR-2-1B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lightonai_LightOnOCR-2-1B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/lightonai_LightOnOCR-2-1B_1.txt"
] | 2.44 |
| stepfun-ai/Step3-VL-10B |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('stepfun-ai_Step3-VL-10B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_Step3-VL-10B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_Step3-VL-10B_0.txt|stepfun-ai_Step3-VL-10B_0.txt>',\n )\n\n with open('stepfun-ai_Step3-VL-10B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_Step3-VL-10B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_Step3-VL-10B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 24.63 |
| Qwen/Qwen3-TTS-12Hz-1.7B-Base |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-TTS-12Hz-1.7B-Base_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-TTS-12Hz-1.7B-Base_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-TTS-12Hz-1.7B-Base_0.txt|Qwen_Qwen3-TTS-12Hz-1.7B-Base_0.txt>',\n )\n\n with open('Qwen_Qwen3-TTS-12Hz-1.7B-Base_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-TTS-12Hz-1.7B-Base_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-TTS-12Hz-1.7B-Base_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0 |
| google/translategemma-4b-it |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_translategemma-4b-it_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-4b-it_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-4b-it_0.txt|google_translategemma-4b-it_0.txt>',\n )\n\n with open('google_translategemma-4b-it_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-4b-it_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-4b-it_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/translategemma-4b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('google_translategemma-4b-it_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-4b-it_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-4b-it_1.txt|google_translategemma-4b-it_1.txt>',\n )\n\n with open('google_translategemma-4b-it_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/translategemma-4b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-4b-it_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-4b-it_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForImageTextToText\n \n processor = AutoProcessor.from_pretrained(\"google/translategemma-4b-it\")\n model = AutoModelForImageTextToText.from_pretrained(\"google/translategemma-4b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('google_translategemma-4b-it_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-4b-it_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-4b-it_2.txt|google_translategemma-4b-it_2.txt>',\n )\n\n with open('google_translategemma-4b-it_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\n\nprocessor = AutoProcessor.from_pretrained(\"google/translategemma-4b-it\")\nmodel = AutoModelForImageTextToText.from_pretrained(\"google/translategemma-4b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-4b-it_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-4b-it_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-4b-it_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-4b-it_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-4b-it_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-4b-it_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-4b-it_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-4b-it_2.txt"
] | 12.04 |
| microsoft/VibeVoice-ASR |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"automatic-speech-recognition\", model=\"microsoft/VibeVoice-ASR\")\n with open('microsoft_VibeVoice-ASR_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-ASR_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-ASR_0.txt|microsoft_VibeVoice-ASR_0.txt>',\n )\n\n with open('microsoft_VibeVoice-ASR_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"automatic-speech-recognition\", model=\"microsoft/VibeVoice-ASR\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-ASR_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-ASR_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import VibeVoiceForASRTraining\n model = VibeVoiceForASRTraining.from_pretrained(\"microsoft/VibeVoice-ASR\", dtype=\"auto\")\n with open('microsoft_VibeVoice-ASR_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-ASR_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-ASR_1.txt|microsoft_VibeVoice-ASR_1.txt>',\n )\n\n with open('microsoft_VibeVoice-ASR_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import VibeVoiceForASRTraining\nmodel = VibeVoiceForASRTraining.from_pretrained(\"microsoft/VibeVoice-ASR\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-ASR_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-ASR_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-ASR_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-ASR_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-ASR_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-ASR_1.txt"
] | 21 |
| nvidia/personaplex-7b-v1 |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('nvidia_personaplex-7b-v1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_personaplex-7b-v1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_personaplex-7b-v1_0.txt|nvidia_personaplex-7b-v1_0.txt>',\n )\n\n with open('nvidia_personaplex-7b-v1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_personaplex-7b-v1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_personaplex-7b-v1_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_personaplex-7b-v1_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_personaplex-7b-v1_0.txt"
] | 0 |
| HeartMuLa/HeartMuLa-oss-3B |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('HeartMuLa_HeartMuLa-oss-3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in HeartMuLa_HeartMuLa-oss-3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/HeartMuLa_HeartMuLa-oss-3B_0.txt|HeartMuLa_HeartMuLa-oss-3B_0.txt>',\n )\n\n with open('HeartMuLa_HeartMuLa-oss-3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='HeartMuLa_HeartMuLa-oss-3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='HeartMuLa_HeartMuLa-oss-3B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 19.07 |
| LiquidAI/LFM2.5-1.2B-Thinking |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"LiquidAI/LFM2.5-1.2B-Thinking\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('LiquidAI_LFM2.5-1.2B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2.5-1.2B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2.5-1.2B-Thinking_0.txt|LiquidAI_LFM2.5-1.2B-Thinking_0.txt>',\n )\n\n with open('LiquidAI_LFM2.5-1.2B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"LiquidAI/LFM2.5-1.2B-Thinking\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2.5-1.2B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2.5-1.2B-Thinking_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Thinking\")\n model = AutoModelForCausalLM.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Thinking\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('LiquidAI_LFM2.5-1.2B-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2.5-1.2B-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2.5-1.2B-Thinking_1.txt|LiquidAI_LFM2.5-1.2B-Thinking_1.txt>',\n )\n\n with open('LiquidAI_LFM2.5-1.2B-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Thinking\")\nmodel = AutoModelForCausalLM.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Thinking\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2.5-1.2B-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2.5-1.2B-Thinking_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2.5-1.2B-Thinking_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2.5-1.2B-Thinking_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2.5-1.2B-Thinking_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2.5-1.2B-Thinking_1.txt"
] | 2.83 |
| zai-org/GLM-4.7-Flash |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7-Flash\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('zai-org_GLM-4.7-Flash_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7-Flash_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7-Flash_0.txt|zai-org_GLM-4.7-Flash_0.txt>',\n )\n\n with open('zai-org_GLM-4.7-Flash_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7-Flash\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7-Flash_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7-Flash_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7-Flash\")\n model = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7-Flash\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.7-Flash_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7-Flash_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7-Flash_1.txt|zai-org_GLM-4.7-Flash_1.txt>',\n )\n\n with open('zai-org_GLM-4.7-Flash_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7-Flash\")\nmodel = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7-Flash\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7-Flash_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7-Flash_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7-Flash_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7-Flash_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7-Flash_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7-Flash_1.txt"
] | 75.6 |
| zai-org/GLM-Image |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"zai-org/GLM-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('zai-org_GLM-Image_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-Image_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-Image_0.txt|zai-org_GLM-Image_0.txt>',\n )\n\n with open('zai-org_GLM-Image_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"zai-org/GLM-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-Image_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-Image_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-Image_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-Image_0.txt"
] | 0 |
| Qwen/Qwen3-TTS-12Hz-1.7B-VoiceDesign |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-TTS-12Hz-1.7B-VoiceDesign_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-TTS-12Hz-1.7B-VoiceDesign_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-TTS-12Hz-1.7B-VoiceDesign_0.txt|Qwen_Qwen3-TTS-12Hz-1.7B-VoiceDesign_0.txt>',\n )\n\n with open('Qwen_Qwen3-TTS-12Hz-1.7B-VoiceDesign_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-TTS-12Hz-1.7B-VoiceDesign_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-TTS-12Hz-1.7B-VoiceDesign_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 4.64 |
| black-forest-labs/FLUX.2-klein-4B |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-klein-4B\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('black-forest-labs_FLUX.2-klein-4B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-klein-4B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-klein-4B_0.txt|black-forest-labs_FLUX.2-klein-4B_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-klein-4B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-klein-4B\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-klein-4B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-klein-4B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-klein-4B_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-klein-4B_0.txt"
] | 0 |
| black-forest-labs/FLUX.2-klein-9B |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.2-klein-9B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-klein-9B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-klein-9B_0.txt|black-forest-labs_FLUX.2-klein-9B_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-klein-9B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-klein-9B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-klein-9B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-klein-9B\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('black-forest-labs_FLUX.2-klein-9B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-klein-9B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-klein-9B_1.txt|black-forest-labs_FLUX.2-klein-9B_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-klein-9B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-klein-9B\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-klein-9B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-klein-9B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-klein-9B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-klein-9B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-klein-9B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-klein-9B_1.txt"
] | 0 |
| zai-org/GLM-4.7 |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('zai-org_GLM-4.7_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7_0.txt|zai-org_GLM-4.7_0.txt>',\n )\n\n with open('zai-org_GLM-4.7_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.7\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7\")\n model = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.7_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.7_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.7_1.txt|zai-org_GLM-4.7_1.txt>',\n )\n\n with open('zai-org_GLM-4.7_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.7\")\nmodel = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.7\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.7_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.7_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.7_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.7_1.txt"
] | 867.69 |
| Alibaba-Apsara/DASD-4B-Thinking |
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Alibaba-Apsara/DASD-4B-Thinking\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Alibaba-Apsara_DASD-4B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Alibaba-Apsara_DASD-4B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Alibaba-Apsara_DASD-4B-Thinking_0.txt|Alibaba-Apsara_DASD-4B-Thinking_0.txt>',\n )\n\n with open('Alibaba-Apsara_DASD-4B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Alibaba-Apsara/DASD-4B-Thinking\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Alibaba-Apsara_DASD-4B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Alibaba-Apsara_DASD-4B-Thinking_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Alibaba-Apsara/DASD-4B-Thinking\")\n model = AutoModelForCausalLM.from_pretrained(\"Alibaba-Apsara/DASD-4B-Thinking\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Alibaba-Apsara_DASD-4B-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Alibaba-Apsara_DASD-4B-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Alibaba-Apsara_DASD-4B-Thinking_1.txt|Alibaba-Apsara_DASD-4B-Thinking_1.txt>',\n )\n\n with open('Alibaba-Apsara_DASD-4B-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Alibaba-Apsara/DASD-4B-Thinking\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Alibaba-Apsara/DASD-4B-Thinking\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Alibaba-Apsara_DASD-4B-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Alibaba-Apsara_DASD-4B-Thinking_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Alibaba-Apsara_DASD-4B-Thinking_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Alibaba-Apsara_DASD-4B-Thinking_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Alibaba-Apsara_DASD-4B-Thinking_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Alibaba-Apsara_DASD-4B-Thinking_1.txt"
] | 9.74
|
numind/NuMarkdown-8B-Thinking
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-to-text\", model=\"numind/NuMarkdown-8B-Thinking\")\n with open('numind_NuMarkdown-8B-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in numind_NuMarkdown-8B-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/numind_NuMarkdown-8B-Thinking_0.txt|numind_NuMarkdown-8B-Thinking_0.txt>',\n )\n\n with open('numind_NuMarkdown-8B-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-to-text\", model=\"numind/NuMarkdown-8B-Thinking\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='numind_NuMarkdown-8B-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='numind_NuMarkdown-8B-Thinking_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForImageTextToText\n \n processor = AutoProcessor.from_pretrained(\"numind/NuMarkdown-8B-Thinking\")\n model = AutoModelForImageTextToText.from_pretrained(\"numind/NuMarkdown-8B-Thinking\")\n with open('numind_NuMarkdown-8B-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in numind_NuMarkdown-8B-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/numind_NuMarkdown-8B-Thinking_1.txt|numind_NuMarkdown-8B-Thinking_1.txt>',\n )\n\n with open('numind_NuMarkdown-8B-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\n\nprocessor = AutoProcessor.from_pretrained(\"numind/NuMarkdown-8B-Thinking\")\nmodel = AutoModelForImageTextToText.from_pretrained(\"numind/NuMarkdown-8B-Thinking\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='numind_NuMarkdown-8B-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='numind_NuMarkdown-8B-Thinking_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/numind_NuMarkdown-8B-Thinking_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/numind_NuMarkdown-8B-Thinking_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/numind_NuMarkdown-8B-Thinking_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/numind_NuMarkdown-8B-Thinking_1.txt"
] | 20.08
|
Qwen/Qwen3-TTS-12Hz-0.6B-Base
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-TTS-12Hz-0.6B-Base_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-TTS-12Hz-0.6B-Base_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-TTS-12Hz-0.6B-Base_0.txt|Qwen_Qwen3-TTS-12Hz-0.6B-Base_0.txt>',\n )\n\n with open('Qwen_Qwen3-TTS-12Hz-0.6B-Base_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-TTS-12Hz-0.6B-Base_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-TTS-12Hz-0.6B-Base_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
ekwek/Soprano-1.1-80M
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"ekwek/Soprano-1.1-80M\")\n with open('ekwek_Soprano-1.1-80M_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ekwek_Soprano-1.1-80M_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ekwek_Soprano-1.1-80M_0.txt|ekwek_Soprano-1.1-80M_0.txt>',\n )\n\n with open('ekwek_Soprano-1.1-80M_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"ekwek/Soprano-1.1-80M\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ekwek_Soprano-1.1-80M_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ekwek_Soprano-1.1-80M_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"ekwek/Soprano-1.1-80M\")\n model = AutoModelForCausalLM.from_pretrained(\"ekwek/Soprano-1.1-80M\")\n with open('ekwek_Soprano-1.1-80M_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ekwek_Soprano-1.1-80M_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ekwek_Soprano-1.1-80M_1.txt|ekwek_Soprano-1.1-80M_1.txt>',\n )\n\n with open('ekwek_Soprano-1.1-80M_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"ekwek/Soprano-1.1-80M\")\nmodel = AutoModelForCausalLM.from_pretrained(\"ekwek/Soprano-1.1-80M\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ekwek_Soprano-1.1-80M_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ekwek_Soprano-1.1-80M_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ekwek_Soprano-1.1-80M_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ekwek_Soprano-1.1-80M_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ekwek_Soprano-1.1-80M_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ekwek_Soprano-1.1-80M_1.txt"
] | 0.19
|
fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt|fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt>',\n )\n\n with open('fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"fal/Qwen-Image-Edit-2511-Multiple-Angles-LoRA\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/fal_Qwen-Image-Edit-2511-Multiple-Angles-LoRA_0.txt"
] | 0
|
Lightricks/LTX-2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Lightricks/LTX-2\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.to(\"cuda\")\n \n prompt = \"A man with short gray hair plays a red electric guitar.\"\n image = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n )\n \n output = pipe(image=image, prompt=prompt).frames[0]\n export_to_video(output, \"output.mp4\")\n with open('Lightricks_LTX-2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Lightricks_LTX-2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Lightricks_LTX-2_0.txt|Lightricks_LTX-2_0.txt>',\n )\n\n with open('Lightricks_LTX-2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image, export_to_video\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Lightricks/LTX-2\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.to(\"cuda\")\n\nprompt = \"A man with short gray hair plays a red electric guitar.\"\nimage = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n)\n\noutput = pipe(image=image, prompt=prompt).frames[0]\nexport_to_video(output, \"output.mp4\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Lightricks_LTX-2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Lightricks_LTX-2_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Lightricks_LTX-2_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Lightricks_LTX-2_0.txt"
] | 0
|
google/translategemma-12b-it
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_translategemma-12b-it_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-12b-it_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-12b-it_0.txt|google_translategemma-12b-it_0.txt>',\n )\n\n with open('google_translategemma-12b-it_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-12b-it_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-12b-it_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/translategemma-12b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('google_translategemma-12b-it_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-12b-it_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-12b-it_1.txt|google_translategemma-12b-it_1.txt>',\n )\n\n with open('google_translategemma-12b-it_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/translategemma-12b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-12b-it_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-12b-it_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForImageTextToText\n \n processor = AutoProcessor.from_pretrained(\"google/translategemma-12b-it\")\n model = AutoModelForImageTextToText.from_pretrained(\"google/translategemma-12b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('google_translategemma-12b-it_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-12b-it_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-12b-it_2.txt|google_translategemma-12b-it_2.txt>',\n )\n\n with open('google_translategemma-12b-it_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\n\nprocessor = AutoProcessor.from_pretrained(\"google/translategemma-12b-it\")\nmodel = AutoModelForImageTextToText.from_pretrained(\"google/translategemma-12b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-12b-it_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-12b-it_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-12b-it_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-12b-it_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-12b-it_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-12b-it_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-12b-it_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-12b-it_2.txt"
] | 31.95
|
Overworld/Waypoint-1-Small
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Overworld/Waypoint-1-Small\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Overworld_Waypoint-1-Small_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Overworld_Waypoint-1-Small_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Overworld_Waypoint-1-Small_0.txt|Overworld_Waypoint-1-Small_0.txt>',\n )\n\n with open('Overworld_Waypoint-1-Small_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Overworld/Waypoint-1-Small\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Overworld_Waypoint-1-Small_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Overworld_Waypoint-1-Small_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Overworld_Waypoint-1-Small_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Overworld_Waypoint-1-Small_0.txt"
] | 0
|
Qwen/Qwen3-TTS-12Hz-0.6B-CustomVoice
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Qwen_Qwen3-TTS-12Hz-0.6B-CustomVoice_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-TTS-12Hz-0.6B-CustomVoice_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-TTS-12Hz-0.6B-CustomVoice_0.txt|Qwen_Qwen3-TTS-12Hz-0.6B-CustomVoice_0.txt>',\n )\n\n with open('Qwen_Qwen3-TTS-12Hz-0.6B-CustomVoice_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-TTS-12Hz-0.6B-CustomVoice_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-TTS-12Hz-0.6B-CustomVoice_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 2.19
|
microsoft/OptiMind-SFT
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"microsoft/OptiMind-SFT\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('microsoft_OptiMind-SFT_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_OptiMind-SFT_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_OptiMind-SFT_0.txt|microsoft_OptiMind-SFT_0.txt>',\n )\n\n with open('microsoft_OptiMind-SFT_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"microsoft/OptiMind-SFT\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_OptiMind-SFT_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_OptiMind-SFT_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"microsoft/OptiMind-SFT\")\n model = AutoModelForCausalLM.from_pretrained(\"microsoft/OptiMind-SFT\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('microsoft_OptiMind-SFT_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_OptiMind-SFT_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_OptiMind-SFT_1.txt|microsoft_OptiMind-SFT_1.txt>',\n )\n\n with open('microsoft_OptiMind-SFT_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/OptiMind-SFT\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/OptiMind-SFT\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_OptiMind-SFT_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_OptiMind-SFT_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_OptiMind-SFT_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_OptiMind-SFT_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_OptiMind-SFT_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_OptiMind-SFT_1.txt"
] | 50.64
|
stepfun-ai/Step-Audio-R1.1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"stepfun-ai/Step-Audio-R1.1\", trust_remote_code=True, dtype=\"auto\")\n with open('stepfun-ai_Step-Audio-R1.1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_Step-Audio-R1.1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_Step-Audio-R1.1_0.txt|stepfun-ai_Step-Audio-R1.1_0.txt>',\n )\n\n with open('stepfun-ai_Step-Audio-R1.1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"stepfun-ai/Step-Audio-R1.1\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_Step-Audio-R1.1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_Step-Audio-R1.1_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_Step-Audio-R1.1_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_Step-Audio-R1.1_0.txt"
] | 81.09
|
google/translategemma-27b-it
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_translategemma-27b-it_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-27b-it_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-27b-it_0.txt|google_translategemma-27b-it_0.txt>',\n )\n\n with open('google_translategemma-27b-it_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-27b-it_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-27b-it_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/translategemma-27b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('google_translategemma-27b-it_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-27b-it_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-27b-it_1.txt|google_translategemma-27b-it_1.txt>',\n )\n\n with open('google_translategemma-27b-it_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/translategemma-27b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-27b-it_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-27b-it_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForImageTextToText\n \n processor = AutoProcessor.from_pretrained(\"google/translategemma-27b-it\")\n model = AutoModelForImageTextToText.from_pretrained(\"google/translategemma-27b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('google_translategemma-27b-it_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_translategemma-27b-it_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_translategemma-27b-it_2.txt|google_translategemma-27b-it_2.txt>',\n )\n\n with open('google_translategemma-27b-it_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\n\nprocessor = AutoProcessor.from_pretrained(\"google/translategemma-27b-it\")\nmodel = AutoModelForImageTextToText.from_pretrained(\"google/translategemma-27b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_translategemma-27b-it_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_translategemma-27b-it_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-27b-it_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-27b-it_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_translategemma-27b-it_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-27b-it_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-27b-it_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_translategemma-27b-it_2.txt"
] | 69.84
|
RuneXX/LTX-2-Workflows
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('RuneXX_LTX-2-Workflows_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in RuneXX_LTX-2-Workflows_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/RuneXX_LTX-2-Workflows_0.txt|RuneXX_LTX-2-Workflows_0.txt>',\n )\n\n with open('RuneXX_LTX-2-Workflows_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='RuneXX_LTX-2-Workflows_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='RuneXX_LTX-2-Workflows_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
ByteDance-Seed/Stable-DiffCoder-8B-Instruct
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"ByteDance-Seed/Stable-DiffCoder-8B-Instruct\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt|ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt>',\n )\n\n with open('ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"ByteDance-Seed/Stable-DiffCoder-8B-Instruct\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"ByteDance-Seed/Stable-DiffCoder-8B-Instruct\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"ByteDance-Seed/Stable-DiffCoder-8B-Instruct\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt|ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt>',\n )\n\n with open('ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"ByteDance-Seed/Stable-DiffCoder-8B-Instruct\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"ByteDance-Seed/Stable-DiffCoder-8B-Instruct\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ByteDance-Seed_Stable-DiffCoder-8B-Instruct_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ByteDance-Seed_Stable-DiffCoder-8B-Instruct_1.txt"
] | 19.98
|
Tongyi-MAI/Z-Image-Turbo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Tongyi-MAI_Z-Image-Turbo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Tongyi-MAI_Z-Image-Turbo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Tongyi-MAI_Z-Image-Turbo_0.txt|Tongyi-MAI_Z-Image-Turbo_0.txt>',\n )\n\n with open('Tongyi-MAI_Z-Image-Turbo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Tongyi-MAI_Z-Image-Turbo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Tongyi-MAI_Z-Image-Turbo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Tongyi-MAI_Z-Image-Turbo_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Tongyi-MAI_Z-Image-Turbo_0.txt"
] | 0
|
openbmb/AgentCPM-Explore
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"openbmb/AgentCPM-Explore\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('openbmb_AgentCPM-Explore_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openbmb_AgentCPM-Explore_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openbmb_AgentCPM-Explore_0.txt|openbmb_AgentCPM-Explore_0.txt>',\n )\n\n with open('openbmb_AgentCPM-Explore_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"openbmb/AgentCPM-Explore\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openbmb_AgentCPM-Explore_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openbmb_AgentCPM-Explore_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"openbmb/AgentCPM-Explore\")\n model = AutoModelForCausalLM.from_pretrained(\"openbmb/AgentCPM-Explore\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('openbmb_AgentCPM-Explore_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openbmb_AgentCPM-Explore_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openbmb_AgentCPM-Explore_1.txt|openbmb_AgentCPM-Explore_1.txt>',\n )\n\n with open('openbmb_AgentCPM-Explore_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"openbmb/AgentCPM-Explore\")\nmodel = AutoModelForCausalLM.from_pretrained(\"openbmb/AgentCPM-Explore\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openbmb_AgentCPM-Explore_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openbmb_AgentCPM-Explore_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openbmb_AgentCPM-Explore_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openbmb_AgentCPM-Explore_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openbmb_AgentCPM-Explore_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openbmb_AgentCPM-Explore_1.txt"
] | 9.74
|
YatharthS/LuxTTS
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('YatharthS_LuxTTS_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in YatharthS_LuxTTS_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/YatharthS_LuxTTS_0.txt|YatharthS_LuxTTS_0.txt>',\n )\n\n with open('YatharthS_LuxTTS_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='YatharthS_LuxTTS_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='YatharthS_LuxTTS_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
briaai/Fibo-Edit
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('briaai_Fibo-Edit_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in briaai_Fibo-Edit_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/briaai_Fibo-Edit_0.txt|briaai_Fibo-Edit_0.txt>',\n )\n\n with open('briaai_Fibo-Edit_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='briaai_Fibo-Edit_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='briaai_Fibo-Edit_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"briaai/Fibo-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('briaai_Fibo-Edit_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in briaai_Fibo-Edit_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/briaai_Fibo-Edit_1.txt|briaai_Fibo-Edit_1.txt>',\n )\n\n with open('briaai_Fibo-Edit_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"briaai/Fibo-Edit\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='briaai_Fibo-Edit_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='briaai_Fibo-Edit_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/briaai_Fibo-Edit_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/briaai_Fibo-Edit_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/briaai_Fibo-Edit_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/briaai_Fibo-Edit_1.txt"
] | 0
|
LiquidAI/LFM2.5-1.2B-Instruct
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"LiquidAI/LFM2.5-1.2B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('LiquidAI_LFM2.5-1.2B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2.5-1.2B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2.5-1.2B-Instruct_0.txt|LiquidAI_LFM2.5-1.2B-Instruct_0.txt>',\n )\n\n with open('LiquidAI_LFM2.5-1.2B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"LiquidAI/LFM2.5-1.2B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2.5-1.2B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2.5-1.2B-Instruct_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Instruct\")\n model = AutoModelForCausalLM.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('LiquidAI_LFM2.5-1.2B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in LiquidAI_LFM2.5-1.2B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/LiquidAI_LFM2.5-1.2B-Instruct_1.txt|LiquidAI_LFM2.5-1.2B-Instruct_1.txt>',\n )\n\n with open('LiquidAI_LFM2.5-1.2B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Instruct\")\nmodel = AutoModelForCausalLM.from_pretrained(\"LiquidAI/LFM2.5-1.2B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='LiquidAI_LFM2.5-1.2B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='LiquidAI_LFM2.5-1.2B-Instruct_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2.5-1.2B-Instruct_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/LiquidAI_LFM2.5-1.2B-Instruct_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2.5-1.2B-Instruct_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/LiquidAI_LFM2.5-1.2B-Instruct_1.txt"
] | 2.83
|
MiniMaxAI/MiniMax-M2.1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"MiniMaxAI/MiniMax-M2.1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('MiniMaxAI_MiniMax-M2.1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in MiniMaxAI_MiniMax-M2.1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MiniMaxAI_MiniMax-M2.1_0.txt|MiniMaxAI_MiniMax-M2.1_0.txt>',\n )\n\n with open('MiniMaxAI_MiniMax-M2.1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"MiniMaxAI/MiniMax-M2.1\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='MiniMaxAI_MiniMax-M2.1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='MiniMaxAI_MiniMax-M2.1_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"MiniMaxAI/MiniMax-M2.1\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"MiniMaxAI/MiniMax-M2.1\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('MiniMaxAI_MiniMax-M2.1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in MiniMaxAI_MiniMax-M2.1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MiniMaxAI_MiniMax-M2.1_1.txt|MiniMaxAI_MiniMax-M2.1_1.txt>',\n )\n\n with open('MiniMaxAI_MiniMax-M2.1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"MiniMaxAI/MiniMax-M2.1\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"MiniMaxAI/MiniMax-M2.1\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='MiniMaxAI_MiniMax-M2.1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='MiniMaxAI_MiniMax-M2.1_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/MiniMaxAI_MiniMax-M2.1_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/MiniMaxAI_MiniMax-M2.1_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/MiniMaxAI_MiniMax-M2.1_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/MiniMaxAI_MiniMax-M2.1_1.txt"
] | 1,107.58
|
facebook/sam3
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('facebook_sam3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_0.txt|facebook_sam3_0.txt>',\n )\n\n with open('facebook_sam3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"mask-generation\", model=\"facebook/sam3\")\n with open('facebook_sam3_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_1.txt|facebook_sam3_1.txt>',\n )\n\n with open('facebook_sam3_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"mask-generation\", model=\"facebook/sam3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModel\n \n tokenizer = AutoTokenizer.from_pretrained(\"facebook/sam3\")\n model = AutoModel.from_pretrained(\"facebook/sam3\")\n with open('facebook_sam3_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_2.txt|facebook_sam3_2.txt>',\n )\n\n with open('facebook_sam3_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModel\n\ntokenizer = AutoTokenizer.from_pretrained(\"facebook/sam3\")\nmodel = AutoModel.from_pretrained(\"facebook/sam3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_2.txt"
] | 4.16
|
Linum-AI/linum-v2-720p
|
[] |
[] |
[] | 0
|
QuantFunc/Nunchaku-Qwen-Image-EDIT-2511
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"QuantFunc/Nunchaku-Qwen-Image-EDIT-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt|QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt>',\n )\n\n with open('QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"QuantFunc/Nunchaku-Qwen-Image-EDIT-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/QuantFunc_Nunchaku-Qwen-Image-EDIT-2511_0.txt"
] | 0
|
huihui-ai/Huihui-GLM-4.7-Flash-abliterated
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"huihui-ai/Huihui-GLM-4.7-Flash-abliterated\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt|huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt>',\n )\n\n with open('huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"huihui-ai/Huihui-GLM-4.7-Flash-abliterated\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"huihui-ai/Huihui-GLM-4.7-Flash-abliterated\")\n model = AutoModelForCausalLM.from_pretrained(\"huihui-ai/Huihui-GLM-4.7-Flash-abliterated\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt|huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt>',\n )\n\n with open('huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"huihui-ai/Huihui-GLM-4.7-Flash-abliterated\")\nmodel = AutoModelForCausalLM.from_pretrained(\"huihui-ai/Huihui-GLM-4.7-Flash-abliterated\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/huihui-ai_Huihui-GLM-4.7-Flash-abliterated_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/huihui-ai_Huihui-GLM-4.7-Flash-abliterated_1.txt"
] | 75.6
|
openbmb/VoxCPM1.5
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# \"soundfile\",\n# \"voxcpm\", # assumed PyPI name of the VoxCPM package imported below\n# ]\n# ///\n\ntry:\n import soundfile as sf\n from voxcpm import VoxCPM\n \n model = VoxCPM.from_pretrained(\"openbmb/VoxCPM1.5\")\n \n wav = model.generate(\n text=\"VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly expressive speech.\",\n prompt_wav_path=None, # optional: path to a prompt speech for voice cloning\n prompt_text=None, # optional: reference text\n cfg_value=2.0, # LM guidance on LocDiT, higher for better adherence to the prompt, but maybe worse\n inference_timesteps=10, # LocDiT inference timesteps, higher for better result, lower for fast speed\n normalize=True, # enable external TN tool\n denoise=True, # enable external Denoise tool\n retry_badcase=True, # enable retrying mode for some bad cases (unstoppable)\n retry_badcase_max_times=3, # maximum retrying times\n retry_badcase_ratio_threshold=6.0, # maximum length restriction for bad case detection (simple but effective), it could be adjusted for slow pace speech\n )\n \n sf.write(\"output.wav\", wav, 16000)\n print(\"saved: output.wav\")\n with open('openbmb_VoxCPM1.5_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openbmb_VoxCPM1.5_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openbmb_VoxCPM1.5_0.txt|openbmb_VoxCPM1.5_0.txt>',\n )\n\n with open('openbmb_VoxCPM1.5_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport soundfile as sf\nfrom voxcpm import VoxCPM\n\nmodel = VoxCPM.from_pretrained(\"openbmb/VoxCPM1.5\")\n\nwav = model.generate(\n text=\"VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly expressive speech.\",\n prompt_wav_path=None, # optional: path to a prompt speech for voice cloning\n prompt_text=None, # optional: reference text\n cfg_value=2.0, # LM guidance on LocDiT, higher for better adherence to the prompt, but maybe worse\n inference_timesteps=10, # LocDiT inference timesteps, higher for better result, lower for fast speed\n normalize=True, # enable external TN tool\n denoise=True, # enable external Denoise tool\n retry_badcase=True, # enable retrying mode for some bad cases (unstoppable)\n retry_badcase_max_times=3, # maximum retrying times\n retry_badcase_ratio_threshold=6.0, # maximum length restriction for bad case detection (simple but effective), it could be adjusted for slow pace speech\n)\n\nsf.write(\"output.wav\", wav, 16000)\nprint(\"saved: output.wav\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openbmb_VoxCPM1.5_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openbmb_VoxCPM1.5_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openbmb_VoxCPM1.5_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openbmb_VoxCPM1.5_0.txt"
] | 0
|
deepseek-ai/DeepSeek-V3.2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-V3.2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2_0.txt|deepseek-ai_DeepSeek-V3.2_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2\", dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-V3.2_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2_1.txt|deepseek-ai_DeepSeek-V3.2_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2_1.txt"
] | 1,659.65
|
mlx-community/GLM-4.7-Flash-4bit
|
[] |
[] |
[] | 72.51
|
Qwen/Qwen-Image-Edit-2511
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit-2511_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit-2511_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit-2511_0.txt|Qwen_Qwen-Image-Edit-2511_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit-2511_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2511\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit-2511_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit-2511_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit-2511_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit-2511_0.txt"
] | 0
|
Supertone/supertonic-2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# \"supertonic\", # assumed PyPI name of the package providing supertonic.TTS\n# ]\n# ///\n\ntry:\n from supertonic import TTS\n \n tts = TTS(auto_download=True)\n \n style = tts.get_voice_style(voice_name=\"M1\")\n \n text = \"The train delay was announced at 4:45 PM on Wed, Apr 3, 2024 due to track maintenance.\"\n wav, duration = tts.synthesize(text, voice_style=style)\n \n tts.save_audio(wav, \"output.wav\")\n with open('Supertone_supertonic-2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Supertone_supertonic-2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Supertone_supertonic-2_0.txt|Supertone_supertonic-2_0.txt>',\n )\n\n with open('Supertone_supertonic-2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom supertonic import TTS\n\ntts = TTS(auto_download=True)\n\nstyle = tts.get_voice_style(voice_name=\"M1\")\n\ntext = \"The train delay was announced at 4:45 PM on Wed, Apr 3, 2024 due to track maintenance.\"\nwav, duration = tts.synthesize(text, voice_style=style)\n\ntts.save_audio(wav, \"output.wav\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Supertone_supertonic-2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Supertone_supertonic-2_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Supertone_supertonic-2_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Supertone_supertonic-2_0.txt"
] | 0
|
QuantFunc/Nunchaku-Qwen-Image-2512
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"QuantFunc/Nunchaku-Qwen-Image-2512\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('QuantFunc_Nunchaku-Qwen-Image-2512_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in QuantFunc_Nunchaku-Qwen-Image-2512_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/QuantFunc_Nunchaku-Qwen-Image-2512_0.txt|QuantFunc_Nunchaku-Qwen-Image-2512_0.txt>',\n )\n\n with open('QuantFunc_Nunchaku-Qwen-Image-2512_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"QuantFunc/Nunchaku-Qwen-Image-2512\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='QuantFunc_Nunchaku-Qwen-Image-2512_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='QuantFunc_Nunchaku-Qwen-Image-2512_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/QuantFunc_Nunchaku-Qwen-Image-2512_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/QuantFunc_Nunchaku-Qwen-Image-2512_0.txt"
] | 0
|
GadflyII/GLM-4.7-Flash-NVFP4
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"GadflyII/GLM-4.7-Flash-NVFP4\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('GadflyII_GLM-4.7-Flash-NVFP4_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in GadflyII_GLM-4.7-Flash-NVFP4_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/GadflyII_GLM-4.7-Flash-NVFP4_0.txt|GadflyII_GLM-4.7-Flash-NVFP4_0.txt>',\n )\n\n with open('GadflyII_GLM-4.7-Flash-NVFP4_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"GadflyII/GLM-4.7-Flash-NVFP4\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='GadflyII_GLM-4.7-Flash-NVFP4_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='GadflyII_GLM-4.7-Flash-NVFP4_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"GadflyII/GLM-4.7-Flash-NVFP4\")\n model = AutoModelForCausalLM.from_pretrained(\"GadflyII/GLM-4.7-Flash-NVFP4\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('GadflyII_GLM-4.7-Flash-NVFP4_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in GadflyII_GLM-4.7-Flash-NVFP4_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/GadflyII_GLM-4.7-Flash-NVFP4_1.txt|GadflyII_GLM-4.7-Flash-NVFP4_1.txt>',\n )\n\n with open('GadflyII_GLM-4.7-Flash-NVFP4_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"GadflyII/GLM-4.7-Flash-NVFP4\")\nmodel = AutoModelForCausalLM.from_pretrained(\"GadflyII/GLM-4.7-Flash-NVFP4\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='GadflyII_GLM-4.7-Flash-NVFP4_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='GadflyII_GLM-4.7-Flash-NVFP4_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/GadflyII_GLM-4.7-Flash-NVFP4_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/GadflyII_GLM-4.7-Flash-NVFP4_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/GadflyII_GLM-4.7-Flash-NVFP4_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/GadflyII_GLM-4.7-Flash-NVFP4_1.txt"
] | 89.29
|
black-forest-labs/FLUX.2-dev
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.2-dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-dev_0.txt|black-forest-labs_FLUX.2-dev_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-dev_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('black-forest-labs_FLUX.2-dev_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-dev_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-dev_1.txt|black-forest-labs_FLUX.2-dev_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-dev_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-dev_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-dev_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-dev_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-dev_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-dev_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-dev_1.txt"
] | 0
|
Phr00t/LTX2-Rapid-Merges
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Phr00t_LTX2-Rapid-Merges_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Phr00t_LTX2-Rapid-Merges_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Phr00t_LTX2-Rapid-Merges_0.txt|Phr00t_LTX2-Rapid-Merges_0.txt>',\n )\n\n with open('Phr00t_LTX2-Rapid-Merges_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Phr00t_LTX2-Rapid-Merges_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Phr00t_LTX2-Rapid-Merges_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
LiquidAI/LFM2.5-Audio-1.5B
|
[] |
[] |
[] | 7.12
|
nvidia/nemotron-speech-streaming-en-0.6b
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# \"nemo_toolkit[asr]\", # NeMo ASR collection, required for nemo.collections.asr\n# ]\n# ///\n\ntry:\n import nemo.collections.asr as nemo_asr\n asr_model = nemo_asr.models.ASRModel.from_pretrained(\"nvidia/nemotron-speech-streaming-en-0.6b\")\n \n transcriptions = asr_model.transcribe([\"file.wav\"])\n with open('nvidia_nemotron-speech-streaming-en-0.6b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_nemotron-speech-streaming-en-0.6b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_nemotron-speech-streaming-en-0.6b_0.txt|nvidia_nemotron-speech-streaming-en-0.6b_0.txt>',\n )\n\n with open('nvidia_nemotron-speech-streaming-en-0.6b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport nemo.collections.asr as nemo_asr\nasr_model = nemo_asr.models.ASRModel.from_pretrained(\"nvidia/nemotron-speech-streaming-en-0.6b\")\n\ntranscriptions = asr_model.transcribe([\"file.wav\"])\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_nemotron-speech-streaming-en-0.6b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_nemotron-speech-streaming-en-0.6b_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_nemotron-speech-streaming-en-0.6b_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_nemotron-speech-streaming-en-0.6b_0.txt"
] | 0
|
Alissonerdx/BFS-Best-Face-Swap
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Alissonerdx_BFS-Best-Face-Swap_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Alissonerdx_BFS-Best-Face-Swap_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Alissonerdx_BFS-Best-Face-Swap_0.txt|Alissonerdx_BFS-Best-Face-Swap_0.txt>',\n )\n\n with open('Alissonerdx_BFS-Best-Face-Swap_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Alissonerdx_BFS-Best-Face-Swap_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Alissonerdx_BFS-Best-Face-Swap_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
cerebras/GLM-4.7-Flash-REAP-23B-A3B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"cerebras/GLM-4.7-Flash-REAP-23B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt|cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt>',\n )\n\n with open('cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"cerebras/GLM-4.7-Flash-REAP-23B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"cerebras/GLM-4.7-Flash-REAP-23B-A3B\")\n model = AutoModelForCausalLM.from_pretrained(\"cerebras/GLM-4.7-Flash-REAP-23B-A3B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt|cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt>',\n )\n\n with open('cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"cerebras/GLM-4.7-Flash-REAP-23B-A3B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"cerebras/GLM-4.7-Flash-REAP-23B-A3B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/cerebras_GLM-4.7-Flash-REAP-23B-A3B_1.txt"
] | 111.37
|
Phr00t/Qwen-Image-Edit-Rapid-AIO
|
[] |
[] |
[] | 0
|
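The rows above only list raw URLs for each generated snippet and its execution log. As a convenience, here is a minimal sketch of how one such file could be retrieved programmatically. It assumes nothing beyond the standard `huggingface_hub` download API; the repo id and filename are copied verbatim from the URLs listed above, not invented.

```python
# Minimal sketch (not part of the dataset itself): fetch and print one of the
# generated snippet files referenced in the rows above. The repo id and
# filename are taken verbatim from the URLs in this preview.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="model-metadata/code_python_files",
    filename="cerebras_GLM-4.7-Flash-REAP-23B-A3B_0.py",
    repo_type="dataset",
)

with open(path, encoding="utf-8") as f:
    print(f.read())
```

The same call with `repo_id="model-metadata/code_execution_files"` and the corresponding `.txt` filename would fetch the matching execution log.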