import os
import shutil
import gradio as gr
from huggingface_hub import HfApi, whoami, ModelCard, model_info
from gradio_huggingfacehub_search import HuggingfaceHubSearch
from textwrap import dedent
from pathlib import Path
from tempfile import TemporaryDirectory

from huggingface_hub.file_download import repo_folder_name
from optimum.exporters import TasksManager
from optimum.intel.utils.modeling_utils import _find_files_matching_pattern
from optimum.intel import (
    OVModelForAudioClassification,
    OVModelForCausalLM,
    OVModelForFeatureExtraction,
    OVModelForImageClassification,
    OVModelForMaskedLM,
    OVModelForQuestionAnswering,
    OVModelForSequenceClassification,
    OVModelForTokenClassification,
    OVModelForVisualCausalLM,
    OVDiffusionPipeline,
)

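# Transformers tasks supported by this Space, mapped to the name of the
# optimum-intel class used to export them.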
_HEAD_TO_AUTOMODELS = {
    "feature-extraction": "OVModelForFeatureExtraction",
    "fill-mask": "OVModelForMaskedLM",
    "text-generation": "OVModelForCausalLM",
    "text-classification": "OVModelForSequenceClassification",
    "token-classification": "OVModelForTokenClassification",
    "question-answering": "OVModelForQuestionAnswering",
    "image-classification": "OVModelForImageClassification",
    "audio-classification": "OVModelForAudioClassification",
    "image-text-to-text": "OVModelForVisualCausalLM",
}

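# Export `model_id` to OpenVINO IR and push the result to a new
# `<username>/<model_name>-openvino` repository on the Hub.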
def export(model_id: str, private_repo: bool, overwrite: bool, oauth_token: gr.OAuthToken):
    if oauth_token is None or oauth_token.token is None:
        return "You must be logged in to use this Space"

    if not model_id:
        return f"### Invalid input 🐞 Please specify a model name, got {model_id}"

    try:
        model_name = model_id.split("/")[-1]
        username = whoami(oauth_token.token)["name"]
        new_repo_id = f"{username}/{model_name}-openvino"
        library_name = TasksManager.infer_library_from_model(model_id, token=oauth_token.token)

        if library_name == "diffusers":
            auto_model_class = "OVDiffusionPipeline"
        elif library_name == "transformers":
            task = TasksManager.infer_task_from_model(model_id, token=oauth_token.token)

            if task == "text2text-generation":
                return "Export of Seq2Seq models is currently disabled"

            if task not in _HEAD_TO_AUTOMODELS:
                supported_tasks = ", ".join(_HEAD_TO_AUTOMODELS)
                return f"The task '{task}' is not supported, only the following tasks are: {supported_tasks}"

            auto_model_class = _HEAD_TO_AUTOMODELS[task]
        else:
            return f"Library {library_name} is not yet supported"

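        # Skip models that already ship OpenVINO IR files (openvino*_model*.xml).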
        ov_files = _find_files_matching_pattern(
            model_id,
            pattern=r"(.*)?openvino(.*)?_model(.*)?\.xml$",
            use_auth_token=oauth_token.token,
        )

        if len(ov_files) > 0:
            return f"Model {model_id} is already converted, skipping."

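        # Refuse to push to an existing repository unless the user opted in.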
        api = HfApi(token=oauth_token.token)
        if api.repo_exists(new_repo_id) and not overwrite:
            return f"Model {new_repo_id} already exists, please tick the overwrite box to push to an existing repository"

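        # Run the export in a scratch directory so nothing is left behind on disk.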
        with TemporaryDirectory() as d:
            folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
            os.makedirs(folder)
            try:
                api.snapshot_download(repo_id=model_id, local_dir=folder, allow_patterns=["*.json"])
                # Resolve the class name to the optimum-intel class imported above (safer than eval)
                ov_model = globals()[auto_model_class].from_pretrained(model_id, export=True, cache_dir=folder, token=oauth_token.token)
                ov_model.save_pretrained(folder)
                new_repo_url = api.create_repo(repo_id=new_repo_id, exist_ok=True, private=private_repo)
                new_repo_id = new_repo_url.repo_id
                print("Repository created successfully!", new_repo_url)

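                # Diffusers pipelines save each component in its own subfolder;
                # "" covers the files at the repository root (transformers models).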
                folder = Path(folder)
                for dir_name in (
                    "",
                    "vae_encoder",
                    "vae_decoder",
                    "text_encoder",
                    "text_encoder_2",
                    "unet",
                    "tokenizer",
                    "tokenizer_2",
                    "scheduler",
                    "feature_extractor",
                ):
                    if not (folder / dir_name).is_dir():
                        continue
                    for file_path in (folder / dir_name).iterdir():
                        if file_path.is_file():
                            try:
                                api.upload_file(
                                    path_or_fileobj=file_path,
                                    path_in_repo=os.path.join(dir_name, file_path.name),
                                    repo_id=new_repo_id,
                                )
                            except Exception as e:
                                return f"Error uploading file {file_path}: {e}"

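                # Reuse the original model card, adding the OpenVINO export tags.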
                try:
                    card = ModelCard.load(model_id, token=oauth_token.token)
                except Exception:
                    card = ModelCard("")

                if card.data.tags is None:
                    card.data.tags = []
                card.data.tags.append("openvino")
                card.data.tags.append("openvino-export")
                card.data.base_model = model_id

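                # Propagate the original pipeline tag to the exported repository.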
                pipeline_tag = getattr(model_info(model_id, token=oauth_token.token), "pipeline_tag", None)
                if pipeline_tag is not None:
                    card.data.pipeline_tag = pipeline_tag

                card.text = dedent(
                    f"""
                    This model was converted to OpenVINO from [`{model_id}`](https://huggingface.co/{model_id}) using [optimum-intel](https://github.com/huggingface/optimum-intel)
                    via the [export](https://huggingface.co/spaces/echarlaix/openvino-export) space.

                    First make sure you have optimum-intel installed:

                    ```bash
                    pip install optimum[openvino]
                    ```

                    To load your model, run:

                    ```python
                    from optimum.intel import {auto_model_class}

                    model_id = "{new_repo_id}"
                    model = {auto_model_class}.from_pretrained(model_id)
                    ```
                    """
                )
                card_path = os.path.join(folder, "README.md")
                card.save(card_path)

                api.upload_file(
                    path_or_fileobj=card_path,
                    path_in_repo="README.md",
                    repo_id=new_repo_id,
                )
                return f"This model was successfully exported. Find it under your repository: {new_repo_url}"
            finally:
                shutil.rmtree(folder, ignore_errors=True)
    except Exception as e:
        return f"### Error: {e}"

DESCRIPTION = """
This Space uses [Optimum Intel](https://huggingface.co/docs/optimum/main/en/intel/openvino/export) to automatically export a model from the Hub to the [OpenVINO IR format](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html).

After conversion, a repository containing the resulting model will be pushed under your namespace.

The list of supported architectures can be found in the [documentation](https://huggingface.co/docs/optimum/main/en/intel/openvino/models).
"""

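# Gradio inputs: a Hub model search box plus the two export options.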
model_id = HuggingfaceHubSearch(
    label="Hub Model ID",
    placeholder="Search for model ID on the hub",
    search_type="model",
)
private_repo = gr.Checkbox(
    value=False,
    label="Private repository",
    info="Create a private repository instead of a public one",
)
overwrite = gr.Checkbox(
    value=False,
    label="Overwrite repository content",
    info="Allow pushing files to an existing repository, potentially overwriting its content",
)
interface = gr.Interface(
    fn=export,
    inputs=[
        model_id,
        private_repo,
        overwrite,
    ],
    outputs=[
        gr.Markdown(label="output"),
    ],
    title="Export your model to OpenVINO",
    description=DESCRIPTION,
    api_name=False,
)

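# gr.LoginButton provides the gr.OAuthToken that gets forwarded to export().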
with gr.Blocks() as demo:
    gr.Markdown("You must be logged in to use this Space")
    gr.LoginButton(min_width=250)
    interface.render()


demo.launch()