Commit 8e97121
justinblalock87 committed
1 Parent(s): 0e43e61

Hf login
Files changed:
- __pycache__/app.cpython-38.pyc +0 -0
- __pycache__/quantize.cpython-38.pyc +0 -0
- app.py +3 -2
- quantize.py +0 -1
__pycache__/app.cpython-38.pyc
CHANGED
Binary files a/__pycache__/app.cpython-38.pyc and b/__pycache__/app.cpython-38.pyc differ

__pycache__/quantize.cpython-38.pyc
CHANGED
Binary files a/__pycache__/quantize.cpython-38.pyc and b/__pycache__/quantize.cpython-38.pyc differ
app.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Optional
 import gradio as gr
 
 import quantize
-from huggingface_hub import HfApi, Repository
+from huggingface_hub import HfApi, Repository, login
 
 
 DATASET_REPO_URL = "https://huggingface.co/datasets/safetensors/conversions"
@@ -27,6 +27,7 @@ def run(model_id: str, is_private: bool, token: Optional[str] = None) -> str:
 
         Please fill a token and model_id.
         """
+    login(token=token)
     try:
         if is_private:
             api = HfApi(token=token)
@@ -74,7 +75,7 @@ title="Quantize model and convert to CoreML"
 allow_flagging="never"
 
 def token_text(visible=False):
-    return gr.Text(max_lines=1, label="your_hf_token", visible=True
+    return gr.Text(max_lines=1, label="your_hf_token", visible=True)
 
 with gr.Blocks(title=title) as demo:
     description = gr.Markdown(f"""# {title}""")
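In effect, the commit authenticates the whole process with the user-supplied token before any Hub operations run, and closes the parenthesis that token_text() was missing. Below is a minimal sketch of the patched run() flow, assuming simplified validation and a model_info() probe in place of the Space's real conversion logic; the names and error handling are illustrative only.

# A minimal sketch of the patched run() flow; the validation message and the
# model_info() probe stand in for the Space's actual conversion logic.
from typing import Optional

from huggingface_hub import HfApi, login


def run(model_id: str, is_private: bool, token: Optional[str] = None) -> str:
    if model_id == "" or (is_private and not token):
        return "Please fill a token and model_id."

    # The commit calls login(token=token) unconditionally; login() caches the
    # token for the whole process, so later huggingface_hub calls (and child
    # processes reading the cached credentials) are authenticated as well.
    if token:
        login(token=token)

    try:
        # Passing the token to HfApi still gives per-call authentication,
        # which is what the private branch keeps doing.
        api = HfApi(token=token) if is_private else HfApi()
        api.model_info(model_id)
    except Exception as exc:  # simplified error handling for the sketch
        return f"Could not access {model_id}: {exc}"
    return f"{model_id} is reachable; conversion would start here."

The design trade-off the commit leans on: login() persists the credential process-wide, while HfApi(token=...) only authenticates the calls made through that client instance.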
quantize.py
CHANGED
@@ -28,7 +28,6 @@ def convert_generic(
     # --bundle-resources-for-swift-cli \
     # --quantize-nbits 2
 
-
     print("Starting conversion") #
 
     subprocess.run(["python3", "-m" , "python_coreml_stable_diffusion.torch2coreml", "--model-version", "stabilityai/sd-turbo", "-o", folder, "--convert-unet", "--convert-text-encoder", "--convert-vae-decoder", "--chunk-unet", "--attention-implementation", "ORIGINAL", "--bundle-resources-for-swift-cli"])
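The quantize.py hunk only drops a stray blank line, but the subprocess.run call it touches is the heart of the conversion. Here is a sketch of the same invocation with the exit status checked, so a failed torch2coreml run surfaces as an exception instead of passing silently; the convert_sd_turbo name and the check=True hardening are additions for illustration, while the module and flags are the ones already used in the file.

# Sketch of the quantize.py conversion call with the exit status checked.
# convert_sd_turbo() and check=True are illustrative additions; the module
# and flags are the ones the Space already passes.
import subprocess
import sys


def convert_sd_turbo(folder: str) -> None:
    cmd = [
        sys.executable, "-m", "python_coreml_stable_diffusion.torch2coreml",
        "--model-version", "stabilityai/sd-turbo",
        "-o", folder,
        "--convert-unet",
        "--convert-text-encoder",
        "--convert-vae-decoder",
        "--chunk-unet",
        "--attention-implementation", "ORIGINAL",
        "--bundle-resources-for-swift-cli",
    ]
    print("Starting conversion")
    # check=True raises CalledProcessError on a non-zero exit code instead of
    # letting a failed conversion go unnoticed.
    subprocess.run(cmd, check=True)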