| | import requests |
| | import io |
| | from PIL import Image |
| | import os |
| | from dotenv import load_dotenv |
| |
|
| | |
# Load variables from a local .env file into the process environment.
load_dotenv()

# Hugging Face API token read from the environment (None if unset).
# NOTE(review): HF_TOKEN is never used anywhere in this file — confirm
# whether the server handles auth itself or this is dead configuration.
HF_TOKEN = os.getenv("HF_TOKEN")

# Base URL of the locally running image-generation API server.
API_BASE = "http://localhost:8000"
| |
|
def text_to_image(prompt=None, model=None, negative_prompt=None, guidance_scale=None, num_inference_steps=None):
    """
    Generate an image from text via the API's /text-to-image endpoint.

    All parameters are optional; any left as None is omitted from the
    request so the server applies its own defaults.

    Args:
        prompt: Text prompt describing the desired image.
        model: Model identifier for the server to use.
        negative_prompt: Text describing what to avoid in the image.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.

    Returns:
        A PIL.Image.Image on success, or None on a non-200 response
        (the status code and response body are printed).
    """
    url = f"{API_BASE}/text-to-image"

    # Send only the fields the caller actually provided, so the server's
    # defaults apply to everything else.
    candidates = {
        "prompt": prompt,
        "model": model,
        "negative_prompt": negative_prompt,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
    }
    data = {key: value for key, value in candidates.items() if value is not None}

    # Explicit timeout: without one, requests waits forever on a hung server.
    # Generous value because image generation is slow.
    response = requests.post(url, data=data, timeout=300)

    if response.status_code == 200:
        # The endpoint returns raw image bytes; decode them into a PIL image.
        return Image.open(io.BytesIO(response.content))

    print(f"Error: {response.status_code}")
    print(response.text)
    return None
| |
|
def image_to_image(image_path, prompt=None, model=None, negative_prompt=None,
                   guidance_scale=None, num_inference_steps=None, use_controlnet=False):
    """
    Transform an existing image via the API's /image-to-image endpoint.

    Only ``image_path`` is required; any other parameter left as None is
    omitted from the request so the server applies its own defaults.

    Args:
        image_path: Path to the local input image file.
        prompt: Text prompt guiding the transformation.
        model: Model identifier for the server to use.
        negative_prompt: Text describing what to avoid in the image.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        use_controlnet: If True, sends the form field "use_controlnet=True".

    Returns:
        A PIL.Image.Image on success, or None on a non-200 response
        (the status code and response body are printed).

    Raises:
        OSError: If ``image_path`` cannot be opened.
    """
    url = f"{API_BASE}/image-to-image"

    # Send only the fields the caller actually provided, so the server's
    # defaults apply to everything else.
    candidates = {
        "prompt": prompt,
        "model": model,
        "negative_prompt": negative_prompt,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
    }
    data = {key: value for key, value in candidates.items() if value is not None}

    if use_controlnet:
        # The server expects the flag as the literal string "True".
        data["use_controlnet"] = "True"

    # Open the file in a `with` block so the handle is always closed —
    # the previous version leaked it. Also set an explicit timeout so a
    # hung server cannot block forever; generous because generation is slow.
    with open(image_path, "rb") as image_file:
        files = {"image": image_file}
        response = requests.post(url, data=data, files=files, timeout=300)

    if response.status_code == 200:
        # The endpoint returns raw image bytes; decode them into a PIL image.
        return Image.open(io.BytesIO(response.content))

    print(f"Error: {response.status_code}")
    print(response.text)
    return None
| |
|
if __name__ == "__main__":
    # Demo 1: text-to-image using the server's default settings.
    print("Text to Image example:")
    result = text_to_image()
    if result is not None:
        result.save("text2img_output.png")
        print("Image saved as text2img_output.png")

    # Demo 2: image-to-image needs a local input file, so it is only
    # announced here rather than executed.
    print("Image to Image example (requires an input image):")
| |
|