| | |
| | """finetune-utility-scripts.ipynb |
| | |
| | Automatically generated by Colab. |
| | |
| | Original file is located at |
| | https://colab.research.google.com/drive/14ZbhUPHtNt3EB0XunV_qN6OxWZHyU9wA |
| | """ |
| |
|
# Notebook cell: install the OpenAI SDK via IPython shell escape.
# NOTE(review): only the `requests` HTTP client is actually used below, so
# this install may be unnecessary -- confirm before removing.
!pip install openai
| |
|
import base64
import os

import requests
| |
|
# SECURITY: the original notebook hard-coded a live "sk-proj-..." secret
# here. Never commit API keys to source -- the leaked key must be revoked.
# Read the key from the environment instead (set OPENAI_API_KEY in the
# Colab runtime or a secrets manager before running the captioning cells).
api_key = os.environ.get("OPENAI_API_KEY", "")

# System-style instruction sent with every image: asks GPT-4o for concise,
# comma-separated, relevance-ordered tags suitable for CLIP fine-tuning.
prompt = """As an AI image tagging expert, please provide precise tags for
these images to enhance CLIP model's understanding of the content.
Employ succinct keywords or phrases, steering clear of elaborate
sentences and extraneous conjunctions. Prioritize the tags by relevance.
Your tags should capture key elements such as the main subject, setting,
artistic style, composition, image quality, color tone, filter, and camera
specifications, and any other tags crucial for the image. When tagging
photos of people, include specific details like gender, nationality,
attire, actions, pose, expressions, accessories, makeup, composition
type, age, etc. For other image categories, apply appropriate and
common descriptive tags as well. Recognize and tag any celebrities,
well-known landmarks or IPs if clearly featured in the image.
Your tags should be accurate, non-duplicative, and within a
20-75 word count range. These tags will be used for image re-creation,
so the closer the resemblance to the original image, the better the
tag quality. Tags should be comma-separated. Exceptional tagging will
be rewarded with $10 per image.
"""
| |
|
def encode_image(image_path):
    """Return the contents of the file at *image_path* as a base64 string.

    The result is ASCII text decoded as UTF-8, suitable for embedding in a
    ``data:`` URL for the OpenAI vision API.
    """
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")
| |
|
def create_openai_query(image_path):
    """Caption one image via the OpenAI chat-completions endpoint.

    Sends the module-level tagging ``prompt`` plus the image (as a base64
    ``data:`` URL) to ``gpt-4o`` and returns the generated tag string.

    Parameters
    ----------
    image_path : str
        Path to a local image file.

    Returns
    -------
    str
        The assistant message content (comma-separated tags).

    Raises
    ------
    requests.HTTPError
        If the API responds with a non-2xx status (previously this
        surfaced as a confusing ``KeyError: 'choices'``).
    requests.Timeout
        If the request takes longer than 60 seconds (previously there was
        no timeout, so a stuck connection could hang the batch forever).
    """
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    payload = {
        "model": "gpt-4o",
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        },
                    },
                ],
            }
        ],
        "max_tokens": 300,
    }

    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=payload,
        timeout=60,
    )
    response.raise_for_status()
    output = response.json()
    return output["choices"][0]["message"]["content"]
| |
|
# Notebook cell: wipe any previous Pexels caption output before regenerating.
!rm -rf "/content/drive/MyDrive/Finetune-Dataset/Pexels_Caption"
| |
|
import os

# Recreate the caption folder wiped by the preceding `!rm -rf` cell.
# Fix: os.mkdir raised FileExistsError on a second run; makedirs with
# exist_ok=True makes this cell safely re-runnable.
os.makedirs("/content/drive/MyDrive/Finetune-Dataset/Pexels_Caption", exist_ok=True)
| |
|
# Standard-library helpers for the captioning loop below.
# NOTE(review): `time` is imported but never used in this chunk -- it may
# be needed by code outside this view, so it is left in place.
import os
import time
| |
|
| |
|
| | |
def process_images_in_folder(input_folder, output_folder, resume_from=None):
    """Caption every image in *input_folder*, writing one ``<name>.txt``
    file per image into *output_folder*.

    A ``processed_log.txt`` inside the output folder records finished
    images, so the job can be interrupted and re-run without repeating
    work (and without re-spending API credits).

    Parameters
    ----------
    input_folder : str
        Directory containing the images to caption.
    output_folder : str
        Directory for the ``.txt`` captions; created if missing.
    resume_from : str | None
        Optional filename; files sorting lexicographically at or before
        it are skipped.
    """
    os.makedirs(output_folder, exist_ok=True)
    # Fix: sort the listing. os.listdir order is arbitrary, which made the
    # resume_from comparison (and the processing order) non-deterministic.
    image_files = sorted(
        f for f in os.listdir(input_folder)
        if os.path.isfile(os.path.join(input_folder, f))
    )

    processed_log = os.path.join(output_folder, "processed_log.txt")
    processed_images = set()
    if os.path.exists(processed_log):
        with open(processed_log, 'r') as log_file:
            processed_images = {line.strip() for line in log_file}

    try:
        for image_file in image_files:
            if resume_from and image_file <= resume_from:
                continue
            if image_file in processed_images:
                print(f"Skipping {image_file} as it is already processed.")
                continue

            image_path = os.path.join(input_folder, image_file)
            try:
                caption = create_openai_query(image_path)
            except Exception as e:
                # Fix: the original wrote an EMPTY caption file and logged
                # the image as processed on failure, so it was never
                # retried. Skip instead; a re-run will pick it up again.
                print(f"Error processing {image_file}: {str(e)}")
                continue

            output_file_path = os.path.join(
                output_folder, f"{os.path.splitext(image_file)[0]}.txt"
            )
            with open(output_file_path, 'w') as f:
                f.write(caption)
            # Log only after the caption is safely on disk.
            with open(processed_log, 'a') as log_file:
                log_file.write(f"{image_file}\n")
            print(f"Processed {image_file} and saved result to {output_file_path}")
    except Exception as e:
        print(f"Error occurred: {str(e)}. Resuming might not be possible.")
        return
| |
|
if __name__ == "__main__":
    # Where the source images live and where their caption .txt files land.
    caption_input = "/content/drive/MyDrive/inference-images/inference-images/caimera"
    caption_output = "/content/drive/MyDrive/inference-images/caimera_captions"
    # Set to a filename to skip everything up to (and including) it.
    resume_marker = None
    process_images_in_folder(caption_input, caption_output, resume_marker)
| |
|
# Imports for the file-moving cell below (duplicates of earlier imports;
# harmless in a notebook where cells may be run independently).
import os
import shutil
| |
|
def move_json_files(source_folder, destination_folder, extension='.png'):
    """Move every file ending in *extension* from *source_folder* into
    *destination_folder* (created if missing).

    NOTE: despite the historical name, the original hard-coded ``.png``;
    the suffix is now a parameter whose default preserves that behavior,
    so existing callers are unaffected.

    Parameters
    ----------
    source_folder : str
        Directory to move files out of.
    destination_folder : str
        Target directory; created if it does not exist.
    extension : str
        Filename suffix to match (default ``'.png'``).
    """
    os.makedirs(destination_folder, exist_ok=True)
    for file_name in os.listdir(source_folder):
        if not file_name.endswith(extension):
            continue
        source_file = os.path.join(source_folder, file_name)
        destination_file = os.path.join(destination_folder, file_name)
        try:
            shutil.move(source_file, destination_file)
            print(f"Moved {file_name} to {destination_folder}")
        except Exception as e:
            # Best-effort: report and keep moving the remaining files.
            print(f"Failed to move {file_name}: {e}")
| |
|
| | |
# Relocate the saved PNG renders into the shared "caimera" image folder so
# the captioning cells above can pick them up. Runs unconditionally at the
# module level (notebook cell), not under a __main__ guard.
source_folder = "/content/drive/MyDrive/inference-images/inference-images/1683/saved"
destination_folder = "/content/drive/MyDrive/inference-images/inference-images/caimera"

move_json_files(source_folder, destination_folder)
| |
|
# Create the merge destination for the kohya fine-tune dataset.
# Fix: os.mkdir raised FileExistsError on a second run; makedirs with
# exist_ok=True keeps this cell re-runnable.
os.makedirs('/content/drive/MyDrive/kohya_finetune_data', exist_ok=True)
| |
|
# Imports for the folder-merging cell below (duplicates of earlier imports;
# harmless in a notebook where cells may be run independently).
import os
import shutil
| |
|
def merge_folders(folder_paths, destination_folder):
    """Copy the contents of every directory in *folder_paths* into
    *destination_folder*, creating it if needed.

    Name clashes are resolved by appending ``_1``, ``_2``, ... before the
    extension, so no existing file is ever overwritten.
    """
    os.makedirs(destination_folder, exist_ok=True)
    for src_dir in folder_paths:
        for name in os.listdir(src_dir):
            src_path = os.path.join(src_dir, name)
            dst_path = os.path.join(destination_folder, name)
            stem, ext = os.path.splitext(name)
            suffix = 1
            # Probe for the first free "<stem>_<n><ext>" slot.
            while os.path.exists(dst_path):
                dst_path = os.path.join(destination_folder, f"{stem}_{suffix}{ext}")
                suffix += 1
            shutil.copy2(src_path, dst_path)
            print(f"Copied {src_path} to {dst_path}")
| |
|
if __name__ == "__main__":
    # Every caption/image folder that feeds the kohya fine-tune dataset,
    # merged (with clash-safe renaming) into a single training directory.
    dataset_sources = [
        '/content/drive/MyDrive/inference-images/caimera_captions',
        '/content/drive/MyDrive/inference-images/inference-images/caimera',
        '/content/drive/MyDrive/Finetune-Dataset/Burst',
        '/content/drive/MyDrive/Finetune-Dataset/Burst_Caption',
        '/content/drive/MyDrive/Finetune-Dataset/Pexels',
        '/content/drive/MyDrive/Finetune-Dataset/Pexels_Caption',
    ]
    merge_folders(dataset_sources, '/content/drive/MyDrive/kohya_finetune_data')
| |
|
| |
|