Keras
File size: 7,817 Bytes
98ea101
1
{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"gpuType":"T4","authorship_tag":"ABX9TyMPsuOBY6O1+zEj7zuQZNol"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","source":["# Run DeepPanel"],"metadata":{"id":"0-p1_2Rr-AIv"}},{"cell_type":"code","source":["#@markdown **Cell 1: Install Dependencies (run this first in a new session)**\n","\n","\n","# %pip (rather than !pip) installs into the kernel's own environment\n","%pip install -q tensorflow opencv-python-headless numpy tqdm datasets huggingface_hub\n","\n","print(\"βœ… All dependencies installed for this fresh notebook session\")"],"metadata":{"cellView":"form","id":"G2tglru299y9"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown **Cell 2: Mount Google Drive**\n","\n","\n","force_remount = True #@param {type:\"boolean\"}\n","\n","from google.colab import drive\n","drive.mount('/content/drive', force_remount=force_remount)"],"metadata":{"cellView":"form","id":"6JxaEkhQ-OmD"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown **Cell 3: Unzip comics.zip**\n","\n","path = '/content/drive/MyDrive/comics.zip' #@param {type:\"string\"}\n","quiet_unzip = False #@param {type:\"boolean\"}\n","manual_upload_zip = True #@param {type:\"boolean\"}\n","\n","import os\n","import shutil\n","from google.colab import files\n","\n","# Define the target directory for unzipping\n","target_dir = '/content/images_to_extract'\n","\n","# Clear the target directory if it exists\n","if os.path.exists(target_dir):\n","    shutil.rmtree(target_dir)\n","os.makedirs(target_dir, exist_ok=True)\n","\n","%cd /content/\n","unzip_flag = \"-q\" if quiet_unzip else \"\"\n","\n","if manual_upload_zip:\n","    print(\"Please upload your comics.zip file:\")\n","    uploaded = files.upload()\n","    if uploaded:\n","        uploaded_filename = list(uploaded.keys())[0]\n","     
   print(f\"Uploaded file: {uploaded_filename}\")\n","        !unzip {unzip_flag} \"{uploaded_filename}\" -d \"{target_dir}\"\n","        os.remove(uploaded_filename) # Clean up the uploaded zip file\n","    else:\n","        # Upload dialog cancelled - fall back to the Drive path\n","        print(\"No file uploaded. Using default path from Google Drive.\")\n","        !unzip {unzip_flag} \"{path}\" -d \"{target_dir}\"\n","else:\n","    print(f\"Unzipping from Google Drive path: {path}\")\n","    !unzip {unzip_flag} \"{path}\" -d \"{target_dir}\"\n","\n","print(\"βœ… Unzipped successfully!\")\n","!ls {target_dir} | head -20"],"metadata":{"cellView":"form","id":"REfbIQXy-cYu"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown **Cell 4: Fetch DeepPanel Model from Hugging Face & Run Extraction**\n","\n","repo_id = \"codeShare/comic-panel-extract\" #@param {type:\"string\"}\n","model_filename = \"deeppanel_model.keras\" #@param {type:\"string\"}\n","\n","from huggingface_hub import hf_hub_download\n","import tensorflow as tf\n","import os\n","\n","# ==================== FETCH MODEL FROM HUGGING FACE ====================\n","print(f\"πŸ”„ Downloading DeepPanel model from https://huggingface.co/{repo_id} ...\")\n","model_path = hf_hub_download(\n","    repo_id=repo_id,\n","    filename=model_filename,\n","    # no auth token needed: the repo is public\n",")\n","print(\"βœ… Model fetched successfully!\")\n","\n","# Load the downloaded .keras model into memory\n","model = tf.keras.models.load_model(model_path)\n","print(\"βœ… Model loaded successfully from Hugging Face:\", model_path)\n","\n","# ==================== EXTRACTION SETTINGS ====================\n","output_dir = '/content/extracted_panels' #@param {type:\"string\"}\n","zip_output_path = '/content/extracted_panels.zip' #@param {type:\"string\"}\n","create_zip = True #@param {type:\"boolean\"}\n","auto_download = True #@param {type:\"boolean\"}\n","\n","import cv2\n","import numpy as np\n","from tqdm import tqdm\n","import glob\n","import zipfile\n","from google.colab import 
files\n","\n","os.makedirs(output_dir, exist_ok=True)\n","\n","# ---------------------------------------------------\n","def preprocess_image(image_path, target_size=(256, 256)):\n","    \"\"\"Load an image, convert BGR->RGB, resize to target_size, scale to [0, 1].\n","\n","    Returns (image, (height, width)) where image is the resized float array\n","    and (height, width) is the page's original size.\n","    Raises ValueError if the file cannot be read.\n","    \"\"\"\n","    image = cv2.imread(image_path)\n","    if image is None:\n","        raise ValueError(f\"Failed to load image: {image_path}\")\n","    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n","    original_shape = image.shape[:2]\n","    image = cv2.resize(image, target_size)\n","    image = image / 255.0\n","    return image, original_shape\n","\n","# ---------------------------------------------------\n","def extract_panels(image_path, mask, original_shape, output_dir, cluster_name, min_size=30):\n","    \"\"\"Crop panel bounding boxes out of image_path using a predicted mask.\n","\n","    The mask is resized back to the original page size, thresholded at 0.5,\n","    and each external contour becomes one panel crop saved as JPEG under\n","    output_dir/cluster_name. Boxes narrower or shorter than min_size pixels\n","    are skipped as noise. Returns the number of panels written.\n","    \"\"\"\n","    mask = cv2.resize(mask, (original_shape[1], original_shape[0]), interpolation=cv2.INTER_NEAREST)\n","    mask = (mask > 0.5).astype(np.uint8) * 255\n","\n","    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n","\n","    original_image = cv2.imread(image_path)\n","    if original_image is None:\n","        raise ValueError(f\"Failed to load original image: {image_path}\")\n","\n","    panel_dir = os.path.join(output_dir, cluster_name)\n","    os.makedirs(panel_dir, exist_ok=True)\n","\n","    panel_count = 0\n","    img_name = os.path.splitext(os.path.basename(image_path))[0]\n","\n","    for contour in contours:\n","        x, y, w, h = cv2.boundingRect(contour)\n","        if w < min_size or h < min_size:\n","            continue\n","        panel = original_image[y:y+h, x:x+w]\n","        panel_filename = f\"{img_name}_panel_{panel_count:02d}.jpg\"\n","        cv2.imwrite(os.path.join(panel_dir, panel_filename), panel)\n","        panel_count += 1\n","\n","    return panel_count\n","\n","# ====================== MAIN EXTRACTION ======================\n","print(\"\\nπŸ” Finding all comic pages...\")\n","image_paths = []\n","image_source_dir = '/content/images_to_extract'\n","for ext in ('*.jpg', '*.jpeg', '*.png', '*.JPG', '*.JPEG', '*.PNG'):\n","    
image_paths.extend(glob.glob(f'{image_source_dir}/**/{ext}', recursive=True))\n","\n","print(f\"Found {len(image_paths)} images.\")\n","\n","image_count = 0\n","total_panels = 0\n","\n","for image_path in tqdm(image_paths, desc=\"Extracting panels\"):\n","    try:\n","        processed, original_shape = preprocess_image(image_path)\n","        input_tensor = np.expand_dims(processed, axis=0).astype(np.float32)\n","        mask_pred = model.predict(input_tensor, verbose=0)[0]\n","        # NOTE(review): assumes the model emits a single-channel mask; a\n","        # multi-channel output (H, W, C>1) would pass through unsquashed - confirm\n","        if len(mask_pred.shape) == 3 and mask_pred.shape[-1] == 1:\n","            mask_pred = mask_pred.squeeze(axis=-1)\n","\n","        # Use the page's parent folder as the output subfolder ('comics' at top level)\n","        cluster_name = os.path.basename(os.path.dirname(image_path))\n","        if cluster_name == 'images_to_extract' or cluster_name in ('', '.'):\n","            cluster_name = 'comics'\n","\n","        num_panels = extract_panels(image_path, mask_pred, original_shape, output_dir, cluster_name)\n","        total_panels += num_panels\n","        image_count += 1\n","    except Exception as e:\n","        # Best-effort: log and keep going so one bad page doesn't abort the run\n","        print(f\"⚠️ Skipped {os.path.basename(image_path)} β†’ {e}\")\n","\n","print(f\"\\nπŸŽ‰ Extraction finished! Pages: {image_count} | Panels: {total_panels}\")\n","\n","# ====================== ZIP & DOWNLOAD ======================\n","if create_zip:\n","    print(\"\\nπŸ“¦ Zipping extracted panels...\")\n","    with zipfile.ZipFile(zip_output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:\n","        for root, _, file_list in os.walk(output_dir):\n","            for file_name in file_list:\n","                # Store paths relative to output_dir so the zip root stays clean\n","                zipf.write(os.path.join(root, file_name), os.path.relpath(os.path.join(root, file_name), output_dir))\n","    print(f\"βœ… Zipped β†’ {zip_output_path}\")\n","    if auto_download:\n","        files.download(zip_output_path)"],"metadata":{"cellView":"form","id":"zi_PSEsV-ipa"},"execution_count":null,"outputs":[]}]}