import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
print(gpus)
if gpus:
    try:
        # Enable memory growth so TF allocates GPU memory on demand
        # instead of grabbing it all at startup
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print(f"GPU active: {gpus}")
    except Exception as e:
        print("Failed to set memory growth:", e)
else:
    print("No GPU detected.")

import os

BASE_DIR = "/workspace/dataset"
START, END = 0, 59
DRY_RUN = False

def main():
    base = os.path.abspath(BASE_DIR)
    ref_dir = os.path.join(base, f"style{START}")
    if not os.path.isdir(ref_dir):
        print(f"❌ Folder {ref_dir} not found.")
        return

    files_ref = sorted([f for f in os.listdir(ref_dir) if f.lower().endswith(".png")])
    print(f"🔍 Total reference files from style{START}: {len(files_ref)}")

    complete = []
    missing = {}

    # A file is "complete" only if it exists in every style folder
    for fname in files_ref:
        ok = True
        for i in range(START, END + 1):
            style_path = os.path.join(base, f"style{i}", fname)
            if not os.path.isfile(style_path):
                ok = False
                missing.setdefault(fname, []).append(f"style{i}")
        if ok:
            complete.append(fname)

    print(f"✅ Complete in all styles: {len(complete)} files")
    print(f"❌ Incomplete: {len(missing)} files")

    # Delete every remaining copy of any incomplete file
    # (in DRY_RUN mode the paths are printed but nothing is removed)
    if missing:
        for fname, styles in missing.items():
            for i in range(START, END + 1):
                path = os.path.join(base, f"style{i}", fname)
                if os.path.isfile(path):
                    if not DRY_RUN:
                        os.remove(path)
                    print(f"🗑️ Deleted {path}")
        print(f"\n🔥 Done! {len(missing)} files cleaned out of all style folders.")
    else:
        print("All files are complete in every style; nothing to delete.")

if __name__ == "__main__":
    main()

import os
from glob import glob
import pandas as pd

data = []

root_dir = "/workspace/dataset"

for style_id in range(60):
    folder_path = os.path.join(root_dir, f"style{style_id}")
    image_paths = glob(os.path.join(folder_path, "*.png"))

    for path in image_paths:
        # The label is the filename without extension (e.g. "AB12C.png" -> "AB12C")
        label = os.path.splitext(os.path.basename(path))[0]
        data.append((path, label, f"style{style_id}"))

df = pd.DataFrame(data, columns=["filepath", "label", "style"])

import re
import pandas as pd
from collections import Counter

# Strict: exactly 5 uppercase alphanumerics; the lenient variant is kept for reference
ALLOWED_REGEX_STRICT = r'^[A-Z0-9]{5}$'
ALLOWED_REGEX_LEN5_ALNUM = r'^[A-Za-z0-9]{5}$'

df['label'] = df['label'].astype(str).str.strip()

# na=False: a missing label counts as invalid rather than silently passing
invalid_mask = ~df['label'].str.match(ALLOWED_REGEX_STRICT, na=False)
invalid_df = df[invalid_mask].copy()

# Break the failures down by cause
df['len'] = df['label'].str.len()
too_short = df[df['len'] < 5]
too_long = df[df['len'] > 5]
has_non_alnum = df[df['label'].str.contains(r'[^A-Za-z0-9]', na=True)]
has_lower = df[df['label'].str.contains(r'[a-z]', na=True)]

def extract_bad_chars(s: str):
    return re.findall(r'[^A-Za-z0-9]', s)

bad_chars_counter = Counter()
for lab in has_non_alnum['label'].dropna().tolist():
    bad_chars_counter.update(extract_bad_chars(lab))
bad_chars_list = sorted(bad_chars_counter.items(), key=lambda x: -x[1])

print("=== LABEL VALIDATION ===")
print(f"Total rows         : {len(df)}")
print(f"Invalid (strict)   : {len(invalid_df)}")
print(f"- Length < 5       : {len(too_short)}")
print(f"- Length > 5       : {len(too_long)}")
print(f"- Has non-alnum    : {len(has_non_alnum)}")
print(f"- Has lowercase    : {len(has_lower)}")

if len(invalid_df) > 0:
    sample = invalid_df['label'].head(20).tolist()
    print("\nSample invalid labels (max 20):", sample)

if bad_chars_list:
    print("\nNon-alnum characters found (char, count):", bad_chars_list[:20])

if len(invalid_df) > 0:
    invalid_df.to_csv("invalid_labels.csv", index=False)
    print("\n>> Saved: invalid_labels.csv")

# Fail fast so bad labels never reach training
if len(invalid_df) > 0:
    raise ValueError(
        f"Found {len(invalid_df)} invalid labels. Fix them first (see invalid_labels.csv)."
    )

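If the strict check fails only because of lowercase letters, one possible pre-fix before re-running the validation is to uppercase the labels, which matches the `.upper()` the data generator applies later (a sketch):

# Optional fix-up: uppercase all labels, then re-run the validation cell above
df['label'] = df['label'].str.upper()
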
df

from sklearn.model_selection import train_test_split

# 90/10 train/test, then 90/10 train/val, stratified by style so every
# style is represented in each split
train_df, test_df = train_test_split(df, test_size=0.1, random_state=42, stratify=df['style'])
train_df, val_df = train_test_split(train_df, test_size=0.1, random_state=42, stratify=train_df['style'])

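A quick look at the resulting split sizes (a minimal sketch; the two 0.1 splits give roughly 81/9/10 of the data):

# Sanity check: absolute size and fraction of each split
for name, part in [("train", train_df), ("val", val_df), ("test", test_df)]:
    print(name, len(part), round(len(part) / len(df), 3))
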
from torchvision import transforms
from PIL import Image

transform = transforms.Compose([
    transforms.Resize((50, 250)),          # (height, width)
    transforms.ToTensor(),                 # [0, 255] -> [0.0, 1.0]
    transforms.Normalize((0.5,), (0.5,))   # -> [-1.0, 1.0]
])

def load_image(path):
    img = Image.open(path).convert("L")  # grayscale
    return transform(img)

from torch.utils.data import Dataset

class CaptchaDataset(Dataset):
    def __init__(self, dataframe, transform):
        self.dataframe = dataframe.reset_index(drop=True)
        self.transform = transform

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        row = self.dataframe.iloc[idx]
        image = Image.open(row.filepath).convert("L")
        image = self.transform(image)
        label = row.label
        return image, label

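The PyTorch dataset above is not used by the Keras pipeline that follows; a quick usage sketch for completeness:

# Example: fetch one sample (tensor of shape [1, 50, 250] plus its label string)
ds = CaptchaDataset(train_df, transform)
img, lab = ds[0]
print(img.shape, lab)
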
from tensorflow.keras import mixed_precision
mixed_precision.set_global_policy('mixed_float16')

import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, Permute, Reshape,
                                     Bidirectional, LSTM, Dense, Dropout, Activation,
                                     BatchNormalization)
from tensorflow.keras import backend as K

# 36 characters + 1 CTC blank (ctc_batch_cost treats the last class as blank)
char_set = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
num_classes = len(char_set) + 1

input_shape = (50, 250, 1)  # (height, width, channels)
lstm_units = 128

input_tensor = Input(shape=input_shape, name='input')

# CNN feature extractor: three conv blocks, each halving height and width
x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_tensor)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2))(x)

x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2))(x)

x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPooling2D((2, 2))(x)

# After three 2x2 poolings the feature map is (None, 6, 31, 128).
# Make width the time axis: permute to (None, 31, 6, 128), then collapse
# height and channels into the feature dimension -> (None, 31, 768).
# A plain Reshape without the Permute would scramble image columns
# across time steps, which breaks CTC alignment.
x = Permute((2, 1, 3))(x)
shape_before_rnn = K.int_shape(x)
x = Reshape(target_shape=(shape_before_rnn[1], shape_before_rnn[2] * shape_before_rnn[3]))(x)

# Two stacked bidirectional LSTMs over the 31 time steps
x = Bidirectional(LSTM(lstm_units, return_sequences=True,
                       dropout=0.0, recurrent_dropout=0.0))(x)
x = Bidirectional(LSTM(lstm_units, return_sequences=True,
                       dropout=0.0, recurrent_dropout=0.0))(x)

# Per-time-step class probabilities. Keep the softmax in float32 under
# the mixed_float16 policy so the CTC loss stays numerically stable.
x = Dense(num_classes, activation='softmax', name='predictions', dtype='float32')(x)

# Prediction model: image -> per-step softmax
model = Model(inputs=input_tensor, outputs=x)

# Training model: wrap the CTC loss in a Lambda layer fed by extra inputs
labels = tf.keras.Input(name='labels', shape=(None,), dtype='int32')
input_length = tf.keras.Input(name='input_length', shape=(1,), dtype='int32')
label_length = tf.keras.Input(name='label_length', shape=(1,), dtype='int32')

def ctc_lambda_func(args):
    y_pred, labels_t, in_len, lab_len = args
    return tf.keras.backend.ctc_batch_cost(labels_t, y_pred, in_len, lab_len)

ctc_loss_output = tf.keras.layers.Lambda(
    ctc_lambda_func, output_shape=(1,), name='ctc_loss', dtype='float32'
)([x, labels, input_length, label_length])

model_with_ctc = Model(inputs=[input_tensor, labels, input_length, label_length],
                       outputs=ctc_loss_output)

# The Lambda layer already outputs the loss, so the compiled "loss" simply
# passes y_pred through; y_true is a dummy array of zeros from the generator.
model_with_ctc.compile(loss={'ctc_loss': lambda y_true, y_pred: y_pred}, optimizer='adam')

model.summary()

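A quick shape check on the prediction head (a sketch; 31 time steps come from the 250-px width after three 2x2 poolings, and 37 classes include the CTC blank):

# Expected: (None, 31, 37)
print(model.output_shape)
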
from torchvision import transforms as T
from torchvision.transforms import InterpolationMode
import tensorflow as tf

# Final transform: explicit bilinear resize with antialiasing
transform = T.Compose([
    T.Resize((50, 250), interpolation=InterpolationMode.BILINEAR, antialias=True),
    T.ToTensor(),
    T.Normalize((0.5,), (0.5,)),
])

CHARSET = list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")

# char -> id (0..35); no OOV or mask slots, so ids align with the CTC classes
char_to_num = tf.keras.layers.StringLookup(
    vocabulary=CHARSET,
    oov_token=None,
    mask_token=None,
    num_oov_indices=0
)

# id -> char (inverse lookup, used for decoding)
num_to_char = tf.keras.layers.StringLookup(
    vocabulary=CHARSET,
    invert=True,
    num_oov_indices=0,
    mask_token=None,
)

print("vocab size:", len(char_to_num.get_vocabulary()))
print(char_to_num.get_vocabulary())
print(num_to_char.get_vocabulary())

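A quick round trip through the two lookup layers (a sketch; "AB12C" is just an illustrative label):

# Encode a label to ids and decode it back; the round trip should be lossless
example = "AB12C"  # hypothetical label, for illustration only
ids = char_to_num(tf.constant(list(example)))
back = tf.strings.reduce_join(num_to_char(ids)).numpy().decode("utf-8")
print(example, "->", ids.numpy().tolist(), "->", back)
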
import numpy as np
from PIL import Image
import tensorflow as tf

class DataGenerator(tf.keras.utils.Sequence):
    def __init__(self, dataframe, char_to_num,
                 batch_size=32, img_width=250, img_height=50, max_label_length=5):
        self.dataframe = dataframe.reset_index(drop=True)
        self.char_to_num = char_to_num
        self.batch_size = batch_size
        self.img_width = img_width
        self.img_height = img_height
        self.max_label_length = max_label_length
        # Width is downsampled by 2^3 = 8 in the CNN, so 250 // 8 = 31 time steps
        self.time_steps = self.img_width // 8
        self.on_epoch_end()

    def __len__(self):
        # Drop the last partial batch
        return len(self.dataframe) // self.batch_size

    def __getitem__(self, index):
        start_index = index * self.batch_size
        end_index = (index + 1) * self.batch_size
        batch_df = self.dataframe.iloc[start_index:end_index]

        images = []
        labels = []
        input_lengths = np.full((len(batch_df), 1), self.time_steps, dtype=np.int64)
        label_lengths = []

        for _, row in batch_df.iterrows():
            # Image: grayscale -> module-level torchvision transform -> HWC array in [-1, 1]
            img = Image.open(row.filepath).convert("L")
            t = transform(img)
            arr = t.permute(1, 2, 0).numpy()  # CHW -> HWC
            images.append(arr)

            # Label: chars -> ids, padded to max_label_length with -1
            # (ctc_batch_cost ignores the padding via label_length)
            lab = row.label.upper()
            lab_ids = self.char_to_num(tf.constant(list(lab))).numpy().astype(np.int32)
            pad_len = self.max_label_length - len(lab_ids)
            if pad_len < 0:
                lab_ids = lab_ids[:self.max_label_length]
                pad_len = 0
            lab_ids = np.pad(lab_ids, (0, pad_len), mode="constant", constant_values=-1)
            labels.append(lab_ids)

            label_lengths.append([len(lab)])

        images = np.asarray(images, dtype=np.float32)
        labels = np.asarray(labels, dtype=np.int32)
        label_lengths = np.asarray(label_lengths, dtype=np.int64)

        inputs = {
            'input': images,
            'labels': labels,
            'input_length': input_lengths,
            'label_length': label_lengths
        }
        # Dummy targets: the CTC Lambda layer already computes the loss
        outputs = np.zeros((images.shape[0],), dtype=np.float32)

        return inputs, outputs

    def on_epoch_end(self):
        # Reshuffle between epochs
        self.dataframe = self.dataframe.sample(frac=1.0).reset_index(drop=True)

train_generator = DataGenerator(train_df, char_to_num, batch_size=32, max_label_length=5)
val_generator = DataGenerator(val_df, char_to_num, batch_size=32, max_label_length=5)

import numpy as np

# Smoke-test one batch from the generator
(inputs, outputs) = train_generator[0]

x = inputs['input']
y = inputs['labels']
inlen = inputs['input_length']
lablen = inputs['label_length']

print("x:", x.shape, x.dtype, x.min(), x.max())
print("labels:", y.shape, y.dtype, "unique pads:", sorted(set(y.flatten()) - set(range(0, 36)))[:5])
print("input_length uniq:", set(inlen.flatten().tolist()))
print("label_length uniq:", set(lablen.flatten().tolist()))
print("outputs (dummy):", outputs.shape, outputs.dtype)

assert x.shape[1:] == (50, 250, 1)
assert y.shape[1] == 5
assert inlen.min() == inlen.max() == 31
assert lablen.min() >= 1 and lablen.max() <= 5
assert y.dtype == np.int32

# All labels are exactly 5 chars, so no -1 padding should survive
assert y.min() >= 0 and y.max() <= 35, f"Label outside range 0..35: min={y.min()}, max={y.max()}"

# Push a few samples through the untrained model and check the CTC loss is finite
yp = model.predict(x[:4], verbose=0)
loss = tf.keras.backend.ctc_batch_cost(y[:4], yp, inlen[:4], lablen[:4]).numpy()
print("CTC sample loss:", loss)
assert np.all(np.isfinite(loss)), f"CTC loss non-finite: {loss}"

# Decode ground-truth ids back to strings as a visual check
CHARSET = np.array(list("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
def decode_ids_row_np(ids_1d):
    ids_1d = [int(t) for t in ids_1d if int(t) >= 0]
    return "".join(CHARSET[ids_1d]) if ids_1d else ""

for i in range(3):
    print(i, "GT:", decode_ids_row_np(y[i]))

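The ground-truth decode above can be mirrored on the model side with greedy CTC decoding; a minimal sketch (the output is gibberish until the model is trained):

# Greedy CTC decode: collapses repeats, drops the blank, pads with -1,
# which decode_ids_row_np already filters out
decoded, _ = tf.keras.backend.ctc_decode(
    yp, input_length=np.full((yp.shape[0],), yp.shape[1]), greedy=True
)
for i in range(3):
    print(i, "PRED:", decode_ids_row_np(decoded[0][i].numpy()))
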
| | """SIMPAN TIAP EPOCH""" |
| |
|
import os, re, glob
from pathlib import Path
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

CKPT_DIR = Path("/workspace")
CKPT_DIR.mkdir(parents=True, exist_ok=True)

BEST_PATH = CKPT_DIR / "captcha_best.weights.h5"
EPOCH_PATH = CKPT_DIR / "captcha_ep{epoch:03d}.weights.h5"

# Keep only the best weights by validation loss
ckpt_best = ModelCheckpoint(
    filepath=str(BEST_PATH),
    monitor="val_loss",
    save_best_only=True,
    save_weights_only=True,
    save_freq="epoch",
    verbose=1,
)

# Also save weights after every epoch so training can be resumed
ckpt_every_epoch = ModelCheckpoint(
    filepath=str(EPOCH_PATH),
    save_best_only=False,
    save_weights_only=True,
    save_freq="epoch",
    verbose=0,
)

early_stopping = EarlyStopping(
    monitor="val_loss",
    patience=15,
    restore_best_weights=True,
    verbose=1,
)

# Find the most recent per-epoch checkpoint, if any
def find_latest_epoch_ckpt(dir_path: Path):
    files = glob.glob(str(dir_path / "captcha_ep*.weights.h5"))
    if not files:
        return None, None
    pairs = []
    for f in files:
        m = re.search(r"captcha_ep(\d{3})\.weights\.h5$", os.path.basename(f))
        if m:
            pairs.append((int(m.group(1)), f))
    if not pairs:
        return None, None
    pairs.sort(key=lambda x: x[0])
    return pairs[-1]

initial_epoch = 0
ep, last_path = find_latest_epoch_ckpt(CKPT_DIR)
if last_path:
    print(f"[RESUME] Loading weights from {last_path}")
    model_with_ctc.load_weights(last_path)
    initial_epoch = ep  # fit() will continue from the next epoch
    print(f"[RESUME] initial_epoch set to {initial_epoch}")
elif BEST_PATH.exists():
    print(f"[RESUME] Loading BEST weights from {BEST_PATH}")
    model_with_ctc.load_weights(str(BEST_PATH))
    initial_epoch = 0
else:
    print("[RESUME] No checkpoint found. Starting from scratch.")

history = model_with_ctc.fit(
    train_generator,
    validation_data=val_generator,
    epochs=100,
    initial_epoch=initial_epoch,
    callbacks=[ckpt_best, ckpt_every_epoch, early_stopping],
    verbose=1,
)

# Final snapshots: training weights plus the standalone prediction model
model_with_ctc.save_weights(str(CKPT_DIR / "captcha_final.weights.h5"))
model.save(str(CKPT_DIR / "captcha_final_model_base.h5"))
model.save(str(CKPT_DIR / "captcha_final_model_base.keras"))
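
For inference later, the saved base model (which has no CTC inputs) can be reloaded directly; a minimal sketch using the paths and helpers defined above:

# Reload the prediction model and greedily decode one validation batch
reloaded = tf.keras.models.load_model(str(CKPT_DIR / "captcha_final_model_base.keras"))
batch_inputs, _ = val_generator[0]
probs = reloaded.predict(batch_inputs['input'], verbose=0)
decoded, _ = tf.keras.backend.ctc_decode(
    probs, input_length=np.full((probs.shape[0],), probs.shape[1]), greedy=True
)
print("sample prediction:", decode_ids_row_np(decoded[0][0].numpy()))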