import os

import numpy as np
import requests
import sentencepiece as spm
import tensorflow as tf
from tensorflow.keras import layers, mixed_precision
tf.get_logger().setLevel("ERROR")

SEED = 42
tf.random.set_seed(SEED)
np.random.seed(SEED)

max_len = 150     # maximum sequence length for both encoder and decoder inputs
batch_size = 128
# Initialize the TPU if one is available; otherwise fall back to the default strategy.
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    print("TPU initialized:", resolver.cluster_spec().as_dict())
    on_tpu = True
except Exception as e:
    print("TPU not available, continuing on GPU/CPU:", e)
    strategy = tf.distribute.get_strategy()
    on_tpu = False

# Use bfloat16 mixed precision on TPU and plain float32 otherwise.
policy = mixed_precision.Policy("mixed_bfloat16" if on_tpu else "float32")
mixed_precision.set_global_policy(policy)
print("Mixed precision policy:", policy)
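# Optional sanity check (illustrative, not part of the original script): report how many
# replicas the chosen strategy shards over and which dtypes the mixed-precision policy uses.
print("Replicas in sync:", strategy.num_replicas_in_sync)
print("Compute dtype:", policy.compute_dtype, "| variable dtype:", policy.variable_dtype)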
def download_file(url, save_path):
    # Stream the file to disk in 16 KiB chunks.
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in r.iter_content(8192 * 2):
            f.write(chunk)
    print(f"Saved {save_path}")
DATA_PATH = "converted.jsonl"
TOKENIZER_PATH = "ko_unigram.model"

if not os.path.exists(DATA_PATH):
    download_file(
        "https://huggingface.co/datasets/Yuchan5386/TinyInst/resolve/main/output.jsonl?download=true",
        DATA_PATH,
    )

if not os.path.exists(TOKENIZER_PATH):
    download_file(
        "https://huggingface.co/datasets/Yuchan5386/TinyInst/resolve/main/ko_unigram.model?download=true",
        TOKENIZER_PATH,
    )
sp = spm.SentencePieceProcessor(model_file=TOKENIZER_PATH)

# Special-token ids defined in the SentencePiece model (fall back to 0 for <pad> if missing).
pad_id = sp.piece_to_id("<pad>") if sp.piece_to_id("<pad>") != -1 else 0
start_id = sp.piece_to_id("<start>")
sep_id = sp.piece_to_id("<sep>")
end_id = sp.piece_to_id("<end>")
unk_id = sp.piece_to_id("<unk>")
vocab_size = sp.get_piece_size()
print(f"Vocabulary size: {vocab_size}")


def text_to_ids(text):
    return sp.encode(text, out_type=int)


def ids_to_text(ids):
    return sp.decode(ids)
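# Quick illustrative check of the tokenizer helpers (not in the original script); the sample
# sentence is arbitrary and the exact ids depend on the downloaded tokenizer model.
_sample = "안녕하세요"
print("Sample ids:", text_to_ids(_sample))
print("Decoded:", ids_to_text(text_to_ids(_sample)))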
class SwiGLU(layers.Layer):
    """Feed-forward block with a SiLU-gated linear unit."""

    def __init__(self, d_model, d_ff):
        super().__init__()
        self.proj = layers.Dense(d_ff)    # projected features are split into value/gate halves
        self.out = layers.Dense(d_model)

    def call(self, x):
        x_proj = self.proj(x)
        x_val, x_gate = tf.split(x_proj, 2, axis=-1)
        return self.out(x_val * tf.nn.silu(x_gate))
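# Minimal shape check for SwiGLU (illustrative only; the sizes here are arbitrary, but d_ff
# must be even because the projection is split into a value half and a gate half).
print("SwiGLU output shape:", SwiGLU(d_model=16, d_ff=32)(tf.zeros((2, 4, 16))).shape)  # (2, 4, 16)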
class gMLPBlock(layers.Layer):
    """gMLP block: channel projection followed by a spatial gating unit (SGU)."""

    def __init__(self, d_model, seq_len, dropout=0.1):
        super().__init__()
        self.d_model = d_model
        self.seq_len = seq_len
        self.norm = layers.LayerNormalization(epsilon=1e-6)

        # Channel expansion to 4 * d_model; split below into value (u) and gate (v) streams.
        self.channel_proj = layers.Dense(d_model * 4, use_bias=True)
        self.dropout = layers.Dropout(dropout)

        # Spatial gating unit: a seq_len -> seq_len projection applied across the sequence axis.
        self.sgu_norm = layers.LayerNormalization(epsilon=1e-6)
        self.sgu_proj = layers.Dense(seq_len, use_bias=False)
        self.sgu_final = layers.Dense(d_model * 2, use_bias=True)

        self.out_proj = layers.Dense(d_model, use_bias=True)

    def call(self, x, training=False):
        residual = x
        x_norm = self.norm(x)
        x_proj = self.channel_proj(x_norm)

        # Split channels into a value stream (u) and a gating stream (v).
        u, v = tf.split(x_proj, 2, axis=-1)

        # Apply the spatial projection along the sequence axis via transposes.
        v_norm = self.sgu_norm(v)
        v_norm_T = tf.transpose(v_norm, perm=[0, 2, 1])
        v_proj = self.sgu_proj(v_norm_T)
        v_proj_T = tf.transpose(v_proj, perm=[0, 2, 1])

        u_act = tf.nn.gelu(u)
        v_gate = self.sgu_final(v_proj_T)

        z = u_act * v_gate
        z = self.dropout(z, training=training)
        out = self.out_proj(z)

        return residual + out
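# Minimal shape check for a gMLP block (illustrative only; seq_len must match the input
# length because the spatial gating unit projects across the sequence axis).
print("gMLP output shape:", gMLPBlock(d_model=16, seq_len=8)(tf.zeros((2, 8, 16))).shape)  # (2, 8, 16)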
class CrossBlock(layers.Layer):
    """Cross-attention: the decoder stream x attends over the encoder output z."""

    def __init__(self, clip_value=5.0, eps=1e-6):
        super().__init__()
        self.clip_value = clip_value
        self.eps = eps
        self.attn = layers.MultiHeadAttention(num_heads=8, key_dim=20)

    def call(self, x, z):
        return self.attn(x, z, z)
class LoU(layers.Layer):
    """Decoder block: causal self-attention, cross-attention, then a SwiGLU feed-forward."""

    def __init__(self, d_model, clip_value=5.0, eps=1e-6):
        super().__init__()
        self.d_model = d_model
        self.clip_value = float(clip_value)
        self.mha = layers.MultiHeadAttention(num_heads=8, key_dim=20)
        self.norm1 = layers.LayerNormalization(epsilon=1e-5, dtype="float32")
        self.norm = layers.LayerNormalization(epsilon=1e-5, dtype="float32")
        self.glu = SwiGLU(d_model, 350)
        self.cross = CrossBlock()

    def call(self, x, z):
        in_dtype = x.dtype
        residual = tf.cast(x, tf.float32)

        # Causal self-attention over the decoder stream.
        x_norm = self.norm1(residual)
        x_comb = self.mha(x_norm, x_norm, x_norm, use_causal_mask=True)

        # Cast back to float32 before the residual add so dtypes match under mixed precision.
        out = self.norm(tf.cast(x_comb, tf.float32) + residual)
        out = self.cross(out, z)
        out = self.glu(out)
        return tf.cast(out, in_dtype)
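# Minimal shape check for one decoder block (illustrative only): x is the decoder stream and
# z is the encoder output it cross-attends to; d_model matches the model built below.
_lou_demo = LoU(d_model=160)
print("LoU output shape:", _lou_demo(tf.zeros((1, 8, 160)), tf.zeros((1, 8, 160))).shape)  # (1, 8, 160)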
class AlphaS2S(tf.keras.Model):
    """Sequence-to-sequence model: gMLP encoder stack and LoU decoder stack."""

    def __init__(self, num_layers, d_model, num_heads, input_vocab_size, target_vocab_size,
                 max_len=200, dropout=0.1):
        super().__init__()
        self.max_len = max_len
        self.d_model = d_model

        # Token and learned positional embeddings for encoder and decoder.
        self.enc_embedding = layers.Embedding(input_vocab_size, d_model)
        self.enc_pos_embedding = layers.Embedding(max_len, d_model)
        self.dec_embedding = layers.Embedding(target_vocab_size, d_model)
        self.dec_pos_embedding = layers.Embedding(max_len, d_model)

        self.enc_layers = [gMLPBlock(d_model, seq_len=max_len) for _ in range(num_layers)]
        self.dec_layers = [LoU(d_model) for _ in range(num_layers)]

        self.final_layer = layers.Dense(target_vocab_size, use_bias=False)

    def call(self, inputs, training=False):
        enc_inputs = inputs["enc_inputs"]
        dec_inputs = inputs["dec_inputs"]

        enc_pos = tf.range(tf.shape(enc_inputs)[1])[tf.newaxis, :]
        dec_pos = tf.range(tf.shape(dec_inputs)[1])[tf.newaxis, :]

        # Encoder: token plus positional embeddings, then the gMLP stack.
        x = self.enc_embedding(enc_inputs) + self.enc_pos_embedding(enc_pos)
        for layer in self.enc_layers:
            x = layer(x, training=training)
        enc_out = x

        # Decoder: token plus positional embeddings, then the LoU stack with cross-attention.
        y = self.dec_embedding(dec_inputs) + self.dec_pos_embedding(dec_pos)
        for layer in self.dec_layers:
            y = layer(y, enc_out)

        return self.final_layer(y)
chat_model = AlphaS2S(num_layers=4, d_model=160, num_heads=8,
                      input_vocab_size=vocab_size, target_vocab_size=vocab_size, max_len=max_len)

# Build the variables with a dummy forward pass so the saved weights can be loaded.
dummy_input = {
    "enc_inputs": tf.zeros((1, max_len), dtype=tf.int32),
    "dec_inputs": tf.zeros((1, max_len), dtype=tf.int32),
}
_ = chat_model(dummy_input)
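# Rough size check (illustrative, not in the original script): report the parameter count
# now that the dummy forward pass has built every layer.
print("Parameter count:", chat_model.count_params())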
chat_model.load_weights("/kaggle/working/chat_model.weights.h5")
print("Model weights loaded!")
def generate_text_topp(model, prompt, max_len=150, max_gen=100, p=0.9, temperature=0.8, min_len=20):
    model_input = text_to_ids(f"<start> {prompt} <sep>")
    model_input = model_input[:max_len]
    generated = list(model_input)

    for step in range(max_gen):
        current_len = len(generated)

        # Keep only the most recent max_len tokens as context.
        if current_len > max_len:
            input_seq = generated[-max_len:]
        else:
            input_seq = generated

        input_padded = np.pad(input_seq, (0, max_len - len(input_seq)), constant_values=pad_id)
        input_tensor = tf.convert_to_tensor([input_padded])

        # The same sequence is fed to both the encoder and decoder inputs.
        model_inputs = {
            "enc_inputs": input_tensor,
            "dec_inputs": input_tensor,
        }
        logits = model(model_inputs, training=False)

        # Read the next-token logits at the last non-padding position.
        next_token_logits = logits[0, len(input_seq) - 1].numpy()

        # Penalize <end> and <pad> so generations do not stop immediately.
        next_token_logits[end_id] -= 5.0
        next_token_logits[pad_id] -= 10.0

        probs = tf.nn.softmax(next_token_logits / temperature).numpy()
        sorted_indices = np.argsort(probs)[::-1]
        sorted_probs = probs[sorted_indices]

        # Nucleus (top-p) sampling: keep the smallest prefix whose probability mass reaches p.
        cumulative_probs = np.cumsum(sorted_probs)
        cutoff = np.searchsorted(cumulative_probs, p)
        top_indices = sorted_indices[:cutoff + 1]
        top_probs = sorted_probs[:cutoff + 1]
        top_probs /= np.sum(top_probs)
        next_token_id = np.random.choice(top_indices, p=top_probs)

        if next_token_id == end_id and len(generated) >= min_len:
            break

        generated.append(int(next_token_id))

    # Return only the text after <sep>, truncated at <end> if present.
    try:
        sep_index = generated.index(sep_id)
        result_ids = generated[sep_index + 1:]
        try:
            end_index = result_ids.index(end_id)
            result_ids = result_ids[:end_index]
        except ValueError:
            pass
        return ids_to_text(result_ids)
    except ValueError:
        return ids_to_text(generated)
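# Toy illustration (not in the original script) of the nucleus cutoff used above: with sorted
# probabilities [0.5, 0.3, 0.15, 0.05] and p = 0.9, the cumulative sum first reaches 0.9 at
# index 2, so the three most likely tokens are kept and renormalized before sampling.
_toy_probs = np.array([0.5, 0.3, 0.15, 0.05])
print("Tokens kept for p=0.9:", np.searchsorted(np.cumsum(_toy_probs), 0.9) + 1)  # -> 3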
print("\n\n===== Generation result =====")
# Prompt: "I have to catch a bus in a bit, so I should get ready. That was a fun conversation!"
print(generate_text_topp(chat_model, "제가 이따가 버스를 타야 해서 준비 좀 해야겠어요. 재미있는 대화였습니다!", p=0.9))