import re
import torch
import logging

# conversion helpers for mapping Hugging Face Diffusers state dicts to the
# original Stable Diffusion key layout

# =================#
# VAE Conversion #
# =================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]
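# e.g. the HF key "decoder.conv_norm_out.weight" is renamed to the SD key
# "decoder.norm_out.weight" by the table above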

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3 - i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3 - i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i + 1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("q.", "to_q."),
    ("k.", "to_k."),
    ("v.", "to_v."),
    ("proj_out.", "to_out.0."),
    ("proj_out.", "proj_attn."),
]
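# both the legacy diffusers attention names ("query."/"key."/"value."/"proj_attn.")
# and the newer ones ("to_q."/"to_k."/"to_v."/"to_out.0.") appear above, so VAEs
# exported before and after the diffusers attention renaming both convert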


def reshape_weight_for_sd(w, conv3d=False):
    # convert HF linear weights to SD conv weights
    if conv3d:
        return w.reshape(*w.shape, 1, 1, 1)
    else:
        return w.reshape(*w.shape, 1, 1)
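# e.g. SD stores the VAE mid-block attention projections as 1x1 convs, so a
# (512, 512) Linear weight becomes a (512, 512, 1, 1) kernel, or
# (512, 512, 1, 1, 1) for a 3D (video) VAE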


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    conv3d = False
    # first pass: rename block prefixes
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        if v.endswith(".conv.weight"):
            # 5D conv weights mean this is a 3D (video) VAE
            if not conv3d and vae_state_dict[k].ndim == 5:
                conv3d = True
        mapping[k] = v
    # second pass: rename attention parameters
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    # mid-block attention projections are Linear in HF but convs in SD
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                logging.debug(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v, conv3d=conv3d)
    return new_state_dict
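# minimal usage sketch (illustrative only; the file paths are hypothetical):
#
#   from safetensors.torch import load_file, save_file
#   hf_vae = load_file("vae/diffusion_pytorch_model.safetensors")
#   save_file(convert_vae_state_dict(hf_vae), "vae_sd_format.safetensors")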


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
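# "protected" maps each regex-escaped HF fragment to its SD replacement, and
# "textenc_pattern" is the alternation of all those fragments, so every rename
# in the table is applied to a key with a single re.sub pass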

# q/k/v slot order used when packing the separate projections into the fused
# in_proj tensors below
code2idx = {"q": 0, "k": 1, "v": 2}


# concatenate a list of tensors along their first dimension
def cat_tensors(tensors):
    # total length of the concatenated dimension
    x = 0
    for t in tensors:
        x += t.shape[0]

    # preallocate the output, then copy each tensor into its slice
    shape = [x] + list(tensors[0].shape)[1:]
    out = torch.empty(shape, device=tensors[0].device, dtype=tensors[0].dtype)

    x = 0
    for t in tensors:
        out[x:x + t.shape[0]] = t
        x += t.shape[0]

    return out
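# cat_tensors is used below to fuse the captured q/k/v projections; for
# same-device, same-dtype inputs it behaves like torch.cat(tensors, dim=0)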


def convert_text_enc_state_dict_v20(text_enc_dict, prefix=""):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if not k.startswith(prefix):
            continue
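        # HF CLIP stores q/k/v as three separate Linear layers, while the SD
        # (OpenCLIP-style) checkpoint fuses them into single in_proj tensors;
        # collect the pieces here and concatenate them after the loop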
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # the character at this offset is "q", "k" or "v"
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
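
        # HF's text_projection is a Linear weight; the SD checkpoint stores the
        # transposed matrix directly under "text_projection"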
        text_proj = "transformer.text_projection.weight"
        if k.endswith(text_proj):
            new_state_dict[k.replace(text_proj, "text_projection")] = v.transpose(0, 1).contiguous()
        else:
            relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
            new_state_dict[relabelled_key] = v

    # fuse the captured q/k/v pieces into single in_proj tensors
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = cat_tensors(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = cat_tensors(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    # SD v1 checkpoints store the CLIP-L text encoder with HF key names, so no
    # remapping is needed
    return text_enc_dict
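

if __name__ == "__main__":
    # minimal smoke-test sketch (not part of the original conversion code; the
    # single layer and the 8-dim shapes are illustrative only): check that the
    # separate HF q/k/v projections are fused into single in_proj tensors, and
    # that a VAE resnet prefix is renamed
    fake_te = {}
    for proj in ("q_proj", "k_proj", "v_proj"):
        fake_te[f"text_model.encoder.layers.0.self_attn.{proj}.weight"] = torch.randn(8, 8)
        fake_te[f"text_model.encoder.layers.0.self_attn.{proj}.bias"] = torch.randn(8)
    converted = convert_text_enc_state_dict_v20(fake_te)
    assert converted["resblocks.0.attn.in_proj_weight"].shape == (24, 8)
    assert converted["resblocks.0.attn.in_proj_bias"].shape == (24,)

    fake_vae = {"encoder.down_blocks.0.resnets.0.conv1.weight": torch.randn(4, 4, 3, 3)}
    assert "encoder.down.0.block.0.conv1.weight" in convert_vae_state_dict(fake_vae)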