Description
When I try to load the weights I get an error.
It looks like a few keys expected by the UNetModel defined in unet.py are not present in the checkpoint.
I am pasting the code that reproduces the error below.
import os
import torch
import torch.nn as nn
import numpy as np
from PIL import Image
from torch.utils.data import DataLoader, random_split
import torchvision
from tqdm import tqdm
from torch import optim
import copy
import argparse
import uuid
import json
from diffusers import AutoencoderKL, DDIMScheduler
import random
from unet import UNetModel
import wandb
from torchvision import transforms
from feature_extractor import ImageEncoder
from utils.iam_dataset import IAMDataset
from utils.GNHK_dataset import GNHK_Dataset
from utils.auxilary_functions import *
from torchvision.utils import save_image
from torch.nn import DataParallel
from transformers import CanineModel, CanineTokenizer
torch.cuda.empty_cache()
OUTPUT_MAX_LEN = 95 #+ 2 # +groundtruth+
IMG_WIDTH = 256
IMG_HEIGHT = 64
c_classes = '_!"#&\'()*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz '
cdict = {c:i for i,c in enumerate(c_classes)}
icdict = {i:c for i,c in enumerate(c_classes)}
print("##")
tokenizer = CanineTokenizer.from_pretrained("google/canine-c")
text_encoder = CanineModel.from_pretrained("google/canine-c")
#text_encoder = nn.DataParallel(text_encoder, device_ids=device_ids)
text_encoder = text_encoder.to("cuda:0")
unet = UNetModel(image_size = (64,256), in_channels=4, model_channels=320, out_channels=4, num_res_blocks=1, attention_resolutions=(1,1), channel_mult=(1, 1), num_heads=4, num_classes=339, context_dim=320, vocab_size=79, text_encoder=text_encoder)
unet = unet.to("cuda:0")
unet.load_state_dict(torch.load('./ckpt.pt', map_location="cuda:0"))
Error logs
RuntimeError Traceback (most recent call last)
Cell In[14], line 1
----> 1 unet.load_state_dict(torch.load('/media//c4eb0693-4a65-4f0c-8d65-a6dad4b97ff9/authorsModel/diffPen/ckpt.pt', map_location="cuda:0"))
File /media//c4eb0693-4a65-4f0c-8d65-a6dad4b97ff9/allCondaEnv/boxDiff/lib/python3.8/site-packages/torch/nn/modules/module.py:1604, in Module.load_state_dict(self, state_dict, strict)
1599 error_msgs.insert(
1600 0, 'Missing key(s) in state_dict: {}. '.format(
1601 ', '.join('"{}"'.format(k) for k in missing_keys)))
1603 if len(error_msgs) > 0:
-> 1604 raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
   1605     self.__class__.__name__, "\n\t".join(error_msgs)))
1606 return _IncompatibleKeys(missing_keys, unexpected_keys)
RuntimeError: Error(s) in loading state_dict for UNetModel:
Missing key(s) in state_dict: "text_encoder.char_embeddings.position_ids", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_0.weight", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_1.weight", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_2.weight", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_3.weight", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_4.weight", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_5.weight", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_6.weight", "text_encoder.char_embeddings.HashBucketCodepointEmbedder_7.weight", "text_encoder.char_embeddings.char_position_embeddings.weight", "text_encoder.char_embeddings.token_type_embeddings.weight", "text_encoder.char_embeddings.LayerNorm.weight", "text_encoder.char_embeddings.LayerNorm.bias", "text_encoder.initial_char_encoder.layer.0.attention.self.query.weight", "text_encoder.initial_char_encoder.layer.0.attention.self.query.bias", "text_encoder.initial_char_encoder.layer.0.attention.self.key.weight", "text_encoder.initial_char_encoder.layer.0.attention.self.key.bias", "text_encoder.initial_char_encoder.layer.0.attention.self.value.weight", "text_encoder.initial_char_encoder.layer.0.attention.self.value.bias", "text_encoder.initial_char_encoder.layer.0.attention.output.dense.weight", "text_encoder.initial_char_encoder.layer.0.attention.output.dense.bias", "text_encoder.initial_char_encoder.layer.0.attention.output.LayerNorm.weight", "text_encoder.initial_char_encoder.layer.0.attention.output.LayerNorm.bias", "text_encoder.initial_char_encoder.layer.0.intermediate.dense.weight", "text_encoder.initial_char_encoder.layer.0.intermediate.dense.bias", "text_encoder.initial_char_encoder.layer.0.output.dense.weight", "text_encoder.initial_char_encoder.layer.0.output.dense.bias", "text_encoder.initial_char_encoder.layer.0.output.LayerNorm.weight", "text_encoder.initial_char_encoder.layer.0.output.LayerNorm.bias", "text_encoder.chars_to_molecules.conv.weight", "text_encoder.chars_to_molecules.conv.bias", "text_encoder.chars_to_molecules.LayerNorm.weight", "text_encoder.chars_to_molecules.LayerNorm.bias", "text_encoder.encoder.layer.0.attention.self.query.weight", "text_encoder.encoder.layer.0.attention.self.query.bias", "text_encoder.encoder.layer.0.attention.self.key.weight", "text_encoder.encoder.layer.0.attention.self.key.bias", "text_encoder.encoder.layer.0.attention.self.value.weight", "text_encoder.encoder.layer.0.attention.self.value.bias", "text_encoder.encoder.layer.0.attention.output.dense.weight", "text_encoder.encoder.layer.0.attention.output.dense.bias", "text_encoder.encoder.layer.0.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.0.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.0.intermediate.dense.weight", "text_encoder.encoder.layer.0.intermediate.dense.bias", "text_encoder.encoder.layer.0.output.dense.weight", "text_encoder.encoder.layer.0.output.dense.bias", "text_encoder.encoder.layer.0.output.LayerNorm.weight", "text_encoder.encoder.layer.0.output.LayerNorm.bias", "text_encoder.encoder.layer.1.attention.self.query.weight", "text_encoder.encoder.layer.1.attention.self.query.bias", "text_encoder.encoder.layer.1.attention.self.key.weight", "text_encoder.encoder.layer.1.attention.self.key.bias", "text_encoder.encoder.layer.1.attention.self.value.weight", "text_encoder.encoder.layer.1.attention.self.value.bias", "text_encoder.encoder.layer.1.attention.output.dense.weight", 
"text_encoder.encoder.layer.1.attention.output.dense.bias", "text_encoder.encoder.layer.1.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.1.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.1.intermediate.dense.weight", "text_encoder.encoder.layer.1.intermediate.dense.bias", "text_encoder.encoder.layer.1.output.dense.weight", "text_encoder.encoder.layer.1.output.dense.bias", "text_encoder.encoder.layer.1.output.LayerNorm.weight", "text_encoder.encoder.layer.1.output.LayerNorm.bias", "text_encoder.encoder.layer.2.attention.self.query.weight", "text_encoder.encoder.layer.2.attention.self.query.bias", "text_encoder.encoder.layer.2.attention.self.key.weight", "text_encoder.encoder.layer.2.attention.self.key.bias", "text_encoder.encoder.layer.2.attention.self.value.weight", "text_encoder.encoder.layer.2.attention.self.value.bias", "text_encoder.encoder.layer.2.attention.output.dense.weight", "text_encoder.encoder.layer.2.attention.output.dense.bias", "text_encoder.encoder.layer.2.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.2.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.2.intermediate.dense.weight", "text_encoder.encoder.layer.2.intermediate.dense.bias", "text_encoder.encoder.layer.2.output.dense.weight", "text_encoder.encoder.layer.2.output.dense.bias", "text_encoder.encoder.layer.2.output.LayerNorm.weight", "text_encoder.encoder.layer.2.output.LayerNorm.bias", "text_encoder.encoder.layer.3.attention.self.query.weight", "text_encoder.encoder.layer.3.attention.self.query.bias", "text_encoder.encoder.layer.3.attention.self.key.weight", "text_encoder.encoder.layer.3.attention.self.key.bias", "text_encoder.encoder.layer.3.attention.self.value.weight", "text_encoder.encoder.layer.3.attention.self.value.bias", "text_encoder.encoder.layer.3.attention.output.dense.weight", "text_encoder.encoder.layer.3.attention.output.dense.bias", "text_encoder.encoder.layer.3.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.3.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.3.intermediate.dense.weight", "text_encoder.encoder.layer.3.intermediate.dense.bias", "text_encoder.encoder.layer.3.output.dense.weight", "text_encoder.encoder.layer.3.output.dense.bias", "text_encoder.encoder.layer.3.output.LayerNorm.weight", "text_encoder.encoder.layer.3.output.LayerNorm.bias", "text_encoder.encoder.layer.4.attention.self.query.weight", "text_encoder.encoder.layer.4.attention.self.query.bias", "text_encoder.encoder.layer.4.attention.self.key.weight", "text_encoder.encoder.layer.4.attention.self.key.bias", "text_encoder.encoder.layer.4.attention.self.value.weight", "text_encoder.encoder.layer.4.attention.self.value.bias", "text_encoder.encoder.layer.4.attention.output.dense.weight", "text_encoder.encoder.layer.4.attention.output.dense.bias", "text_encoder.encoder.layer.4.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.4.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.4.intermediate.dense.weight", "text_encoder.encoder.layer.4.intermediate.dense.bias", "text_encoder.encoder.layer.4.output.dense.weight", "text_encoder.encoder.layer.4.output.dense.bias", "text_encoder.encoder.layer.4.output.LayerNorm.weight", "text_encoder.encoder.layer.4.output.LayerNorm.bias", "text_encoder.encoder.layer.5.attention.self.query.weight", "text_encoder.encoder.layer.5.attention.self.query.bias", "text_encoder.encoder.layer.5.attention.self.key.weight", "text_encoder.encoder.layer.5.attention.self.key.bias", 
"text_encoder.encoder.layer.5.attention.self.value.weight", "text_encoder.encoder.layer.5.attention.self.value.bias", "text_encoder.encoder.layer.5.attention.output.dense.weight", "text_encoder.encoder.layer.5.attention.output.dense.bias", "text_encoder.encoder.layer.5.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.5.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.5.intermediate.dense.weight", "text_encoder.encoder.layer.5.intermediate.dense.bias", "text_encoder.encoder.layer.5.output.dense.weight", "text_encoder.encoder.layer.5.output.dense.bias", "text_encoder.encoder.layer.5.output.LayerNorm.weight", "text_encoder.encoder.layer.5.output.LayerNorm.bias", "text_encoder.encoder.layer.6.attention.self.query.weight", "text_encoder.encoder.layer.6.attention.self.query.bias", "text_encoder.encoder.layer.6.attention.self.key.weight", "text_encoder.encoder.layer.6.attention.self.key.bias", "text_encoder.encoder.layer.6.attention.self.value.weight", "text_encoder.encoder.layer.6.attention.self.value.bias", "text_encoder.encoder.layer.6.attention.output.dense.weight", "text_encoder.encoder.layer.6.attention.output.dense.bias", "text_encoder.encoder.layer.6.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.6.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.6.intermediate.dense.weight", "text_encoder.encoder.layer.6.intermediate.dense.bias", "text_encoder.encoder.layer.6.output.dense.weight", "text_encoder.encoder.layer.6.output.dense.bias", "text_encoder.encoder.layer.6.output.LayerNorm.weight", "text_encoder.encoder.layer.6.output.LayerNorm.bias", "text_encoder.encoder.layer.7.attention.self.query.weight", "text_encoder.encoder.layer.7.attention.self.query.bias", "text_encoder.encoder.layer.7.attention.self.key.weight", "text_encoder.encoder.layer.7.attention.self.key.bias", "text_encoder.encoder.layer.7.attention.self.value.weight", "text_encoder.encoder.layer.7.attention.self.value.bias", "text_encoder.encoder.layer.7.attention.output.dense.weight", "text_encoder.encoder.layer.7.attention.output.dense.bias", "text_encoder.encoder.layer.7.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.7.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.7.intermediate.dense.weight", "text_encoder.encoder.layer.7.intermediate.dense.bias", "text_encoder.encoder.layer.7.output.dense.weight", "text_encoder.encoder.layer.7.output.dense.bias", "text_encoder.encoder.layer.7.output.LayerNorm.weight", "text_encoder.encoder.layer.7.output.LayerNorm.bias", "text_encoder.encoder.layer.8.attention.self.query.weight", "text_encoder.encoder.layer.8.attention.self.query.bias", "text_encoder.encoder.layer.8.attention.self.key.weight", "text_encoder.encoder.layer.8.attention.self.key.bias", "text_encoder.encoder.layer.8.attention.self.value.weight", "text_encoder.encoder.layer.8.attention.self.value.bias", "text_encoder.encoder.layer.8.attention.output.dense.weight", "text_encoder.encoder.layer.8.attention.output.dense.bias", "text_encoder.encoder.layer.8.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.8.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.8.intermediate.dense.weight", "text_encoder.encoder.layer.8.intermediate.dense.bias", "text_encoder.encoder.layer.8.output.dense.weight", "text_encoder.encoder.layer.8.output.dense.bias", "text_encoder.encoder.layer.8.output.LayerNorm.weight", "text_encoder.encoder.layer.8.output.LayerNorm.bias", "text_encoder.encoder.layer.9.attention.self.query.weight", 
"text_encoder.encoder.layer.9.attention.self.query.bias", "text_encoder.encoder.layer.9.attention.self.key.weight", "text_encoder.encoder.layer.9.attention.self.key.bias", "text_encoder.encoder.layer.9.attention.self.value.weight", "text_encoder.encoder.layer.9.attention.self.value.bias", "text_encoder.encoder.layer.9.attention.output.dense.weight", "text_encoder.encoder.layer.9.attention.output.dense.bias", "text_encoder.encoder.layer.9.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.9.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.9.intermediate.dense.weight", "text_encoder.encoder.layer.9.intermediate.dense.bias", "text_encoder.encoder.layer.9.output.dense.weight", "text_encoder.encoder.layer.9.output.dense.bias", "text_encoder.encoder.layer.9.output.LayerNorm.weight", "text_encoder.encoder.layer.9.output.LayerNorm.bias", "text_encoder.encoder.layer.10.attention.self.query.weight", "text_encoder.encoder.layer.10.attention.self.query.bias", "text_encoder.encoder.layer.10.attention.self.key.weight", "text_encoder.encoder.layer.10.attention.self.key.bias", "text_encoder.encoder.layer.10.attention.self.value.weight", "text_encoder.encoder.layer.10.attention.self.value.bias", "text_encoder.encoder.layer.10.attention.output.dense.weight", "text_encoder.encoder.layer.10.attention.output.dense.bias", "text_encoder.encoder.layer.10.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.10.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.10.intermediate.dense.weight", "text_encoder.encoder.layer.10.intermediate.dense.bias", "text_encoder.encoder.layer.10.output.dense.weight", "text_encoder.encoder.layer.10.output.dense.bias", "text_encoder.encoder.layer.10.output.LayerNorm.weight", "text_encoder.encoder.layer.10.output.LayerNorm.bias", "text_encoder.encoder.layer.11.attention.self.query.weight", "text_encoder.encoder.layer.11.attention.self.query.bias", "text_encoder.encoder.layer.11.attention.self.key.weight", "text_encoder.encoder.layer.11.attention.self.key.bias", "text_encoder.encoder.layer.11.attention.self.value.weight", "text_encoder.encoder.layer.11.attention.self.value.bias", "text_encoder.encoder.layer.11.attention.output.dense.weight", "text_encoder.encoder.layer.11.attention.output.dense.bias", "text_encoder.encoder.layer.11.attention.output.LayerNorm.weight", "text_encoder.encoder.layer.11.attention.output.LayerNorm.bias", "text_encoder.encoder.layer.11.intermediate.dense.weight", "text_encoder.encoder.layer.11.intermediate.dense.bias", "text_encoder.encoder.layer.11.output.dense.weight", "text_encoder.encoder.layer.11.output.dense.bias", "text_encoder.encoder.layer.11.output.LayerNorm.weight", "text_encoder.encoder.layer.11.output.LayerNorm.bias", "text_encoder.projection.conv.weight", "text_encoder.projection.conv.bias", "text_encoder.projection.LayerNorm.weight", "text_encoder.projection.LayerNorm.bias", "text_encoder.final_char_encoder.layer.0.attention.self.query.weight", "text_encoder.final_char_encoder.layer.0.attention.self.query.bias", "text_encoder.final_char_encoder.layer.0.attention.self.key.weight", "text_encoder.final_char_encoder.layer.0.attention.self.key.bias", "text_encoder.final_char_encoder.layer.0.attention.self.value.weight", "text_encoder.final_char_encoder.layer.0.attention.self.value.bias", "text_encoder.final_char_encoder.layer.0.attention.output.dense.weight", "text_encoder.final_char_encoder.layer.0.attention.output.dense.bias", "text_encoder.final_char_encoder.layer.0.attention.output.LayerNorm.weight", 
"text_encoder.final_char_encoder.layer.0.attention.output.LayerNorm.bias", "text_encoder.final_char_encoder.layer.0.intermediate.dense.weight", "text_encoder.final_char_encoder.layer.0.intermediate.dense.bias", "text_encoder.final_char_encoder.layer.0.output.dense.weight", "text_encoder.final_char_encoder.layer.0.output.dense.bias", "text_encoder.final_char_encoder.layer.0.output.LayerNorm.weight", "text_encoder.final_char_encoder.layer.0.output.LayerNorm.bias", "text_encoder.pooler.dense.weight", "text_encoder.pooler.dense.bias", "time_embed.0.weight", "time_embed.0.bias", "time_embed.2.weight", "time_embed.2.bias", "label_emb.weight", "input_blocks.0.0.weight", "input_blocks.0.0.bias", "input_blocks.1.0.in_layers.0.weight", "input_blocks.1.0.in_layers.0.bias", "input_blocks.1.0.in_layers.2.weight", "input_blocks.1.0.in_layers.2.bias", "input_blocks.1.0.emb_layers.1.weight", "input_blocks.1.0.emb_layers.1.bias", "input_blocks.1.0.out_layers.0.weight", "input_blocks.1.0.out_layers.0.bias", "input_blocks.1.0.out_layers.3.weight", "input_blocks.1.0.out_layers.3.bias", "input_blocks.1.1.norm.weight", "input_blocks.1.1.norm.bias", "input_blocks.1.1.proj_in.weight", "input_blocks.1.1.proj_in.bias", "input_blocks.1.1.transformer_blocks.0.attn1.to_q.weight", "input_blocks.1.1.transformer_blocks.0.attn1.to_k.weight", "input_blocks.1.1.transformer_blocks.0.attn1.to_v.weight", "input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.weight", "input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.bias", "input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.weight", "input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.bias", "input_blocks.1.1.transformer_blocks.0.ff.net.2.weight", "input_blocks.1.1.transformer_blocks.0.ff.net.2.bias", "input_blocks.1.1.transformer_blocks.0.attn2.to_q.weight", "input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight", "input_blocks.1.1.transformer_blocks.0.attn2.to_v.weight", "input_blocks.1.1.transformer_blocks.0.attn2.to_out.0.weight", "input_blocks.1.1.transformer_blocks.0.attn2.to_out.0.bias", "input_blocks.1.1.transformer_blocks.0.norm1.weight", "input_blocks.1.1.transformer_blocks.0.norm1.bias", "input_blocks.1.1.transformer_blocks.0.norm2.weight", "input_blocks.1.1.transformer_blocks.0.norm2.bias", "input_blocks.1.1.transformer_blocks.0.norm3.weight", "input_blocks.1.1.transformer_blocks.0.norm3.bias", "input_blocks.1.1.proj_out.weight", "input_blocks.1.1.proj_out.bias", "input_blocks.2.0.op.weight", "input_blocks.2.0.op.bias", "input_blocks.3.0.in_layers.0.weight", "input_blocks.3.0.in_layers.0.bias", "input_blocks.3.0.in_layers.2.weight", "input_blocks.3.0.in_layers.2.bias", "input_blocks.3.0.emb_layers.1.weight", "input_blocks.3.0.emb_layers.1.bias", "input_blocks.3.0.out_layers.0.weight", "input_blocks.3.0.out_layers.0.bias", "input_blocks.3.0.out_layers.3.weight", "input_blocks.3.0.out_layers.3.bias", "middle_block.0.in_layers.0.weight", "middle_block.0.in_layers.0.bias", "middle_block.0.in_layers.2.weight", "middle_block.0.in_layers.2.bias", "middle_block.0.emb_layers.1.weight", "middle_block.0.emb_layers.1.bias", "middle_block.0.out_layers.0.weight", "middle_block.0.out_layers.0.bias", "middle_block.0.out_layers.3.weight", "middle_block.0.out_layers.3.bias", "middle_block.1.norm.weight", "middle_block.1.norm.bias", "middle_block.1.proj_in.weight", "middle_block.1.proj_in.bias", "middle_block.1.transformer_blocks.0.attn1.to_q.weight", "middle_block.1.transformer_blocks.0.attn1.to_k.weight", "middle_block.1.transformer_blocks.0.attn1.to_v.weight", 
"middle_block.1.transformer_blocks.0.attn1.to_out.0.weight", "middle_block.1.transformer_blocks.0.attn1.to_out.0.bias", "middle_block.1.transformer_blocks.0.ff.net.0.proj.weight", "middle_block.1.transformer_blocks.0.ff.net.0.proj.bias", "middle_block.1.transformer_blocks.0.ff.net.2.weight", "middle_block.1.transformer_blocks.0.ff.net.2.bias", "middle_block.1.transformer_blocks.0.attn2.to_q.weight", "middle_block.1.transformer_blocks.0.attn2.to_k.weight", "middle_block.1.transformer_blocks.0.attn2.to_v.weight", "middle_block.1.transformer_blocks.0.attn2.to_out.0.weight", "middle_block.1.transformer_blocks.0.attn2.to_out.0.bias", "middle_block.1.transformer_blocks.0.norm1.weight", "middle_block.1.transformer_blocks.0.norm1.bias", "middle_block.1.transformer_blocks.0.norm2.weight", "middle_block.1.transformer_blocks.0.norm2.bias", "middle_block.1.transformer_blocks.0.norm3.weight", "middle_block.1.transformer_blocks.0.norm3.bias", "middle_block.1.proj_out.weight", "middle_block.1.proj_out.bias", "middle_block.2.in_layers.0.weight", "middle_block.2.in_layers.0.bias", "middle_block.2.in_layers.2.weight", "middle_block.2.in_layers.2.bias", "middle_block.2.emb_layers.1.weight", "middle_block.2.emb_layers.1.bias", "middle_block.2.out_layers.0.weight", "middle_block.2.out_layers.0.bias", "middle_block.2.out_layers.3.weight", "middle_block.2.out_layers.3.bias", "output_blocks.0.0.in_layers.0.weight", "output_blocks.0.0.in_layers.0.bias", "output_blocks.0.0.in_layers.2.weight", "output_blocks.0.0.in_layers.2.bias", "output_blocks.0.0.emb_layers.1.weight", "output_blocks.0.0.emb_layers.1.bias", "output_blocks.0.0.out_layers.0.weight", "output_blocks.0.0.out_layers.0.bias", "output_blocks.0.0.out_layers.3.weight", "output_blocks.0.0.out_layers.3.bias", "output_blocks.0.0.skip_connection.weight", "output_blocks.0.0.skip_connection.bias", "output_blocks.1.0.in_layers.0.weight", "output_blocks.1.0.in_layers.0.bias", "output_blocks.1.0.in_layers.2.weight", "output_blocks.1.0.in_layers.2.bias", "output_blocks.1.0.emb_layers.1.weight", "output_blocks.1.0.emb_layers.1.bias", "output_blocks.1.0.out_layers.0.weight", "output_blocks.1.0.out_layers.0.bias", "output_blocks.1.0.out_layers.3.weight", "output_blocks.1.0.out_layers.3.bias", "output_blocks.1.0.skip_connection.weight", "output_blocks.1.0.skip_connection.bias", "output_blocks.1.1.conv.weight", "output_blocks.1.1.conv.bias", "output_blocks.2.0.in_layers.0.weight", "output_blocks.2.0.in_layers.0.bias", "output_blocks.2.0.in_layers.2.weight", "output_blocks.2.0.in_layers.2.bias", "output_blocks.2.0.emb_layers.1.weight", "output_blocks.2.0.emb_layers.1.bias", "output_blocks.2.0.out_layers.0.weight", "output_blocks.2.0.out_layers.0.bias", "output_blocks.2.0.out_layers.3.weight", "output_blocks.2.0.out_layers.3.bias", "output_blocks.2.0.skip_connection.weight", "output_blocks.2.0.skip_connection.bias", "output_blocks.2.1.norm.weight", "output_blocks.2.1.norm.bias", "output_blocks.2.1.proj_in.weight", "output_blocks.2.1.proj_in.bias", "output_blocks.2.1.transformer_blocks.0.attn1.to_q.weight", "output_blocks.2.1.transformer_blocks.0.attn1.to_k.weight", "output_blocks.2.1.transformer_blocks.0.attn1.to_v.weight", "output_blocks.2.1.transformer_blocks.0.attn1.to_out.0.weight", "output_blocks.2.1.transformer_blocks.0.attn1.to_out.0.bias", "output_blocks.2.1.transformer_blocks.0.ff.net.0.proj.weight", "output_blocks.2.1.transformer_blocks.0.ff.net.0.proj.bias", "output_blocks.2.1.transformer_blocks.0.ff.net.2.weight", 
"output_blocks.2.1.transformer_blocks.0.ff.net.2.bias", "output_blocks.2.1.transformer_blocks.0.attn2.to_q.weight", "output_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", "output_blocks.2.1.transformer_blocks.0.attn2.to_v.weight", "output_blocks.2.1.transformer_blocks.0.attn2.to_out.0.weight", "output_blocks.2.1.transformer_blocks.0.attn2.to_out.0.bias", "output_blocks.2.1.transformer_blocks.0.norm1.weight", "output_blocks.2.1.transformer_blocks.0.norm1.bias", "output_blocks.2.1.transformer_blocks.0.norm2.weight", "output_blocks.2.1.transformer_blocks.0.norm2.bias", "output_blocks.2.1.transformer_blocks.0.norm3.weight", "output_blocks.2.1.transformer_blocks.0.norm3.bias", "output_blocks.2.1.proj_out.weight", "output_blocks.2.1.proj_out.bias", "output_blocks.3.0.in_layers.0.weight", "output_blocks.3.0.in_layers.0.bias", "output_blocks.3.0.in_layers.2.weight", "output_blocks.3.0.in_layers.2.bias", "output_blocks.3.0.emb_layers.1.weight", "output_blocks.3.0.emb_layers.1.bias", "output_blocks.3.0.out_layers.0.weight", "output_blocks.3.0.out_layers.0.bias", "output_blocks.3.0.out_layers.3.weight", "output_blocks.3.0.out_layers.3.bias", "output_blocks.3.0.skip_connection.weight", "output_blocks.3.0.skip_connection.bias", "output_blocks.3.1.norm.weight", "output_blocks.3.1.norm.bias", "output_blocks.3.1.proj_in.weight", "output_blocks.3.1.proj_in.bias", "output_blocks.3.1.transformer_blocks.0.attn1.to_q.weight", "output_blocks.3.1.transformer_blocks.0.attn1.to_k.weight", "output_blocks.3.1.transformer_blocks.0.attn1.to_v.weight", "output_blocks.3.1.transformer_blocks.0.attn1.to_out.0.weight", "output_blocks.3.1.transformer_blocks.0.attn1.to_out.0.bias", "output_blocks.3.1.transformer_blocks.0.ff.net.0.proj.weight", "output_blocks.3.1.transformer_blocks.0.ff.net.0.proj.bias", "output_blocks.3.1.transformer_blocks.0.ff.net.2.weight", "output_blocks.3.1.transformer_blocks.0.ff.net.2.bias", "output_blocks.3.1.transformer_blocks.0.attn2.to_q.weight", "output_blocks.3.1.transformer_blocks.0.attn2.to_k.weight", "output_blocks.3.1.transformer_blocks.0.attn2.to_v.weight", "output_blocks.3.1.transformer_blocks.0.attn2.to_out.0.weight", "output_blocks.3.1.transformer_blocks.0.attn2.to_out.0.bias", "output_blocks.3.1.transformer_blocks.0.norm1.weight", "output_blocks.3.1.transformer_blocks.0.norm1.bias", "output_blocks.3.1.transformer_blocks.0.norm2.weight", "output_blocks.3.1.transformer_blocks.0.norm2.bias", "output_blocks.3.1.transformer_blocks.0.norm3.weight", "output_blocks.3.1.transformer_blocks.0.norm3.bias", "output_blocks.3.1.proj_out.weight", "output_blocks.3.1.proj_out.bias", "out.0.weight", "out.0.bias", "out.2.weight", "out.2.bias", "style_lin.weight", "style_lin.bias", "text_lin.weight", "text_lin.bias".
Unexpected key(s) in state_dict: "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_0.weight", "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_1.weight", "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_2.weight", "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_3.weight", "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_4.weight", "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_5.weight", "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_6.weight", "module.text_encoder.module.char_embeddings.HashBucketCodepointEmbedder_7.weight", "module.text_encoder.module.char_embeddings.char_position_embeddings.weight", "module.text_encoder.module.char_embeddings.token_type_embeddings.weight", "module.text_encoder.module.char_embeddings.LayerNorm.weight", "module.text_encoder.module.char_embeddings.LayerNorm.bias", "module.text_encoder.module.initial_char_encoder.layer.0.attention.self.query.weight", "module.text_encoder.module.initial_char_encoder.layer.0.attention.self.query.bias", "module.text_encoder.module.initial_char_encoder.layer.0.attention.self.key.weight", "module.text_encoder.module.initial_char_encoder.layer.0.attention.self.key.bias", "module.text_encoder.module.initial_char_encoder.layer.0.attention.self.value.weight", "module.text_encoder.module.initial_char_encoder.layer.0.attention.self.value.bias", "module.text_encoder.module.initial_char_encoder.layer.0.attention.output.dense.weight", "module.text_encoder.module.initial_char_encoder.layer.0.attention.output.dense.bias", "module.text_encoder.module.initial_char_encoder.layer.0.attention.output.LayerNorm.weight", "module.text_encoder.module.initial_char_encoder.layer.0.attention.output.LayerNorm.bias", "module.text_encoder.module.initial_char_encoder.layer.0.intermediate.dense.weight", "module.text_encoder.module.initial_char_encoder.layer.0.intermediate.dense.bias", "module.text_encoder.module.initial_char_encoder.layer.0.output.dense.weight", "module.text_encoder.module.initial_char_encoder.layer.0.output.dense.bias", "module.text_encoder.module.initial_char_encoder.layer.0.output.LayerNorm.weight", "module.text_encoder.module.initial_char_encoder.layer.0.output.LayerNorm.bias", "module.text_encoder.module.chars_to_molecules.conv.weight", "module.text_encoder.module.chars_to_molecules.conv.bias", "module.text_encoder.module.chars_to_molecules.LayerNorm.weight", "module.text_encoder.module.chars_to_molecules.LayerNorm.bias", "module.text_encoder.module.encoder.layer.0.attention.self.query.weight", "module.text_encoder.module.encoder.layer.0.attention.self.query.bias", "module.text_encoder.module.encoder.layer.0.attention.self.key.weight", "module.text_encoder.module.encoder.layer.0.attention.self.key.bias", "module.text_encoder.module.encoder.layer.0.attention.self.value.weight", "module.text_encoder.module.encoder.layer.0.attention.self.value.bias", "module.text_encoder.module.encoder.layer.0.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.0.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.0.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.0.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.0.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.0.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.0.output.dense.weight", 
"module.text_encoder.module.encoder.layer.0.output.dense.bias", "module.text_encoder.module.encoder.layer.0.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.0.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.1.attention.self.query.weight", "module.text_encoder.module.encoder.layer.1.attention.self.query.bias", "module.text_encoder.module.encoder.layer.1.attention.self.key.weight", "module.text_encoder.module.encoder.layer.1.attention.self.key.bias", "module.text_encoder.module.encoder.layer.1.attention.self.value.weight", "module.text_encoder.module.encoder.layer.1.attention.self.value.bias", "module.text_encoder.module.encoder.layer.1.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.1.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.1.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.1.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.1.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.1.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.1.output.dense.weight", "module.text_encoder.module.encoder.layer.1.output.dense.bias", "module.text_encoder.module.encoder.layer.1.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.1.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.2.attention.self.query.weight", "module.text_encoder.module.encoder.layer.2.attention.self.query.bias", "module.text_encoder.module.encoder.layer.2.attention.self.key.weight", "module.text_encoder.module.encoder.layer.2.attention.self.key.bias", "module.text_encoder.module.encoder.layer.2.attention.self.value.weight", "module.text_encoder.module.encoder.layer.2.attention.self.value.bias", "module.text_encoder.module.encoder.layer.2.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.2.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.2.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.2.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.2.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.2.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.2.output.dense.weight", "module.text_encoder.module.encoder.layer.2.output.dense.bias", "module.text_encoder.module.encoder.layer.2.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.2.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.3.attention.self.query.weight", "module.text_encoder.module.encoder.layer.3.attention.self.query.bias", "module.text_encoder.module.encoder.layer.3.attention.self.key.weight", "module.text_encoder.module.encoder.layer.3.attention.self.key.bias", "module.text_encoder.module.encoder.layer.3.attention.self.value.weight", "module.text_encoder.module.encoder.layer.3.attention.self.value.bias", "module.text_encoder.module.encoder.layer.3.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.3.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.3.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.3.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.3.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.3.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.3.output.dense.weight", "module.text_encoder.module.encoder.layer.3.output.dense.bias", 
"module.text_encoder.module.encoder.layer.3.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.3.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.4.attention.self.query.weight", "module.text_encoder.module.encoder.layer.4.attention.self.query.bias", "module.text_encoder.module.encoder.layer.4.attention.self.key.weight", "module.text_encoder.module.encoder.layer.4.attention.self.key.bias", "module.text_encoder.module.encoder.layer.4.attention.self.value.weight", "module.text_encoder.module.encoder.layer.4.attention.self.value.bias", "module.text_encoder.module.encoder.layer.4.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.4.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.4.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.4.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.4.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.4.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.4.output.dense.weight", "module.text_encoder.module.encoder.layer.4.output.dense.bias", "module.text_encoder.module.encoder.layer.4.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.4.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.5.attention.self.query.weight", "module.text_encoder.module.encoder.layer.5.attention.self.query.bias", "module.text_encoder.module.encoder.layer.5.attention.self.key.weight", "module.text_encoder.module.encoder.layer.5.attention.self.key.bias", "module.text_encoder.module.encoder.layer.5.attention.self.value.weight", "module.text_encoder.module.encoder.layer.5.attention.self.value.bias", "module.text_encoder.module.encoder.layer.5.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.5.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.5.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.5.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.5.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.5.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.5.output.dense.weight", "module.text_encoder.module.encoder.layer.5.output.dense.bias", "module.text_encoder.module.encoder.layer.5.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.5.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.6.attention.self.query.weight", "module.text_encoder.module.encoder.layer.6.attention.self.query.bias", "module.text_encoder.module.encoder.layer.6.attention.self.key.weight", "module.text_encoder.module.encoder.layer.6.attention.self.key.bias", "module.text_encoder.module.encoder.layer.6.attention.self.value.weight", "module.text_encoder.module.encoder.layer.6.attention.self.value.bias", "module.text_encoder.module.encoder.layer.6.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.6.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.6.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.6.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.6.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.6.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.6.output.dense.weight", "module.text_encoder.module.encoder.layer.6.output.dense.bias", "module.text_encoder.module.encoder.layer.6.output.LayerNorm.weight", 
"module.text_encoder.module.encoder.layer.6.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.7.attention.self.query.weight", "module.text_encoder.module.encoder.layer.7.attention.self.query.bias", "module.text_encoder.module.encoder.layer.7.attention.self.key.weight", "module.text_encoder.module.encoder.layer.7.attention.self.key.bias", "module.text_encoder.module.encoder.layer.7.attention.self.value.weight", "module.text_encoder.module.encoder.layer.7.attention.self.value.bias", "module.text_encoder.module.encoder.layer.7.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.7.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.7.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.7.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.7.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.7.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.7.output.dense.weight", "module.text_encoder.module.encoder.layer.7.output.dense.bias", "module.text_encoder.module.encoder.layer.7.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.7.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.8.attention.self.query.weight", "module.text_encoder.module.encoder.layer.8.attention.self.query.bias", "module.text_encoder.module.encoder.layer.8.attention.self.key.weight", "module.text_encoder.module.encoder.layer.8.attention.self.key.bias", "module.text_encoder.module.encoder.layer.8.attention.self.value.weight", "module.text_encoder.module.encoder.layer.8.attention.self.value.bias", "module.text_encoder.module.encoder.layer.8.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.8.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.8.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.8.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.8.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.8.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.8.output.dense.weight", "module.text_encoder.module.encoder.layer.8.output.dense.bias", "module.text_encoder.module.encoder.layer.8.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.8.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.9.attention.self.query.weight", "module.text_encoder.module.encoder.layer.9.attention.self.query.bias", "module.text_encoder.module.encoder.layer.9.attention.self.key.weight", "module.text_encoder.module.encoder.layer.9.attention.self.key.bias", "module.text_encoder.module.encoder.layer.9.attention.self.value.weight", "module.text_encoder.module.encoder.layer.9.attention.self.value.bias", "module.text_encoder.module.encoder.layer.9.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.9.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.9.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.9.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.9.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.9.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.9.output.dense.weight", "module.text_encoder.module.encoder.layer.9.output.dense.bias", "module.text_encoder.module.encoder.layer.9.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.9.output.LayerNorm.bias", 
"module.text_encoder.module.encoder.layer.10.attention.self.query.weight", "module.text_encoder.module.encoder.layer.10.attention.self.query.bias", "module.text_encoder.module.encoder.layer.10.attention.self.key.weight", "module.text_encoder.module.encoder.layer.10.attention.self.key.bias", "module.text_encoder.module.encoder.layer.10.attention.self.value.weight", "module.text_encoder.module.encoder.layer.10.attention.self.value.bias", "module.text_encoder.module.encoder.layer.10.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.10.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.10.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.10.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.10.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.10.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.10.output.dense.weight", "module.text_encoder.module.encoder.layer.10.output.dense.bias", "module.text_encoder.module.encoder.layer.10.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.10.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.11.attention.self.query.weight", "module.text_encoder.module.encoder.layer.11.attention.self.query.bias", "module.text_encoder.module.encoder.layer.11.attention.self.key.weight", "module.text_encoder.module.encoder.layer.11.attention.self.key.bias", "module.text_encoder.module.encoder.layer.11.attention.self.value.weight", "module.text_encoder.module.encoder.layer.11.attention.self.value.bias", "module.text_encoder.module.encoder.layer.11.attention.output.dense.weight", "module.text_encoder.module.encoder.layer.11.attention.output.dense.bias", "module.text_encoder.module.encoder.layer.11.attention.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.11.attention.output.LayerNorm.bias", "module.text_encoder.module.encoder.layer.11.intermediate.dense.weight", "module.text_encoder.module.encoder.layer.11.intermediate.dense.bias", "module.text_encoder.module.encoder.layer.11.output.dense.weight", "module.text_encoder.module.encoder.layer.11.output.dense.bias", "module.text_encoder.module.encoder.layer.11.output.LayerNorm.weight", "module.text_encoder.module.encoder.layer.11.output.LayerNorm.bias", "module.text_encoder.module.projection.conv.weight", "module.text_encoder.module.projection.conv.bias", "module.text_encoder.module.projection.LayerNorm.weight", "module.text_encoder.module.projection.LayerNorm.bias", "module.text_encoder.module.final_char_encoder.layer.0.attention.self.query.weight", "module.text_encoder.module.final_char_encoder.layer.0.attention.self.query.bias", "module.text_encoder.module.final_char_encoder.layer.0.attention.self.key.weight", "module.text_encoder.module.final_char_encoder.layer.0.attention.self.key.bias", "module.text_encoder.module.final_char_encoder.layer.0.attention.self.value.weight", "module.text_encoder.module.final_char_encoder.layer.0.attention.self.value.bias", "module.text_encoder.module.final_char_encoder.layer.0.attention.output.dense.weight", "module.text_encoder.module.final_char_encoder.layer.0.attention.output.dense.bias", "module.text_encoder.module.final_char_encoder.layer.0.attention.output.LayerNorm.weight", "module.text_encoder.module.final_char_encoder.layer.0.attention.output.LayerNorm.bias", "module.text_encoder.module.final_char_encoder.layer.0.intermediate.dense.weight", 
"module.text_encoder.module.final_char_encoder.layer.0.intermediate.dense.bias", "module.text_encoder.module.final_char_encoder.layer.0.output.dense.weight", "module.text_encoder.module.final_char_encoder.layer.0.output.dense.bias", "module.text_encoder.module.final_char_encoder.layer.0.output.LayerNorm.weight", "module.text_encoder.module.final_char_encoder.layer.0.output.LayerNorm.bias", "module.text_encoder.module.pooler.dense.weight", "module.text_encoder.module.pooler.dense.bias", "module.time_embed.0.weight", "module.time_embed.0.bias", "module.time_embed.2.weight", "module.time_embed.2.bias", "module.label_emb.weight", "module.input_blocks.0.0.weight", "module.input_blocks.0.0.bias", "module.input_blocks.1.0.in_layers.0.weight", "module.input_blocks.1.0.in_layers.0.bias", "module.input_blocks.1.0.in_layers.2.weight", "module.input_blocks.1.0.in_layers.2.bias", "module.input_blocks.1.0.emb_layers.1.weight", "module.input_blocks.1.0.emb_layers.1.bias", "module.input_blocks.1.0.out_layers.0.weight", "module.input_blocks.1.0.out_layers.0.bias", "module.input_blocks.1.0.out_layers.3.weight", "module.input_blocks.1.0.out_layers.3.bias", "module.input_blocks.1.1.norm.weight", "module.input_blocks.1.1.norm.bias", "module.input_blocks.1.1.proj_in.weight", "module.input_blocks.1.1.proj_in.bias", "module.input_blocks.1.1.transformer_blocks.0.attn1.to_q.weight", "module.input_blocks.1.1.transformer_blocks.0.attn1.to_k.weight", "module.input_blocks.1.1.transformer_blocks.0.attn1.to_v.weight", "module.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.weight", "module.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.bias", "module.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.weight", "module.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.bias", "module.input_blocks.1.1.transformer_blocks.0.ff.net.2.weight", "module.input_blocks.1.1.transformer_blocks.0.ff.net.2.bias", "module.input_blocks.1.1.transformer_blocks.0.attn2.to_q.weight", "module.input_blocks.1.1.transformer_blocks.0.attn2.to_k.weight", "module.input_blocks.1.1.transformer_blocks.0.attn2.to_v.weight", "module.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0.weight", "module.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0.bias", "module.input_blocks.1.1.transformer_blocks.0.norm1.weight", "module.input_blocks.1.1.transformer_blocks.0.norm1.bias", "module.input_blocks.1.1.transformer_blocks.0.norm2.weight", "module.input_blocks.1.1.transformer_blocks.0.norm2.bias", "module.input_blocks.1.1.transformer_blocks.0.norm3.weight", "module.input_blocks.1.1.transformer_blocks.0.norm3.bias", "module.input_blocks.1.1.proj_out.weight", "module.input_blocks.1.1.proj_out.bias", "module.input_blocks.2.0.op.weight", "module.input_blocks.2.0.op.bias", "module.input_blocks.3.0.in_layers.0.weight", "module.input_blocks.3.0.in_layers.0.bias", "module.input_blocks.3.0.in_layers.2.weight", "module.input_blocks.3.0.in_layers.2.bias", "module.input_blocks.3.0.emb_layers.1.weight", "module.input_blocks.3.0.emb_layers.1.bias", "module.input_blocks.3.0.out_layers.0.weight", "module.input_blocks.3.0.out_layers.0.bias", "module.input_blocks.3.0.out_layers.3.weight", "module.input_blocks.3.0.out_layers.3.bias", "module.middle_block.0.in_layers.0.weight", "module.middle_block.0.in_layers.0.bias", "module.middle_block.0.in_layers.2.weight", "module.middle_block.0.in_layers.2.bias", "module.middle_block.0.emb_layers.1.weight", "module.middle_block.0.emb_layers.1.bias", "module.middle_block.0.out_layers.0.weight", 
"module.middle_block.0.out_layers.0.bias", "module.middle_block.0.out_layers.3.weight", "module.middle_block.0.out_layers.3.bias", "module.middle_block.1.norm.weight", "module.middle_block.1.norm.bias", "module.middle_block.1.proj_in.weight", "module.middle_block.1.proj_in.bias", "module.middle_block.1.transformer_blocks.0.attn1.to_q.weight", "module.middle_block.1.transformer_blocks.0.attn1.to_k.weight", "module.middle_block.1.transformer_blocks.0.attn1.to_v.weight", "module.middle_block.1.transformer_blocks.0.attn1.to_out.0.weight", "module.middle_block.1.transformer_blocks.0.attn1.to_out.0.bias", "module.middle_block.1.transformer_blocks.0.ff.net.0.proj.weight", "module.middle_block.1.transformer_blocks.0.ff.net.0.proj.bias", "module.middle_block.1.transformer_blocks.0.ff.net.2.weight", "module.middle_block.1.transformer_blocks.0.ff.net.2.bias", "module.middle_block.1.transformer_blocks.0.attn2.to_q.weight", "module.middle_block.1.transformer_blocks.0.attn2.to_k.weight", "module.middle_block.1.transformer_blocks.0.attn2.to_v.weight", "module.middle_block.1.transformer_blocks.0.attn2.to_out.0.weight", "module.middle_block.1.transformer_blocks.0.attn2.to_out.0.bias", "module.middle_block.1.transformer_blocks.0.norm1.weight", "module.middle_block.1.transformer_blocks.0.norm1.bias", "module.middle_block.1.transformer_blocks.0.norm2.weight", "module.middle_block.1.transformer_blocks.0.norm2.bias", "module.middle_block.1.transformer_blocks.0.norm3.weight", "module.middle_block.1.transformer_blocks.0.norm3.bias", "module.middle_block.1.proj_out.weight", "module.middle_block.1.proj_out.bias", "module.middle_block.2.in_layers.0.weight", "module.middle_block.2.in_layers.0.bias", "module.middle_block.2.in_layers.2.weight", "module.middle_block.2.in_layers.2.bias", "module.middle_block.2.emb_layers.1.weight", "module.middle_block.2.emb_layers.1.bias", "module.middle_block.2.out_layers.0.weight", "module.middle_block.2.out_layers.0.bias", "module.middle_block.2.out_layers.3.weight", "module.middle_block.2.out_layers.3.bias", "module.output_blocks.0.0.in_layers.0.weight", "module.output_blocks.0.0.in_layers.0.bias", "module.output_blocks.0.0.in_layers.2.weight", "module.output_blocks.0.0.in_layers.2.bias", "module.output_blocks.0.0.emb_layers.1.weight", "module.output_blocks.0.0.emb_layers.1.bias", "module.output_blocks.0.0.out_layers.0.weight", "module.output_blocks.0.0.out_layers.0.bias", "module.output_blocks.0.0.out_layers.3.weight", "module.output_blocks.0.0.out_layers.3.bias", "module.output_blocks.0.0.skip_connection.weight", "module.output_blocks.0.0.skip_connection.bias", "module.output_blocks.1.0.in_layers.0.weight", "module.output_blocks.1.0.in_layers.0.bias", "module.output_blocks.1.0.in_layers.2.weight", "module.output_blocks.1.0.in_layers.2.bias", "module.output_blocks.1.0.emb_layers.1.weight", "module.output_blocks.1.0.emb_layers.1.bias", "module.output_blocks.1.0.out_layers.0.weight", "module.output_blocks.1.0.out_layers.0.bias", "module.output_blocks.1.0.out_layers.3.weight", "module.output_blocks.1.0.out_layers.3.bias", "module.output_blocks.1.0.skip_connection.weight", "module.output_blocks.1.0.skip_connection.bias", "module.output_blocks.1.1.conv.weight", "module.output_blocks.1.1.conv.bias", "module.output_blocks.2.0.in_layers.0.weight", "module.output_blocks.2.0.in_layers.0.bias", "module.output_blocks.2.0.in_layers.2.weight", "module.output_blocks.2.0.in_layers.2.bias", "module.output_blocks.2.0.emb_layers.1.weight", "module.output_blocks.2.0.emb_layers.1.bias", 
"module.output_blocks.2.0.out_layers.0.weight", "module.output_blocks.2.0.out_layers.0.bias", "module.output_blocks.2.0.out_layers.3.weight", "module.output_blocks.2.0.out_layers.3.bias", "module.output_blocks.2.0.skip_connection.weight", "module.output_blocks.2.0.skip_connection.bias", "module.output_blocks.2.1.norm.weight", "module.output_blocks.2.1.norm.bias", "module.output_blocks.2.1.proj_in.weight", "module.output_blocks.2.1.proj_in.bias", "module.output_blocks.2.1.transformer_blocks.0.attn1.to_q.weight", "module.output_blocks.2.1.transformer_blocks.0.attn1.to_k.weight", "module.output_blocks.2.1.transformer_blocks.0.attn1.to_v.weight", "module.output_blocks.2.1.transformer_blocks.0.attn1.to_out.0.weight", "module.output_blocks.2.1.transformer_blocks.0.attn1.to_out.0.bias", "module.output_blocks.2.1.transformer_blocks.0.ff.net.0.proj.weight", "module.output_blocks.2.1.transformer_blocks.0.ff.net.0.proj.bias", "module.output_blocks.2.1.transformer_blocks.0.ff.net.2.weight", "module.output_blocks.2.1.transformer_blocks.0.ff.net.2.bias", "module.output_blocks.2.1.transformer_blocks.0.attn2.to_q.weight", "module.output_blocks.2.1.transformer_blocks.0.attn2.to_k.weight", "module.output_blocks.2.1.transformer_blocks.0.attn2.to_v.weight", "module.output_blocks.2.1.transformer_blocks.0.attn2.to_out.0.weight", "module.output_blocks.2.1.transformer_blocks.0.attn2.to_out.0.bias", "module.output_blocks.2.1.transformer_blocks.0.norm1.weight", "module.output_blocks.2.1.transformer_blocks.0.norm1.bias", "module.output_blocks.2.1.transformer_blocks.0.norm2.weight", "module.output_blocks.2.1.transformer_blocks.0.norm2.bias", "module.output_blocks.2.1.transformer_blocks.0.norm3.weight", "module.output_blocks.2.1.transformer_blocks.0.norm3.bias", "module.output_blocks.2.1.proj_out.weight", "module.output_blocks.2.1.proj_out.bias", "module.output_blocks.3.0.in_layers.0.weight", "module.output_blocks.3.0.in_layers.0.bias", "module.output_blocks.3.0.in_layers.2.weight", "module.output_blocks.3.0.in_layers.2.bias", "module.output_blocks.3.0.emb_layers.1.weight", "module.output_blocks.3.0.emb_layers.1.bias", "module.output_blocks.3.0.out_layers.0.weight", "module.output_blocks.3.0.out_layers.0.bias", "module.output_blocks.3.0.out_layers.3.weight", "module.output_blocks.3.0.out_layers.3.bias", "module.output_blocks.3.0.skip_connection.weight", "module.output_blocks.3.0.skip_connection.bias", "module.output_blocks.3.1.norm.weight", "module.output_blocks.3.1.norm.bias", "module.output_blocks.3.1.proj_in.weight", "module.output_blocks.3.1.proj_in.bias", "module.output_blocks.3.1.transformer_blocks.0.attn1.to_q.weight", "module.output_blocks.3.1.transformer_blocks.0.attn1.to_k.weight", "module.output_blocks.3.1.transformer_blocks.0.attn1.to_v.weight", "module.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0.weight", "module.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0.bias", "module.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj.weight", "module.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj.bias", "module.output_blocks.3.1.transformer_blocks.0.ff.net.2.weight", "module.output_blocks.3.1.transformer_blocks.0.ff.net.2.bias", "module.output_blocks.3.1.transformer_blocks.0.attn2.to_q.weight", "module.output_blocks.3.1.transformer_blocks.0.attn2.to_k.weight", "module.output_blocks.3.1.transformer_blocks.0.attn2.to_v.weight", "module.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0.weight", "module.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0.bias", 
"module.output_blocks.3.1.transformer_blocks.0.norm1.weight", "module.output_blocks.3.1.transformer_blocks.0.norm1.bias", "module.output_blocks.3.1.transformer_blocks.0.norm2.weight", "module.output_blocks.3.1.transformer_blocks.0.norm2.bias", "module.output_blocks.3.1.transformer_blocks.0.norm3.weight", "module.output_blocks.3.1.transformer_blocks.0.norm3.bias", "module.output_blocks.3.1.proj_out.weight", "module.output_blocks.3.1.proj_out.bias", "module.out.0.weight", "module.out.0.bias", "module.out.2.weight", "module.out.2.bias", "module.style_lin.weight", "module.style_lin.bias", "module.text_lin.weight", "module.text_lin.bias".