You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
RuntimeError: Given groups=1, weight of size [64, 22, 4, 4], expected input[1, 29, 256, 192] to have 22 channels, but got 29 channels instead
I generated the parsing images (image-parse and image-parse-new) using LIP_JPPNet, as suggested here. Even after doing that and providing the dataset in the suggested layout, I still get the error above. However, when I test with the dataset provided by CP-VTON, it works perfectly well, without any errors.
The code in test.py is shown below:
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import os
import time
from cp_dataset import CPDataset, CPDataLoader
from networks import GMM, UnetGenerator, load_checkpoint
from tensorboardX import SummaryWriter
from visualization import board_add_image, board_add_images, save_images
Hi @lokesh0606, your error is related to the pose JSON file. Check the pose-keypoint extraction from OpenPose.
Example file: {"version":1.3,"people":[{"person_id":[-1],"pose_keypoints_2d":[92.7477,30.0611,0.899314,98.3928,54.0358,0.898113,75.1456,51.932,0.772811,63.8703,88.5553,0.846992,63.8942,121.655,0.893302,120.981,58.2507,0.836377,123.076,92.7458,0.805378,127.271,127.266,0.870634,76.5765,125.175,0.582027,68.8339,173.802,0.742368,70.921,218.178,0.787068,106.166,127.285,0.634873,105.457,178.722,0.754785,110.37,232.965,0.77029,88.5551,25.1222,0.926259,97.7201,25.1486,0.968981,0,0,0,109.681,25.1539,0.957358],"face_keypoints_2d":[],"hand_left_keypoints_2d":[],"hand_right_keypoints_2d":[],"pose_keypoints_3d":[],"face_keypoints_3d":[],"hand_left_keypoints_3d":[],"hand_right_keypoints_3d":[]}]}
I'm getting this runtime error
RuntimeError: Given groups=1, weight of size [64, 22, 4, 4], expected input[1, 29, 256, 192] to have 22 channels, but got 29 channels instead
I generated the parsing images (image-parse and image-parse-new) using LIP_JPPNet, as suggested here. Even after doing that and providing the dataset in the suggested layout, I still get the error above. However, when I test with the dataset provided by CP-VTON, it works perfectly well, without any errors.
The code in test.py is shown below:
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
import os
import time
from cp_dataset import CPDataset, CPDataLoader
from networks import GMM, UnetGenerator, load_checkpoint
from tensorboardX import SummaryWriter
from visualization import board_add_image, board_add_images, save_images
def get_opt(argv=None):
    """Build and parse command-line options for the test script.

    The original paste lost its indentation and was truncated by GitHub's
    rendering — the upstream file adds many ``parser.add_argument`` calls
    here (stage, name, dataroot, ...) and returns the parsed namespace.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward-compatible — existing zero-argument callers still work).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): argument definitions omitted in the original paste —
    # restore them from the upstream repository's test.py.
    return parser.parse_args(argv)
def test_gmm(opt, test_loader, model, board):
model
model.eval()
def test_tom(opt, test_loader, model, board):
model
model.eval()
def main():
    """Entry point: parse options and announce the selected test stage.

    NOTE(review): ``opt.stage`` and ``opt.name`` are defined by the
    ``add_argument`` calls that were truncated out of the pasted
    ``get_opt`` — this works only against the complete upstream file.
    """
    opt = get_opt()
    print(opt)
    print("Start to test stage: %s, named: %s!" % (opt.stage, opt.name))


# The paste showed `if name == "main":` — markdown stripped the dunder
# underscores; restored to the standard script guard.
if __name__ == "__main__":
    main()
The code in cp_dataset.py is shown below:
# coding=utf-8
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
from PIL import ImageDraw
import os.path as osp
import numpy as np
import pandas as pd
import json
class CPDataset(data.Dataset):
    """Dataset for CP-VTON+.

    NOTE(review): the class body (``__init__``, ``__getitem__``,
    ``__len__``, ...) was truncated in the issue paste; restore it from
    the upstream cp_dataset.py. The paste also lost the body indentation,
    which is re-established here.
    """
class CPDataLoader(object):
    """Thin wrapper that owns a DataLoader over a CPDataset.

    NOTE(review): the paste showed ``def init`` / ``super(...).init()`` —
    markdown stripped the dunder underscores; restored to ``__init__``.
    The DataLoader construction that follows upstream was truncated out
    of the paste.
    """

    def __init__(self, opt, dataset):
        """Store options and dataset; upstream also builds the DataLoader.

        Args:
            opt: Parsed command-line options (batch size, workers, ...).
            dataset: The CPDataset to iterate over.
        """
        super(CPDataLoader, self).__init__()
        # NOTE(review): DataLoader/sampler setup omitted in the original paste.
# The paste showed `if name == "main":` — markdown stripped the dunder
# underscores; restored to the standard script guard.
if __name__ == "__main__":
    print("Check the dataset for geometric matching module!")
    # NOTE(review): the dataset smoke-test body was truncated in the paste.
error
The text was updated successfully, but these errors were encountered: