|
| 1 | +import time |
| 2 | + |
| 3 | +from gait_dynamics import * |
| 4 | + |
| 5 | + |
class TransformerHipKnee(nn.Module):
    """Transformer encoder that maps hip/knee kinematics to the remaining state columns.

    The input columns (selected via ``opt.kinematic_diffusion_col_loc``) are
    embedded, passed through a stack of encoder layers, and projected to every
    other column of the ``repr_dim``-wide state vector.
    """

    def __init__(self, repr_dim, opt, nlayers=6):
        """
        Args:
            repr_dim: total number of state columns (inputs + predicted outputs).
            opt: option namespace; must provide ``kinematic_diffusion_col_loc``.
            nlayers: number of stacked encoder layers.
        """
        super().__init__()  # modern zero-arg super (was super(TransformerHipKnee, self))
        self.input_dim = len(opt.kinematic_diffusion_col_loc)
        self.output_dim = repr_dim - self.input_dim
        embedding_dim = 192
        self.input_to_embedding = nn.Linear(self.input_dim, embedding_dim)
        self.encoder_layers = nn.Sequential(*[EncoderLayer(embedding_dim) for _ in range(nlayers)])
        self.embedding_to_output = nn.Linear(embedding_dim, self.output_dim)
        self.opt = opt
        self.input_col_loc = opt.kinematic_diffusion_col_loc
        self.output_col_loc = [i for i in range(repr_dim) if i not in self.input_col_loc]

    def loss_fun(self, output_pred, output_true):
        """Element-wise MSE between prediction and target (no reduction)."""
        return F.mse_loss(output_pred, output_true, reduction='none')

    def end_to_end_prediction(self, x):
        """Predict the output columns from the input columns of x[0].

        Args:
            x: sequence whose first element is a (batch, time, repr_dim) tensor.
        Returns:
            Tensor of shape (batch, time, output_dim).
        """
        # Renamed from `input` to avoid shadowing the builtin.
        model_input = x[0][:, :, self.input_col_loc]
        sequence = self.input_to_embedding(model_input)
        sequence = self.encoder_layers(sequence)
        return self.embedding_to_output(sequence)

    def predict_samples(self, x, constraint):
        """Apply constraint['mask'] to x[0], then fill the output columns in place."""
        x[0] = x[0] * constraint['mask']
        output_pred = self.end_to_end_prediction(x)
        x[0][:, :, self.output_col_loc] = output_pred
        return x[0]

    def __str__(self):
        return 'tf'
| 37 | + |
| 38 | + |
def inverse_norm_cops_and_split_force(states, opt, grf_thd_to_zero_cop=20):
    """Un-normalize the CoP columns of `states` (in place) and split force/CoP per foot.

    Args:
        states: 2-D state tensor (time x columns); only the last row is used.
        opt: option namespace providing `grf_osim_col_loc`, `cop_osim_col_loc`,
            `height_m`, and `weight_kg`.
        grf_thd_to_zero_cop: vertical-GRF threshold (in N, after weight scaling)
            below which the CoP is zeroed; pass a falsy value to disable.

    Returns:
        A list of two (1 x 6) tensors, one per force plate, each the
        concatenation of the de-normalized force and the CoP.
    """
    # Only the most recent frame is used (real-time, one output per call).
    forces = states[-1:, opt.grf_osim_col_loc]
    normed_cops = states[-1:, opt.cop_osim_col_loc]

    force_cop_two_feet = []
    for i_plate in range(2):  # one iteration per force plate (presumably right then left — TODO confirm)
        force_v = forces[:, 3*i_plate:3*(i_plate+1)]
        # Replace exact zeros so the division below cannot divide by zero.
        # NOTE(review): `force_v` is a slice of `forces` (itself a slice of
        # `states`); if these are views this mutates the caller's tensor in
        # place — confirm that is intended.
        force_v[force_v == 0] = 1e-6
        # Invert the CoP normalization: divide by the vertical (index-1) force
        # component and scale by body height.
        vector = normed_cops[:, 3 * i_plate:3 * (i_plate + 1)] / force_v[:, 1:2] * opt.height_m
        # Sanitize any NaN/inf produced by near-zero forces.
        cops = np.nan_to_num(vector, posinf=0, neginf=0)
        # vector.clip(min=-0.4, max=0.4, out=vector) # CoP should be within 0.4 m from the foot
        # cops = vector + foot_loc[:, 3*i_plate:3*(i_plate+1)]

        # Zero the CoP when the de-normalized vertical GRF is below threshold
        # (CoP is meaningless during swing / low-load phases).
        if grf_thd_to_zero_cop and force_v[0, 1] * opt.weight_kg < grf_thd_to_zero_cop:
            cops[:] = 0

        # `np.nan_to_num` yielded a numpy array; convert back to the state dtype.
        cops = torch.from_numpy(cops).to(states.dtype)
        # Write the de-normalized CoP back into the state tensor in place.
        states[:, opt.cop_osim_col_loc[3*i_plate:3*(i_plate+1)]] = cops
        force_cop_two_feet.append(torch.cat((forces[:, 3*i_plate:3*(i_plate+1)] * opt.weight_kg, cops), 1))
    return force_cop_two_feet
| 59 | + |
| 60 | + |
class BaselineModelCpu(BaselineModel):
    """CPU-only baseline-model wrapper.

    Builds the network, wraps it in the diffusion shell, and (when
    ``opt.checkpoint_bl`` is set) restores the normalizer and model weights
    from that checkpoint.
    """

    def __init__(
            self,
            opt,
            model_architecture_class,
            EMA=True,
    ):
        """
        Args:
            opt: option namespace (window_len, model_states_column_names,
                checkpoint_bl, ...).
            model_architecture_class: callable building the network from
                ``(repr_dim, opt)``, e.g. ``TransformerHipKnee``.
            EMA: if True, load the EMA weights from the checkpoint instead of
                the raw model weights.
        """
        self.device = 'cpu'
        self.opt = opt
        ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
        self.accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
        self.repr_dim = len(opt.model_states_column_names)
        self.horizon = opt.window_len  # dropped the unused local alias `horizon`
        self.accelerator.wait_for_everyone()

        self.model = model_architecture_class(self.repr_dim, opt)
        self.diffusion = DiffusionShellForAdaptingTheOriginalFramework(self.model)
        self.diffusion = self.accelerator.prepare(self.diffusion)

        print("Model has {} parameters".format(sum(y.numel() for y in self.model.parameters())))

        # Merged the two identical `opt.checkpoint_bl != ""` guards into one;
        # behavior is unchanged (with no checkpoint, `self.normalizer` is simply
        # never set, exactly as before).
        if opt.checkpoint_bl != "":
            checkpoint = torch.load(
                opt.checkpoint_bl, map_location=self.device
            )
            self.normalizer = checkpoint["normalizer"]
            self.model.load_state_dict(
                maybe_wrap(
                    checkpoint["ema_state_dict" if EMA else "model_state_dict"],
                    1,
                )
            )

    def eval_loop(self, opt, state_true, masks, value_diff_thd=None, value_diff_weight=None, cond=None,
                  num_of_generation_per_window=1, mode="inpaint"):
        """Run inference on one window and return unnormalized predictions.

        Note: `opt`, `value_diff_thd`, `value_diff_weight`, and `mode` are
        currently unused here; kept for interface compatibility.

        Returns:
            Tensor stacking `num_of_generation_per_window` unnormalized samples.
        """
        self.eval()
        constraint = {'mask': masks.to(self.device), 'value': state_true, 'cond': cond}
        state_true = state_true.to(self.device)
        state_pred_list = [self.diffusion.predict_samples([state_true], constraint)
                           for _ in range(num_of_generation_per_window)]
        state_pred_list = [self.normalizer.unnormalize(state_pred.detach().cpu()) for state_pred in state_pred_list]
        return torch.stack(state_pred_list)

    def eval(self):
        """Switch the wrapped diffusion model to eval mode."""
        self.diffusion.eval()

    def train(self):
        """Switch the wrapped diffusion model to train mode."""
        self.diffusion.train()

    def prepare(self, objects):
        """Pass a sequence of objects through the accelerator's `prepare`."""
        return self.accelerator.prepare(*objects)
| 115 | + |
| 116 | + |
class RealTimePredictor:
    """Streaming GRF predictor: feed one frame of hip/knee kinematics per call."""

    def __init__(self, opt):
        """
        Args:
            opt: fully populated option namespace (see `update_opt`).
        """
        self.opt = opt
        self.model = BaselineModelCpu(opt, TransformerHipKnee, EMA=True)
        self.model.diffusion.to('cpu')

        # Load the subject-specific OpenSim model; the skeleton is kept for
        # downstream use.
        customOsim: nimble.biomechanics.OpenSimFile = nimble.biomechanics.OpenSimParser.parseOsim(
            opt.subject_osim_model, self.opt.geometry_folder)
        self.skel = customOsim.skeleton

        # Rolling window of the most recent `window_len` normalized state rows.
        self.data_window_buffer = torch.zeros([self.opt.window_len, len(self.opt.model_states_column_names)])

        self.column_index = opt.kinematic_diffusion_col_loc
        self.mask = torch.ones([self.opt.window_len, len(self.opt.model_states_column_names)])  # No mask, just to be compatible with the model
        self.feature_len = len(self.opt.model_states_column_names)

    def predict_grf(self, right_hip_pos, right_knee_pos, left_hip_pos, left_knee_pos,
                    right_hip_vel, right_knee_vel, left_hip_vel, left_knee_vel):
        """Append one frame of kinematics and return (right, left) force/CoP tensors.

        Angles in rad, angular velocities in rad/s (see module TODO notes).
        """
        new_row = torch.zeros([1, self.feature_len])
        new_row[0, self.column_index] = torch.tensor([
            right_hip_pos, right_knee_pos, left_hip_pos, left_knee_pos,
            right_hip_vel, right_knee_vel, left_hip_vel, left_knee_vel])
        new_row = self.model.normalizer.normalize(new_row)
        # Slide the window: drop the oldest row, append the newest.
        self.data_window_buffer = torch.cat((self.data_window_buffer[1:], new_row), 0)
        # Bug fix: previously passed the module-level global `opt`, which only
        # exists when this file is run as a script; use the instance's options.
        states = self.model.eval_loop(self.opt, self.data_window_buffer.unsqueeze(0), self.mask)[0, 0]
        f_r, f_l = inverse_norm_cops_and_split_force(states, self.opt)
        return f_r, f_l
| 143 | + |
| 144 | + |
def update_opt(opt, current_folder, osim_model_path, height_m, weight_kg):
    """Populate `opt` with subject info, asset paths, and column bookkeeping.

    Args:
        opt: option namespace to mutate (also returned for convenience).
        current_folder: folder containing the checkpoint and Geometry assets.
        osim_model_path: path of the scaled OpenSim model relative to
            `current_folder` (must start with a path separator).
        height_m: subject height in meters.
        weight_kg: subject weight in kilograms.

    Returns:
        The mutated `opt`.
    """
    opt.subject_osim_model = current_folder + osim_model_path
    opt.height_m = height_m
    opt.weight_kg = weight_kg

    opt.geometry_folder = current_folder + '/Geometry/'
    # Bug fix: the original first assigned '/GaitDynamicsRefinement.pt' here and
    # then unconditionally overwrote it at the end of the function, so only the
    # hip/knee checkpoint was ever used; the dead assignment is removed.
    opt.checkpoint_bl = current_folder + '/GaitDynamicsRefinementHipKnee.pt'
    columns_to_keep = ['hip_flexion_r', 'knee_angle_r', 'hip_flexion_l', 'knee_angle_l'] + KINETICS_ALL + \
                      ['hip_flexion_r_vel', 'knee_angle_r_vel', 'hip_flexion_l_vel', 'knee_angle_l_vel']
    opt.model_states_column_names = columns_to_keep
    # `enumerate` avoids the O(n^2) repeated `.index` lookups and stays correct
    # even if a column name were ever duplicated.
    opt.kinematic_diffusion_col_loc = [i for i, col in enumerate(columns_to_keep) if 'force' not in col]
    opt.grf_osim_col_loc = [i for i, col in enumerate(columns_to_keep) if ('force' in col) and ('cop' not in col)]
    opt.cop_osim_col_loc = [i for i, col in enumerate(columns_to_keep) if 'cop' in col]
    return opt
| 161 | + |
| 162 | + |
if __name__ == '__main__':
    """
    TODO before running the code:
    1. Confirm that sampling rate is 100 Hz, force output in N, CoP output in m and being relative to calcaneus.
    2. Confirm that angles are in rad and angular velocities are in rad/s.
    3. Update variables of update_opt, see comment 'TODO: AAAA'.
    4. Update code below 'TODO: BBBB'.
    """

    current_folder = os.getcwd()
    opt = parse_opt()
    # TODO: AAAA
    opt = update_opt(opt, current_folder, osim_model_path='/Scaled_generic_no_arm.osim', height_m=1.84, weight_kg=92.9)
    predictor = RealTimePredictor(opt)

    # TODO: BBBB
    # Load the IK results and derive angular velocities by first differencing.
    # NOTE(review): `.diff()` yields rad/frame; if velocities must be rad/s at
    # 100 Hz this may need a *100 factor — confirm against the training data.
    motion_df = pd.read_csv('fpa_segment_1_ik.mot', sep='\t', skiprows=10)
    angles = motion_df[['hip_flexion_r', 'knee_angle_r', 'hip_flexion_l', 'knee_angle_l']]
    angle_velocities = angles.diff().fillna(0)
    angle_velocities.columns = ['hip_flexion_r_vel', 'knee_angle_r_vel', 'hip_flexion_l_vel', 'knee_angle_l_vel']
    combined_df = pd.concat([angles, angle_velocities], axis=1)

    # Stream every frame through the predictor, timing the whole trial.
    force_right, force_left = [], []
    iter_start_time = time.time()
    for _, frame in combined_df.iterrows():
        f_r, f_l = predictor.predict_grf(*frame)
        force_right.append(f_r)
        force_left.append(f_l)
    iter_end_time = time.time()
    print('Time taken for a', combined_df.shape[0]/100, ' s trial: ', iter_end_time - iter_start_time)

    # Compare the streamed vertical forces against a known-good offline prediction.
    good_prediction = pd.read_csv('fpa_segment_1_ik_pred.mot', sep='\t', skiprows=6)
    plt.figure()
    plt.plot(good_prediction[['force1_vy', 'force2_vy']])
    plt.plot(torch.concatenate(force_right)[:, 1], '--')
    plt.plot(torch.concatenate(force_left)[:, 1], '--')
    plt.show()
| 198 | + |
| 199 | + |
| 200 | + |
| 201 | + |
| 202 | + |
| 203 | + |
| 204 | + |
| 205 | + |
| 206 | + |
| 207 | + |
| 208 | + |
0 commit comments