From b1dc379b09bbf9c044410a6bc51afbee0cba2e05 Mon Sep 17 00:00:00 2001
From: Ali Marjaninejad
Date: Wed, 16 Oct 2019 19:19:36 -0700
Subject: [PATCH] uploaded model wrapper

---
 models/gym_models/TSNMILegv1.py | 766 ++++++++++++++++++++++++++++++++
 1 file changed, 766 insertions(+)
 create mode 100644 models/gym_models/TSNMILegv1.py

diff --git a/models/gym_models/TSNMILegv1.py b/models/gym_models/TSNMILegv1.py
new file mode 100644
index 0000000..b678ec8
--- /dev/null
+++ b/models/gym_models/TSNMILegv1.py
@@ -0,0 +1,766 @@
+import numpy as np
+from gym import utils
+from gym.envs.mujoco import mujoco_env
+
+
+DEFAULT_CAMERA_CONFIG = {
+    'distance': 4.0,
+}
+
+
+class TSNMILeg0Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg0_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg1Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg1_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg2Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg2_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg3Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg3_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg4Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg4_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg5Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg5_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg6Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg6_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg7Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg7_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
+class TSNMILeg8Env(mujoco_env.MujocoEnv, utils.EzPickle):
+    def __init__(self,
+                 xml_file='TSNMILeg8_gym_v1.xml',
+                 forward_reward_weight=1.0,
+                 ctrl_cost_weight=0.1*0,
+                 reset_noise_scale=0.1,
+                 exclude_current_positions_from_observation=False,
+                 rgb_rendering_tracking=True):
+        utils.EzPickle.__init__(**locals())
+
+        self._forward_reward_weight = forward_reward_weight
+
+        self._ctrl_cost_weight = ctrl_cost_weight
+
+        self._reset_noise_scale = reset_noise_scale
+
+        self._exclude_current_positions_from_observation = (
+            exclude_current_positions_from_observation)
+
+        mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)
+
+    def control_cost(self, action):
+        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
+        return control_cost
+
+    def step(self, action):
+        x_position_before = self.sim.data.qpos[0]
+        self.do_simulation(action, self.frame_skip)
+        x_position_after = self.sim.data.qpos[0]
+        x_velocity = ((x_position_after - x_position_before)
+                      / self.dt)
+        # z_position = self.sim.data.qpos[1]
+        # planar_rotation = self.sim.data.qpos[2]
+        ctrl_cost = self.control_cost(action)
+
+        forward_reward = self._forward_reward_weight * x_velocity
+        #standing_cost = self._forward_reward_weight * -10 *(abs(z_position-2)+abs(planar_rotation)) - abs(x_velocity)
+
+
+        observation = self._get_obs()
+        reward = forward_reward - ctrl_cost
+        #reward = standing_cost - ctrl_cost
+        done = False
+        info = {
+            'x_position': x_position_after,
+            'x_velocity': x_velocity,
+
+            'reward_run': forward_reward,
+            'reward_ctrl': -ctrl_cost
+        }
+
+        return observation, reward, done, info
+
+    def _get_obs(self):
+        position = self.sim.data.qpos.flat.copy()
+        velocity = self.sim.data.qvel.flat.copy()
+
+        if self._exclude_current_positions_from_observation:
+            position = position[1:]
+
+        observation = np.concatenate((position, velocity)).ravel()
+        return observation
+
+    def reset_model(self):
+        noise_low = -self._reset_noise_scale
+        noise_high = self._reset_noise_scale
+
+        qpos = self.init_qpos + self.np_random.uniform(
+            low=noise_low, high=noise_high, size=self.model.nq)
+        qvel = self.init_qvel + self._reset_noise_scale * self.np_random.randn(
+            self.model.nv)
+
+        self.set_state(qpos, qvel)
+
+        observation = self._get_obs()
+        return observation
+
+    def viewer_setup(self):
+        for key, value in DEFAULT_CAMERA_CONFIG.items():
+            if isinstance(value, np.ndarray):
+                getattr(self.viewer.cam, key)[:] = value
+            else:
+                setattr(self.viewer.cam, key, value)
+
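
Usage note (not part of the commit above): a minimal sketch of how one of these
environments might be registered and rolled out with the Gym API of this era.
The environment id, entry-point string, and episode length below are illustrative
assumptions, as is the availability of the matching TSNMILeg*_gym_v1.xml MuJoCo
model files on a path the loader can resolve; the patch itself only defines the
environment classes.

    # Hypothetical registration -- 'TSNMILeg0-v1', the entry_point and
    # max_episode_steps values are assumptions, not something this patch sets up.
    import gym
    from gym.envs.registration import register

    register(
        id='TSNMILeg0-v1',
        entry_point='models.gym_models.TSNMILegv1:TSNMILeg0Env',
        max_episode_steps=1000,
    )

    env = gym.make('TSNMILeg0-v1')
    observation = env.reset()
    for _ in range(100):
        # Random actuations; step() returns (observation, reward, done, info)
        observation, reward, done, info = env.step(env.action_space.sample())
        if done:
            observation = env.reset()
    env.close()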