from rl_coach.agents.clipped_ppo_agent import ClippedPPOAgentParameters
from rl_coach.architectures.layers import Dense
from rl_coach.base_parameters import PresetValidationParameters, VisualizationParameters
from rl_coach.core_types import (
    EnvironmentEpisodes,
    MaxDumpFilter,
    RunPhase,
    SelectedPhaseOnlyDumpFilter,
)
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.filters.observation.observation_normalization_filter import (
    ObservationNormalizationFilter,
)
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.schedules import LinearSchedule

####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = EnvironmentEpisodes(100)  # total training budget
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(10)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentEpisodes(10)  # experience collected before training starts

#########
# Agent #
#########

agent_params = ClippedPPOAgentParameters()

# Network: a small tanh MLP (32-unit input embedder + 32-unit middleware).
agent_params.network_wrappers["main"].learning_rate = 0.001
agent_params.network_wrappers["main"].input_embedders_parameters[
    "observation"
].activation_function = "tanh"
agent_params.network_wrappers["main"].input_embedders_parameters["observation"].scheme = [Dense(32)]
agent_params.network_wrappers["main"].middleware_parameters.scheme = [Dense(32)]
agent_params.network_wrappers["main"].middleware_parameters.activation_function = "tanh"
agent_params.network_wrappers["main"].batch_size = 256
agent_params.network_wrappers["main"].optimizer_epsilon = 1e-5
agent_params.network_wrappers["main"].adam_optimizer_beta2 = 0.999

# Likelihood-ratio clipping, decayed over 500,000 steps by the linear schedule below.
agent_params.algorithm.clip_likelihood_ratio_using_epsilon = 0.3
agent_params.algorithm.clipping_decay_schedule = LinearSchedule(0.5, 0.1, 10000 * 50)
agent_params.algorithm.beta_entropy = 0
agent_params.algorithm.gae_lambda = 0.95
agent_params.algorithm.discount = 0.999
agent_params.algorithm.estimate_state_value_using_gae = True
# Collect 10 episodes per policy update, keep at most 100 episodes in memory,
# and run 10 optimization epochs over each collected batch.
agent_params.algorithm.num_steps_between_copying_online_weights_to_target = EnvironmentEpisodes(10)
agent_params.algorithm.num_episodes_in_experience_replay = 100
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentEpisodes(10)
agent_params.algorithm.optimization_epochs = 10

# Normalize observations with running statistics before they reach the network.
agent_params.pre_network_filter.add_observation_filter(
    "observation",
    "normalize_observation",
    ObservationNormalizationFilter(name="normalize_observation"),
)

###############
# Environment #
###############

# Entry-point string into the user-supplied "patient_envs" gym module.
env_params = GymVectorEnvironment(level="patient_envs:PatientContinuousMountainCar")

#################
# Visualization #
#################

# Dump GIFs only for the best-scoring evaluation (TEST-phase) episodes.
vis_params = VisualizationParameters()
vis_params.dump_gifs = True
vis_params.video_dump_filters = [SelectedPhaseOnlyDumpFilter(RunPhase.TEST), MaxDumpFilter()]

########
# Test #
########

# Preset validation: Coach's automated tests expect a reward of at least 150
# to be reached within 250 episodes.
preset_validation_params = PresetValidationParameters()
preset_validation_params.test = True
preset_validation_params.min_reward_threshold = 150
preset_validation_params.max_episodes_to_achieve_reward = 250

graph_manager = BasicRLGraphManager(
    agent_params=agent_params,
    env_params=env_params,
    schedule_params=schedule_params,
    vis_params=vis_params,
    preset_validation_params=preset_validation_params,
)
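
#########
# Usage #
#########

# A sketch of how this preset would typically be run, not part of the preset
# itself. Coach presets are usually launched through the CLI; the path below
# is a placeholder:
#
#   coach -p /path/to/this_preset.py
#
# Alternatively, the graph manager can be driven programmatically (assuming
# default task parameters suffice; the experiment path is hypothetical):
#
#   from rl_coach.base_parameters import TaskParameters
#   graph_manager.create_graph(TaskParameters(experiment_path="./experiments/patient_mc"))
#   graph_manager.improve()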