environment:
  type: MemoryGym
  name: Endless-SearingSpotlights-v0
  frame_skip: 1
  last_action_to_obs: False
  last_reward_to_obs: False
  obs_stacks: 1
  grayscale: False
  resize_vis_obs: [84, 84]
  positional_encoding: False
  reset_params:
    start-seed: 200002
    num-seeds: 1
    max_steps: 8192
    steps_per_coin: 160
    initial_spawns: 3
    spawn_interval: 50
    spot_min_radius: 7.5
    spot_max_radius: 13.75
    spot_min_speed: 0.0025
    spot_max_speed: 0.0075
    spot_damage: 1.0
    visual_feedback: True
    black_background: False
    hide_chessboard: False
    light_dim_off_duration: 6
    light_threshold: 255
    coin_scale: 0.375
    coin_show_duration: 6
    coins_visible: False
    agent_speed: 3.0
    agent_health: 10
    agent_scale: 0.25
    agent_visible: False
    sample_agent_position: True
    show_last_action: True
    show_last_positive_reward: True
    reward_inside_spotlight: 0.0
    reward_outside_spotlight: 0.0
    reward_death: 0.0
    reward_max_steps: 0.0
    reward_coin: 0.25
  seed: 200002
  reward_normalization: 0
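
Only the reset_params above are forwarded to the Memory Gym environment itself; the remaining keys configure the training framework's observation handling (frame skipping, stacking, resizing). A minimal sketch of exercising these reset parameters directly, assuming the memory-gym package's Gymnasium-style API (environment ids registered on import, reset parameters passed through the options dict of reset); the option keys are copied from reset_params, everything else is illustrative.

import gymnasium as gym
import memory_gym  # assumption: importing registers the Endless-SearingSpotlights-v0 id

# A subset of the reset_params block; omitted keys fall back to the environment defaults.
reset_options = {
    "max_steps": 8192,
    "steps_per_coin": 160,
    "agent_health": 10,
    "coins_visible": False,
    "sample_agent_position": True,
}

env = gym.make("Endless-SearingSpotlights-v0")
obs, info = env.reset(seed=200002, options=reset_options)  # start-seed with num-seeds: 1

done = False
while not done:
    # Random actions just to step the environment; the trained policy replaces this.
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    done = terminated or truncated
env.close()
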
model:
  load_model: False
  model_path:
  checkpoint_interval: 500
  activation: relu
  vis_encoder: cnn
  vec_encoder: linear
  num_vec_encoder_units: 128
  hidden_layer: default
  num_hidden_layers: 1
  num_hidden_units: 512
  recurrence:
    layer_type: gru
    sequence_length: -1
    hidden_state_size: 512
    hidden_state_init: zero
    reset_hidden_state: True
    residual: False
    num_layers: 1
  obs_decoder:
    attach_to: memory
    detach_gradient: False
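
Read together, the model block describes a CNN over the 84x84 visual observation, a linear encoder (128 units) for any vector observation, one 512-unit hidden layer, a single GRU layer with a 512-dimensional hidden state (zero-initialized and reset at episode boundaries), and an observation decoder attached to the memory output whose gradients are not detached. A rough PyTorch sketch of that topology; the concrete layer shapes, the decoder design, and the class name are illustrative assumptions, not the framework's actual modules (the vector-observation branch is omitted for brevity).

import torch
import torch.nn as nn

class SketchModel(nn.Module):
    """Illustrative topology only; the real framework builds its model from the config."""

    def __init__(self, num_actions: int, hidden_units: int = 512, hidden_state_size: int = 512):
        super().__init__()
        # vis_encoder: cnn (Atari-style CNN over 3x84x84 observations)
        self.vis_encoder = nn.Sequential(
            nn.Conv2d(3, 32, 8, stride=4), nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1), nn.ReLU(),
            nn.Flatten(),
        )
        cnn_out = 64 * 7 * 7  # for 84x84 inputs
        # hidden_layer: default, num_hidden_layers: 1, num_hidden_units: 512
        self.hidden = nn.Sequential(nn.Linear(cnn_out, hidden_units), nn.ReLU())
        # recurrence: one GRU layer, hidden_state_size 512
        self.memory = nn.GRU(hidden_units, hidden_state_size, num_layers=1, batch_first=True)
        # policy and value heads on top of the memory output
        self.policy = nn.Linear(hidden_state_size, num_actions)
        self.value = nn.Linear(hidden_state_size, 1)
        # obs_decoder attached to memory, gradients flow back (detach_gradient: False)
        self.obs_decoder = nn.Sequential(
            nn.Linear(hidden_state_size, cnn_out), nn.ReLU(),
            nn.Unflatten(1, (64, 7, 7)),
            nn.ConvTranspose2d(64, 64, 3, stride=1), nn.ReLU(),
            nn.ConvTranspose2d(64, 32, 4, stride=2), nn.ReLU(),
            nn.ConvTranspose2d(32, 3, 8, stride=4),
        )

    def forward(self, obs, hxs):
        # obs: (batch, seq, 3, 84, 84); hxs: (1, batch, hidden_state_size), zero-initialized
        b, t = obs.shape[:2]
        h = self.hidden(self.vis_encoder(obs.flatten(0, 1)))
        memory_out, hxs = self.memory(h.view(b, t, -1), hxs)
        flat = memory_out.flatten(0, 1)
        return self.policy(flat), self.value(flat), self.obs_decoder(flat), hxs
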
sampler:
  n_workers: 32
  worker_steps: 512
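
These two values fix the size of the on-policy batch gathered for every update; the short check below spells out the arithmetic (n_mini_batches is taken from the trainer block that follows).

# Rollout size implied by the sampler block.
n_workers, worker_steps, n_mini_batches = 32, 512, 8
transitions_per_update = n_workers * worker_steps             # 16384 transitions
mini_batch_size = transitions_per_update // n_mini_batches    # 2048 transitions per mini-batch
print(transitions_per_update, mini_batch_size)
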
trainer:
  algorithm: PPO
  resume_at: 0
  gamma: 0.995
  lamda: 0.95
  updates: 50000
  epochs: 3
  refresh_buffer_epoch: -1
  n_mini_batches: 8
  advantage_normalization: no
  value_coefficient: 0.5
  max_grad_norm: 0.25
  share_parameters: True
  learning_rate_schedule:
    initial: 0.000275
    final: 1e-05
    power: 1.0
    max_decay_steps: 10000
  beta_schedule:
    initial: 0.0001
    final: 1e-06
    power: 1.0
    max_decay_steps: 10000
  clip_range_schedule:
    initial: 0.1
    final: 0.1
    power: 1.0
    max_decay_steps: 10000
  obs_reconstruction_schedule:
    initial: 0.1
    final: 0.1
    power: 1.0
    max_decay_steps: 1000
  ground_truth_estimator_schedule:
    initial: 0.0
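
Every *_schedule block (learning rate, entropy coefficient beta, clip range, observation-reconstruction coefficient, ground-truth estimator coefficient) uses the same four fields. A minimal sketch of the decay they imply, assuming the common polynomial form in which the value anneals from initial to final over max_decay_steps updates and is then held at final; with power: 1.0 this is plain linear annealing, and schedules whose initial equals final (clip range, observation reconstruction) stay constant. The function name is illustrative.

def decay_schedule(initial: float, final: float, power: float, max_decay_steps: int, update: int) -> float:
    # Assumed form: (initial - final) * (1 - t / max_decay_steps) ** power + final,
    # held at `final` once `update` reaches max_decay_steps.
    t = min(update, max_decay_steps)
    return (initial - final) * (1.0 - t / max_decay_steps) ** power + final

# Learning rate: anneals from 2.75e-4 to 1e-5 over the first 10000 of the 50000 updates.
print(decay_schedule(0.000275, 1e-05, 1.0, 10000, 0))      # 0.000275
print(decay_schedule(0.000275, 1e-05, 1.0, 10000, 5000))   # 0.0001425
print(decay_schedule(0.000275, 1e-05, 1.0, 10000, 20000))  # 1e-05 (held after decay ends)
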