Policy Gradient Assisted AURORA (PGA-AURORA)
To create an instance of PGA-AURORA, one uses an instance of AURORA together with the PGAMEEmitter, detailed below.
qdax.core.emitters.pga_me_emitter.PGAMEEmitter (MultiEmitter)
Source code in qdax/core/emitters/pga_me_emitter.py
class PGAMEEmitter(MultiEmitter):
    """Emitter for Policy Gradient Assisted MAP-Elites-style algorithms.

    Combines two sub-emitters behind the ``MultiEmitter`` interface:
    a quality emitter (``QualityPGEmitter``) that improves solutions with
    policy-gradient steps, and a genetic emitter (``MixingEmitter``) that
    produces offspring purely through the supplied variation function.
    The total environment batch is split between the two according to
    ``config.proportion_mutation_ga``.
    """

    def __init__(
        self,
        config: PGAMEConfig,
        policy_network: nn.Module,
        env: QDEnv,
        variation_fn: Callable[[Params, Params, RNGKey], Tuple[Params, RNGKey]],
    ) -> None:
        """Build both sub-emitters and register them with ``MultiEmitter``.

        Args:
            config: hyperparameters for both sub-emitters.
            policy_network: network architecture of the emitted policies.
            env: QD environment the policies act in.
            variation_fn: crossover-style operator used by the GA emitter.
        """
        self._config = config
        self._policy_network = policy_network
        self._env = env
        self._variation_fn = variation_fn

        # Split the environment batch between GA offspring and PG offspring.
        num_ga_offspring = int(
            self._config.proportion_mutation_ga * config.env_batch_size
        )
        num_pg_offspring = config.env_batch_size - num_ga_offspring

        # Quality emitter: forwards the relevant hyperparameters from the
        # PGA-ME config (note the greedy learning rate becomes the actor's).
        pg_emitter = QualityPGEmitter(
            config=QualityPGConfig(
                env_batch_size=num_pg_offspring,
                num_critic_training_steps=config.num_critic_training_steps,
                num_pg_training_steps=config.num_pg_training_steps,
                replay_buffer_size=config.replay_buffer_size,
                critic_hidden_layer_size=config.critic_hidden_layer_size,
                critic_learning_rate=config.critic_learning_rate,
                actor_learning_rate=config.greedy_learning_rate,
                policy_learning_rate=config.policy_learning_rate,
                noise_clip=config.noise_clip,
                policy_noise=config.policy_noise,
                discount=config.discount,
                reward_scaling=config.reward_scaling,
                batch_size=config.batch_size,
                soft_tau_update=config.soft_tau_update,
                policy_delay=config.policy_delay,
            ),
            policy_network=policy_network,
            env=env,
        )

        # GA emitter: identity mutation, 100% variation — offspring come
        # only from the provided variation operator.
        variation_emitter = MixingEmitter(
            mutation_fn=lambda genotype, random_key: (genotype, random_key),
            variation_fn=variation_fn,
            variation_percentage=1.0,
            batch_size=num_ga_offspring,
        )

        # Order matters to callers inspecting the emitter tuple: quality
        # emitter first, GA emitter second (same as the original).
        super().__init__(emitters=(pg_emitter, variation_emitter))