policy_params#


class ParamTransformerData(*, envs: Environments, device: str | device, optim_factory: OptimizerFactory, optim: Optimizer | None = None, actor: ModuleOpt | None = None, critic1: ModuleOpt | None = None, critic2: ModuleOpt | None = None)[source]#

Bases: object

Holds data that can be used by ParamTransformer instances to perform their transformation.

The representation contains the superset of all data items that are required by different types of agent factories. An agent factory is expected to set only the attributes that are relevant to its parameters.

envs: Environments#
device: str | device#
optim_factory: OptimizerFactory#
optim: Optimizer | None = None#

the single optimizer, for the case where only a single optimizer is used

actor: ModuleOpt | None = None#
critic1: ModuleOpt | None = None#
critic2: ModuleOpt | None = None#
class ParamTransformer[source]#

Bases: ABC

Base class for parameter transformations from high to low-level API.

Transforms one or more parameters from the representation used by the high-level API to the representation required by the (low-level) policy implementation. It operates directly on a dictionary of keyword arguments, which is initially generated from the parameter dataclass (subclass of Params).

abstract transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
static get(d: dict[str, Any], key: str, drop: bool = False) → Any[source]#
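
The transformation mechanism is easiest to see in a small sketch. The transformer below is hypothetical (not part of the library) and merely shows how a subclass implements transform and uses the get helper to consume a key:

    from typing import Any

    from tianshou.highlevel.params.policy_params import (
        ParamTransformer,
        ParamTransformerData,
    )


    class ParamTransformerRenameKey(ParamTransformer):
        """Hypothetical transformer that moves a value from one kwarg name to another."""

        def __init__(self, old_key: str, new_key: str) -> None:
            self.old_key = old_key
            self.new_key = new_key

        def transform(self, params: dict[str, Any], data: ParamTransformerData) -> None:
            # get(..., drop=True) removes the entry under old_key and returns its value
            params[self.new_key] = self.get(params, self.old_key, drop=True)
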
class ParamTransformerDrop(*keys: str)[source]#

Bases: ParamTransformer

transform(kwargs: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerChangeValue(key: str)[source]#

Bases: ParamTransformer

transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
abstract change_value(value: Any, data: ParamTransformerData) → Any[source]#
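
As a minimal sketch (the resolution of a hypothetical “default” marker shown here is illustrative, not a library transformer), a subclass only needs to implement change_value; the key lookup and replacement are handled by the base class:

    from typing import Any

    from tianshou.highlevel.params.policy_params import (
        ParamTransformerChangeValue,
        ParamTransformerData,
    )


    class ParamTransformerResolveDefault(ParamTransformerChangeValue):
        """Hypothetical transformer resolving the marker value "default" to a fixed float."""

        def __init__(self, key: str, default_value: float) -> None:
            super().__init__(key)
            self.default_value = default_value

        def change_value(self, value: Any, data: ParamTransformerData) -> Any:
            return self.default_value if value == "default" else value
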
class ParamTransformerLRScheduler(key_scheduler_factory: str, key_scheduler: str)[source]#

Bases: ParamTransformer

Transformer for learning rate scheduler params.

Transforms a key containing a learning rate scheduler factory (which is removed from the parameters) into a key containing a learning rate scheduler (which is added), created for the data member optim.

transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerMultiLRScheduler(optim_key_list: list[tuple[Optimizer, str]], key_scheduler: str)[source]#

Bases: ParamTransformer

Transforms several scheduler factories into a single scheduler.

The result may be a MultipleLRSchedulers instance if more than one factory is given.

Parameters:
  • optim_key_list – a list of tuples (optimizer, key of learning rate factory)

  • key_scheduler – the key under which to store the resulting learning rate scheduler

transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
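
A construction sketch (the torch optimizers here are created ad hoc for illustration; in the high-level API they are supplied by the agent factory, the factory key names follow the scheduler-factory fields documented below, and "lr_scheduler" is an assumed target key):

    import torch

    from tianshou.highlevel.params.policy_params import ParamTransformerMultiLRScheduler

    actor_optim = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    critic_optim = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)

    transformer = ParamTransformerMultiLRScheduler(
        optim_key_list=[
            (actor_optim, "actor_lr_scheduler_factory"),
            (critic_optim, "critic_lr_scheduler_factory"),
        ],
        key_scheduler="lr_scheduler",
    )
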
class ParamTransformerActorAndCriticLRScheduler(key_scheduler_factory_actor: str, key_scheduler_factory_critic: str, key_scheduler: str)[source]#

Bases: ParamTransformer

transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerActorDualCriticsLRScheduler(key_scheduler_factory_actor: str, key_scheduler_factory_critic1: str, key_scheduler_factory_critic2: str, key_scheduler: str)[source]#

Bases: ParamTransformer

transform(params: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerAutoAlpha(key: str)[source]#

Bases: ParamTransformer

transform(kwargs: dict[str, Any], data: ParamTransformerData) → None[source]#
class ParamTransformerNoiseFactory(key: str)[source]#

Bases: ParamTransformerChangeValue

change_value(value: Any, data: ParamTransformerData) → Any[source]#
class ParamTransformerFloatEnvParamFactory(key: str)[source]#

Bases: ParamTransformerChangeValue

change_value(value: Any, data: ParamTransformerData) → Any[source]#
class ParamTransformerActionScaling(key: str)[source]#

Bases: ParamTransformerChangeValue

change_value(value: Any, data: ParamTransformerData) → Any[source]#
class GetParamTransformersProtocol(*args, **kwargs)[source]#

Bases: Protocol

class Params[source]#

Bases: GetParamTransformersProtocol, ToStringMixin

create_kwargs(data: ParamTransformerData) → dict[str, Any][source]#
class ParamsMixinLearningRateWithScheduler(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None)[source]#

Bases: GetParamTransformersProtocol

lr: float = 0.001#

the learning rate to use in the gradient-based optimizer

lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler

class ParamsMixinActorAndCritic(actor_lr: float = 0.001, critic_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic_lr_scheduler_factory: LRSchedulerFactory | None = None)[source]#

Bases: GetParamTransformersProtocol

actor_lr: float = 0.001#

the learning rate to use for the actor network

critic_lr: float = 0.001#

the learning rate to use for the critic network

actor_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the actor network (if any)

critic_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the critic network (if any)

class ParamsMixinActionScaling(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip')[source]#

Bases: GetParamTransformersProtocol

action_scaling: bool | Literal['default'] = 'default'#

whether to apply action scaling; when set to “default”, it will be enabled for continuous action spaces

action_bound_method: Literal['clip', 'tanh'] | None = 'clip'#

the method used to bound actions to the range [-1, 1]. Only used if the action space is continuous.

class ParamsMixinExplorationNoise(exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None)[source]#

Bases: GetParamTransformersProtocol

exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | None = None#

If not None, add noise to actions for exploration. This is useful when solving “hard exploration” problems. It can either be a noise instance (BaseNoise), a factory for the creation of such an instance (NoiseFactory), or “default”. When set to “default”, Gaussian noise with standard deviation 0.1 is used.
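
For example, with a parameter class that includes this mixin (here DDPGParams, and assuming tianshou.exploration.GaussianNoise for the explicit variant), the noise can be configured as follows:

    from tianshou.exploration import GaussianNoise

    from tianshou.highlevel.params.policy_params import DDPGParams

    # "default" resolves to Gaussian noise with standard deviation 0.1
    params_default = DDPGParams(exploration_noise="default")
    # an explicit noise instance with the same standard deviation
    params_explicit = DDPGParams(exploration_noise=GaussianNoise(sigma=0.1))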

class PGParams(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False)[source]#

Bases: Params, ParamsMixinActionScaling, ParamsMixinLearningRateWithScheduler

discount_factor: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

reward_normalization: bool = False#

if True, will normalize the returns by subtracting the running mean and dividing by the running standard deviation.

deterministic_eval: bool = False#

whether to use the deterministic action (the mode of the distribution) instead of a stochastic one during evaluation. Does not affect training.

class ParamsMixinGeneralAdvantageEstimation(gae_lambda: float = 0.95, max_batchsize: int = 256)[source]#

Bases: GetParamTransformersProtocol

gae_lambda: float = 0.95#

determines the blend between Monte Carlo and one-step temporal difference (TD) estimates of the advantage function in general advantage estimation (GAE). A value of 0 gives a fully TD-based estimate; lambda=1 gives a fully Monte Carlo estimate.

max_batchsize: int = 256#

the maximum size of the batch when computing general advantage estimation (GAE)
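
For reference, the estimator controlled by gae_lambda is the standard generalized advantage estimate,

    \hat{A}_t = \sum_{l \ge 0} (\gamma \lambda)^l \, \delta_{t+l},
    \qquad \delta_t = r_t + \gamma V(s_{t+1}) - V(s_t),

so lambda = 0 reduces to the one-step TD residual and lambda = 1 to the Monte Carlo return minus the value baseline.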

class A2CParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, vf_coef: float = 0.5, ent_coef: float = 0.01, max_grad_norm: float | None = None)[source]#

Bases: PGParams, ParamsMixinGeneralAdvantageEstimation

vf_coef: float = 0.5#

weight (coefficient) of the value loss in the loss function

ent_coef: float = 0.01#

weight (coefficient) of the entropy loss in the loss function

max_grad_norm: float | None = None#

maximum norm for clipping gradients in backpropagation

class PPOParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, vf_coef: float = 0.5, ent_coef: float = 0.01, max_grad_norm: float | None = None, eps_clip: float = 0.2, dual_clip: float | None = None, value_clip: bool = False, advantage_normalization: bool = True, recompute_advantage: bool = False)[source]#

Bases: A2CParams

eps_clip: float = 0.2#

determines the range of allowed change in the policy during a policy update: The ratio between the probabilities indicated by the new and old policy is constrained to stay in the interval [1 - eps_clip, 1 + eps_clip]. Small values thus force the new policy to stay close to the old policy. Typical values range between 0.1 and 0.3. The optimal epsilon depends on the environment; more stochastic environments may need larger epsilons.

dual_clip: float | None = None#

determines the lower bound clipping for the probability ratio (corresponds to parameter c in arXiv:1912.09729, Equation 5). If set to None, dual clipping is not used and the bounds described in parameter eps_clip apply. If set to a float value c, the lower bound is changed from 1 - eps_clip to c, where c < 1 - eps_clip. Setting c > 0 reduces policy oscillation and further stabilizes training. Typical values are between 0 and 0.5. Smaller values provide more stability. Setting c = 0 yields PPO with only the upper bound.

value_clip: bool = False#

whether to apply clipping of the predicted value function during policy learning. Value clipping discourages large changes in value predictions between updates. Inaccurate value predictions can lead to bad policy updates, which can cause training instability. Clipping values prevents sporadic large errors from skewing policy updates too much.

advantage_normalization: bool = True#

whether to apply per mini-batch advantage normalization.

recompute_advantage: bool = False#

whether to recompute advantage every update repeat as described in https://arxiv.org/pdf/2006.05990.pdf, Sec. 3.5. The original PPO implementation splits the data in each policy iteration step into individual transitions and then randomly assigns them to minibatches. This makes it impossible to compute advantages as the temporal structure is broken. Therefore, the advantages are computed once at the beginning of each policy iteration step and then used in minibatch policy and value function optimization. This results in higher diversity of data in each minibatch at the cost of using slightly stale advantage estimations. Enabling this option will, as a remedy to this problem, recompute the advantages at the beginning of each pass over the data instead of just once per iteration.
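
A minimal configuration sketch (lr is overridden as an example; the other values shown simply make the documented defaults explicit, and any field left out keeps its default):

    from tianshou.highlevel.params.policy_params import PPOParams

    params = PPOParams(
        lr=3e-4,                        # overrides the default of 1e-3
        discount_factor=0.99,
        gae_lambda=0.95,
        eps_clip=0.2,
        dual_clip=None,                 # standard clipping only
        advantage_normalization=True,
        recompute_advantage=False,
    )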

class NPGParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, optim_critic_iters: int = 5, actor_step_size: float = 0.5, advantage_normalization: bool = True)[source]#

Bases: PGParams, ParamsMixinGeneralAdvantageEstimation

optim_critic_iters: int = 5#

the number of times to optimize the critic network per update.

actor_step_size: float = 0.5#

step size for actor update in natural gradient direction

advantage_normalization: bool = True#

whether to apply per mini-batch advantage normalization.

class TRPOParams(gae_lambda: float = 0.95, max_batchsize: int = 256, lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', discount_factor: float = 0.99, reward_normalization: bool = False, deterministic_eval: bool = False, optim_critic_iters: int = 5, actor_step_size: float = 0.5, advantage_normalization: bool = True, max_kl: float = 0.01, backtrack_coeff: float = 0.8, max_backtracks: int = 10)[source]#

Bases: NPGParams

max_kl: float = 0.01#

maximum KL divergence, used to constrain each actor network update.

backtrack_coeff: float = 0.8#

coefficient with which to reduce the step size when constraints are not met.

max_backtracks: int = 10#

maximum number of times to backtrack in line search when the constraints are not met.
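
To make the interplay of max_kl, backtrack_coeff and max_backtracks concrete, here is a generic backtracking line-search sketch (illustrative only, not the library's implementation): the proposed natural-gradient step is repeatedly shrunk by backtrack_coeff until the KL constraint and an improvement check are satisfied, giving up after max_backtracks attempts.

    def backtracking_line_search(apply_step, kl_after, improvement_after,
                                 max_kl=0.01, backtrack_coeff=0.8, max_backtracks=10):
        """Illustrative sketch of a TRPO-style backtracking line search."""
        step_frac = 1.0
        for _ in range(max_backtracks):
            apply_step(step_frac)  # set parameters to old_params + step_frac * full_step
            if kl_after() <= max_kl and improvement_after() > 0:
                return True  # constraint and improvement satisfied: accept this step
            step_frac *= backtrack_coeff  # shrink the step and try again
        return False  # constraints never met: caller should reject the update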

class ParamsMixinActorAndDualCritics(actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None)[source]#

Bases: GetParamTransformersProtocol

actor_lr: float = 0.001#

the learning rate to use for the actor network

critic1_lr: float = 0.001#

the learning rate to use for the first critic network

critic2_lr: float = 0.001#

the learning rate to use for the second critic network

actor_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the actor network (if any)

critic1_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the first critic network (if any)

critic2_lr_scheduler_factory: LRSchedulerFactory | None = None#

factory for the creation of a learning rate scheduler to use for the second critic network (if any)

class SACParams(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, alpha: float | AutoAlphaFactory = 0.2, estimation_step: int = 1, deterministic_eval: bool = True)[source]#

Bases: _SACParams, ParamsMixinExplorationNoise, ParamsMixinActionScaling

deterministic_eval: bool = True#

whether to use the deterministic action (the mean of the Gaussian policy) in evaluation mode instead of a stochastic action sampled by the policy. Does not affect training.
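
The entropy coefficient alpha can either be a fixed float or be tuned automatically via a factory; tianshou.highlevel.params.alpha.AutoAlphaFactoryDefault (referenced in the alpha documentation below) provides the standard auto-adjustment. A sketch, assuming the factory's default constructor arguments are acceptable:

    from tianshou.highlevel.params.alpha import AutoAlphaFactoryDefault

    from tianshou.highlevel.params.policy_params import SACParams

    sac_fixed = SACParams(alpha=0.2)                       # constant entropy coefficient
    sac_auto = SACParams(alpha=AutoAlphaFactoryDefault())  # automatically tuned alpha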

class DiscreteSACParams(actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, alpha: float | AutoAlphaFactory = 0.2, estimation_step: int = 1)[source]#

Bases: _SACParams

class DQNParams(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, discount_factor: float = 0.99, estimation_step: int = 1, target_update_freq: int = 0, reward_normalization: bool = False, is_double: bool = True, clip_loss_grad: bool = False)[source]#

Bases: Params, ParamsMixinLearningRateWithScheduler

discount_factor: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

estimation_step: int = 1#

the number of steps to look ahead

target_update_freq: int = 0#

the target network update frequency (0 if no target network is to be used)

reward_normalization: bool = False#

whether to normalize the returns to Normal(0, 1)

is_double: bool = True#

whether to use double Q learning

clip_loss_grad: bool = False#

whether to clip the gradient of the loss in accordance with the Nature DQN paper (nature14236); this amounts to using the Huber loss instead of the MSE loss.
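
A short configuration sketch combining these fields (double Q-learning with a periodic target network update and the Huber loss):

    from tianshou.highlevel.params.policy_params import DQNParams

    params = DQNParams(
        discount_factor=0.99,
        estimation_step=3,         # use 3-step returns
        target_update_freq=500,    # refresh the target network every 500 updates
        is_double=True,            # double Q-learning target
        clip_loss_grad=True,       # Huber loss instead of MSE
    )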

class IQNParams(lr: float = 0.001, lr_scheduler_factory: LRSchedulerFactory | None = None, discount_factor: float = 0.99, estimation_step: int = 1, target_update_freq: int = 0, reward_normalization: bool = False, is_double: bool = True, clip_loss_grad: bool = False, sample_size: int = 32, online_sample_size: int = 8, target_sample_size: int = 8, num_quantiles: int = 200, hidden_sizes: collections.abc.Sequence[int] = (), num_cosines: int = 64)[source]#

Bases: DQNParams

sample_size: int = 32#

the number of samples for policy evaluation

online_sample_size: int = 8#

the number of samples for online model in training

target_sample_size: int = 8#

the number of samples for target model in training.

num_quantiles: int = 200#

the number of quantile midpoints in the inverse cumulative distribution function of the value

hidden_sizes: Sequence[int] = ()#

hidden dimensions to use in the IQN network

num_cosines: int = 64#

number of cosines to use in the IQN network

class DDPGParams(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, estimation_step: int = 1)[source]#

Bases: Params, ParamsMixinActorAndCritic, ParamsMixinExplorationNoise, ParamsMixinActionScaling

tau: float = 0.005#

controls the soft update of the target network. It determines how slowly the target networks track the main networks. Smaller tau means slower tracking and more stable learning.

gamma: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

estimation_step: int = 1#

the number of steps to look ahead.
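
The soft target update controlled by tau is the usual Polyak averaging rule,

    \theta_{\text{target}} \leftarrow \tau \, \theta + (1 - \tau) \, \theta_{\text{target}},

applied after each update, so a small tau makes the target networks change slowly.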

class REDQParams(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, estimation_step: int = 1, ensemble_size: int = 10, subset_size: int = 2, alpha: float | AutoAlphaFactory = 0.2, actor_delay: int = 20, deterministic_eval: bool = True, target_mode: Literal['mean', 'min'] = 'min')[source]#

Bases: DDPGParams

ensemble_size: int = 10#

the number of sub-networks in the critic ensemble

subset_size: int = 2#

the number of networks in the subset

alpha: float | AutoAlphaFactory = 0.2#

controls the relative importance (coefficient) of the entropy term in the loss function. This can be a constant or a factory for the creation of a representation that allows the parameter to be automatically tuned; use tianshou.highlevel.params.alpha.AutoAlphaFactoryDefault for the standard auto-adjusted alpha.

estimation_step: int = 1#

the number of steps to look ahead

actor_delay: int = 20#

the number of critic updates before an actor update

deterministic_eval: bool = True#

whether to use the deterministic action (the mode of the distribution) instead of a stochastic one during evaluation. Does not affect training.

target_mode: Literal['mean', 'min'] = 'min'#
class TD3Params(action_scaling: bool | Literal['default'] = 'default', action_bound_method: Literal['clip', 'tanh'] | None = 'clip', exploration_noise: BaseNoise | Literal['default'] | NoiseFactory | NoneType = None, actor_lr: float = 0.001, critic1_lr: float = 0.001, critic2_lr: float = 0.001, actor_lr_scheduler_factory: LRSchedulerFactory | None = None, critic1_lr_scheduler_factory: LRSchedulerFactory | None = None, critic2_lr_scheduler_factory: LRSchedulerFactory | None = None, tau: float = 0.005, gamma: float = 0.99, policy_noise: float | FloatEnvValueFactory = 0.2, noise_clip: float | FloatEnvValueFactory = 0.5, update_actor_freq: int = 2, estimation_step: int = 1)[source]#

Bases: Params, ParamsMixinActorAndDualCritics, ParamsMixinExplorationNoise, ParamsMixinActionScaling

tau: float = 0.005#

controls the soft update of the target network. It determines how slowly the target networks track the main networks. Smaller tau means slower tracking and more stable learning.

gamma: float = 0.99#

discount factor (gamma) for future rewards; must be in [0, 1]

policy_noise: float | FloatEnvValueFactory = 0.2#

the scale of the noise used when updating the policy network

noise_clip: float | FloatEnvValueFactory = 0.5#

determines the clipping range of the noise used in updating the policy network as [-noise_clip, noise_clip]

update_actor_freq: int = 2#

the update frequency of the actor network

estimation_step: int = 1#

the number of steps to look ahead.
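
Finally, a configuration sketch for TD3 using plain floats (policy_noise and noise_clip may alternatively be given as FloatEnvValueFactory instances so that the value is derived from the environment, e.g. scaled by the maximum action):

    from tianshou.highlevel.params.policy_params import TD3Params

    params = TD3Params(
        tau=0.005,
        gamma=0.99,
        policy_noise=0.2,         # scale of the smoothing noise added when updating the policy
        noise_clip=0.5,           # noise clipped to [-0.5, 0.5]
        update_actor_freq=2,      # one actor update per two critic updates
        estimation_step=1,
    )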