Module: tianshou.highlevel.optim
Source code: tianshou/highlevel/optim.py
-
class OptimizerFactory[source]
-
create_optimizer(module: Module, lr: float) → Optimizer[source]
-
abstract create_optimizer_for_params(params: Iterable[Tensor] | Iterable[dict[str, Any]], lr: float) → Optimizer[source]
-
class OptimizerFactoryAdam(betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0)[source]
-
create_optimizer_for_params(params: Iterable[Tensor] | Iterable[dict[str, Any]], lr: float) → Optimizer[source]
-
class OptimizerFactoryRMSprop(alpha: float = 0.99, eps: float = 1e-08, weight_decay: float = 0, momentum: float = 0, centered: bool = False)[source]
-
create_optimizer_for_params(params: Iterable[Tensor] | Iterable[dict[str, Any]], lr: float) → Optimizer[source]
-
class OptimizerFactoryTorch(optim_class: OptimizerWithLearningRateProtocol, **kwargs: Any)[source]
-
create_optimizer_for_params(params: Iterable[Tensor] | Iterable[dict[str, Any]], lr: float) → Optimizer[source]
-
class OptimizerWithLearningRateProtocol(*args, **kwargs)[source]