Dependencies:
%%%%%%%%%%%%% Algorithm %%%%%%%%%%%%%
% \usepackage{algorithmic} % not needed here: the code below uses only algorithm2e commands, and loading algorithmic alongside algorithm2e can clash in some setups
\usepackage[ruled,vlined]{algorithm2e} % algorithm package
% \usepackage{algorithm} % conflicts with algorithm2e (both define the algorithm environment)
% \renewcommand{\algorithmicrequire}{\textbf{Input:}}  % prints "Input:" when using the algorithmic package
% \renewcommand{\algorithmicensure}{\textbf{Output:}}  % prints "Output:" when using the algorithmic package
%%%%%%%%%%%%% Algorithm %%%%%%%%%%%%%
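A minimal compilable skeleton (a sketch, assuming pdflatex and the standard article class) for checking that the preamble loads cleanly before pasting in the full algorithm:

\documentclass{article}
\usepackage[ruled,vlined]{algorithm2e}
\begin{document}
\begin{algorithm}
\caption{Smoke test}
\LinesNumbered
do something\;
\end{algorithm}
\end{document}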
Code:
\begin{algorithm}
\caption{TD3 algorithm}% algorithm name
\label{algo_TD3}% \label must come after \caption, otherwise \ref resolves to the wrong counter
\LinesNumbered % display line numbers
\KwIn{initial policy parameters $\theta$, Q-function parameters $\phi_1$, $\phi_2$, empty replay buffer $\mathcal{D}$}
% \KwOut{output result} % output
Set target parameters equal to main parameters $\theta_{targ}\leftarrow \theta$, $\phi_{targ,1}\leftarrow \phi_1$, $\phi_{targ,2}\leftarrow \phi_2$\;
\For{$episode=1$ \KwTo $M$}{
Receive initial observation state $s_1$\;
Initialize a random process $\mathcal{N}$ for action exploration\;
\For{$t=1$ \KwTo $T$}{
Select action $a_t$ either from the current policy plus exploration noise, or from the guidance policy\;
Execute action $a_t$, then observe reward $r_t$ and new state $s_{t+1}$\;
Normalize the state $s_{t+1}$\;
Get $r_t^{rs}$ using $r_t^{rs}=T(s_{t+1})+F(s_t,a_t,s_{t+1})$\;
Store transition $(s_t, a_t, r_t^{rs}, s_{t+1}, d_t)$ in $\mathcal{D}$, where $d_t$ is the done signal\;
Sample a mini-batch of transitions $\mathcal{B}=\{(s,a,r^{rs},s',d)\}$ from $\mathcal{D}$\;
Compute target actions $a'(s')=\mathrm{clip}\left(\pi_{\theta_{targ}}(s')+\mathrm{clip}(\epsilon,-c,c),a_{Low},a_{High}\right)$\;
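% Target-policy smoothing: the clipped noise added to the target action
% smooths the Q-targets over nearby actions (one of TD3's key tricks).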
Compute targets $y(r^{rs},s',d)=r^{rs}+\gamma(1-d)\min_{i=1,2}Q_{\phi_{targ,i}}(s',a'(s'))$\;
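% Clipped double-Q learning: taking the minimum over the two target
% critics counteracts the Q-value overestimation of a single critic.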
Update Q-functions by one step of gradient descent using $\nabla_{\phi_{i}}\frac{1}{|\mathcal{B}|}\sum_{(s,a,r^{rs},s',d)\in \mathcal{B}}\left(Q_{\phi_{i}}(s,a)-y(r^{rs},s',d)\right)^2$ for $i=1,2$\;
\If{$t \bmod p_{delay} = 0$}{
Update policy by one step of gradient ascent using $\nabla_{\theta}\frac{1}{|\mathcal{B}|}\sum_{s\in \mathcal{B}}Q_{\phi_{1}}(s,\pi_{\theta}(s))$\;
Update the target networks:\;
\quad$\phi_{targ,i}\leftarrow\tau\phi_{targ,i}+(1-\tau)\phi_{i}$ for $i=1,2$\;
\quad$\theta_{targ}\leftarrow\tau\theta_{targ}+(1-\tau)\theta$\;
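% Delayed policy and soft (Polyak-averaged) target updates: the actor and
% the targets move more slowly than the critics, stabilizing the targets.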
}
}
}
\end{algorithm}
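With \label{algo_TD3} placed after \caption, the algorithm can be cross-referenced from the body text, e.g.:

As shown in Algorithm~\ref{algo_TD3}, the critics are updated at every step, while the policy and the target networks are updated only every $p_{delay}$ steps.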
Result: