算法伪代码在 IEEE Transactions Template 中无法编译。错误快照已附上

算法伪代码在 IEEE Transactions Template 中无法编译。错误快照已附上

在此处输入图片描述

\documentclass[journal]{IEEEtran}
\usepackage[lined,boxed,ruled,commentsnumbered]{algorithm2e}
\usepackage{algpseudocode}
\begin{document}
\begin{algorithm}%[H]
\caption{%\textcolor{blue}
{Pseudocode of learning algorithm for training FCDBN in module II}}
\label{pseudoGWO}
\textbf{\Function:} {FCDBN_Training(\emph{x},\emph{i}, $bs$, $\eta$, $\rho$, $\varepsilon$, $Mitr$)}
{
\textbf{Input:} Training dataset $x$ with \emph{n} samples
Number of hidden neurons $i$
Model parameters $\eta$, $\rho$, and $\varepsilon$
Batch size $bs$
Maximum number of iterations $Mitr$ 
From normal distribution with mean 0.1 and variance 1 sample $\theta$
Split the training dataset $x$ in batches $x_b$ using batch size
Initialize i=0; j=0;
    \While {j $<$ $Mitr$ or $\theta$ converged \textbf{do}}
    {
        \For{each batch $x_b$ \textbf{do}}
        {
        \While {i $<$ bs \textbf{do}}
{Step 1: Gibbs sampling 
 Determine $h_i$ using Gibbs sampling Equation \ref{eqGS1}
           Determine $v_i$ using Gibbs sampling Equations \ref{eqGS2} and \ref{eqGS3}
Step 2: Contrastive divergence            
           Update weights $w_{t}^h$, $w_t^y$, and $w_t^v$  using Equations \ref{eqWh}, \ref{eqWy}, and \ref{eqWv}, respectively. 
       
           Update model parameters \emph{A} and \emph{B} using Equations \ref{eqDBA}, and \ref{eqWBB}, respectively. 
       
           Update dynamic biases of hidden layer ${{\hat a}_t}$ and visible layer ${{\hat b}_t}$ using Equation \ref{eqDBab}. 
       
           i=i++}
           }
           j=j++
           }
        \textbf{\Return}{Trained FCDBN}
}
\end{algorithm}
\end{document}

答案1

不要混合使用 algorithmicx（algpseudocode）和 algorithm2e。它们对相同的构造定义了相互冲突的命令。以下可能是您想要的：

在此处输入图片描述

\documentclass[journal]{IEEEtran}

% Use algorithm2e ONLY -- do not also load algpseudocode/algorithmicx, whose
% commands (\While, \For, ...) clash with algorithm2e's definitions.
\usepackage[lined,boxed,ruled,commentsnumbered]{algorithm2e}
% Every statement still ends with \; (it forces the line break),
% but the printed ";" is suppressed.
\DontPrintSemicolon

\begin{document}

\begin{algorithm}
  \caption{Pseudocode of learning algorithm for training FCDBN in module II}
  \label{pseudoGWO}% label restored so the algorithm can be cross-referenced
  % Underscore must be escaped (\_) outside math mode.
  \textbf{Function:} FCDBN\_Training($x$, $i$, $bs$, $\eta$, $\rho$, $\varepsilon$, $M$)\;
  \textbf{Input:} Training dataset $x$ with $n$ samples\;
  \Indp% indent the remaining input descriptions one level
    Number of hidden neurons $i$\;
    Model parameters $\eta$, $\rho$, and $\varepsilon$\;
    Batch size $bs$\;
    Maximum number of iterations $M$\;
  \Indm
  From normal distribution with mean $0.1$ and variance $1$, sample $\theta$\;
  Split the training dataset $x$ in batches $x_b$ using batch size\;
  Initialize $i = 0$ and $j = 0$\;
  % algorithm2e typesets the "do"/"end" keywords itself; condition text that is
  % prose (not math) goes in \textnormal{} so it is not italicized.
  \While {$j < M$ \textnormal{or} $\theta$ \textnormal{converged}}{
    \For {\textnormal{each batch} $x_b$}{
      \While {$i < bs$}{
        Step 1: \textit{Gibbs sampling}\;
        \Indp
          Determine $h_i$ using Gibbs sampling Equation~(1)\;
          Determine $v_i$ using Gibbs sampling Equations~(2) and~(3)\;
        \Indm
        Step 2: \textit{Contrastive divergence}\;
        \Indp
          % FIX: the first two "Update ..." statements were missing the
          % terminating \; -- without it algorithm2e runs all three updates
          % together on a single output line.
          Update weights $w_t^h$, $w_t^y$, and $w_t^v$ using Equations~(4), (5), and~(6), respectively\;
          Update model parameters~$A$ and~$B$ using Equations~(7) and~(8), respectively\;
          Update dynamic biases of hidden layer~$\hat{a}_t$ and visible layer~$\hat{b}_t$ using Equation~(9)\;
        \Indm
        $i \gets i + 1$\;
      }
    }
    $j \gets j + 1$\;
  }
  \Return{\textnormal{trained FCDBN}}\;
\end{algorithm}

\end{document}

相关内容