\documentclass[sigconf,review]{acmart}
\usepackage{algorithmic}
\usepackage{algorithm}
\usepackage{enumitem}
\usepackage{booktabs, makecell, multirow, tabularx}
\newcommand{\mh}[1]{
{\color{green!70!black} MH: #1}}
\newcommand\mytab[1]{\begin{tabular}[t]{@{}c@{}} #1 \end{tabular}}
\newcommand\mc[2]{\multicolumn{#1}{c}{#2}}
\renewcommand{\algorithmiccomment}[1]{//#1}
\begin{document}
\settopmatter{printacmref=false}
\setcopyright{none}
\renewcommand\footnotetextcopyrightpermission[1]{}
\pagestyle{plain}
%%
%% This command processes the author and affiliation and title
%% information and builds the first part of the formatted document.
\maketitle
\begin{table*}[t]
\caption{Precision/recall and completeness of the requirement-to-method traces output by our approach}
\label{Results}
\resizebox{\textwidth}{!}{
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
\multirow{2}{*}{\textbf{\begin{tabular}[c]{@{}c@{}}1-\\ Prog.\end{tabular}}} & \multirow{2}{*}{\textbf{\begin{tabular}[c]{@{}c@{}}2-\\ Step\end{tabular}}} & \multicolumn{4}{c|}{\textbf{Cumulative Predictions}} & \multicolumn{3}{c|}{\textbf{\begin{tabular}[c]{@{}c@{}}Cumulative Output\\ Completeness\end{tabular}}} & \multicolumn{5}{c|}{\textbf{Cumulative Precision and Recall}} & \multicolumn{2}{c|}{\textbf{\begin{tabular}[c]{@{}c@{}}Cumulative \\ Output \\ Precision\end{tabular}}} & \multicolumn{2}{c|}{\textbf{\begin{tabular}[c]{@{}c@{}}Cumulative\\ Output\\ Recall\end{tabular}}} \\ \cline{3-18}
& & \textbf{\begin{tabular}[c]{@{}c@{}}3-\\ T\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}4-\\ N\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}5-\\ U\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}6-\\ Total\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}7-\\ T\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}8-\\ N\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}9-\\ U\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}10-\\ TP\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}11-\\ TN\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}12-\\ FP\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}13-\\ FN\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}14-\\ U\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}15-\\ T\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}16-\\ N\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}17-\\ T\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}18-\\ N\end{tabular}} \\ \hline
\multirow{4}{*}{\textbf{Chess}} & \textbf{1} & 0 & 2063 & 3953 & 6016 & 0.00 & 34.29 & 65.71 & 0 & 1612 & 0 & 0 & 4404 & NA & 100 & NA & 100.00 \\ \cline{2-18}
& \textbf{2} & 0 & 3144 & 2872 & 6016 & 0.00 & 52.26 & 47.74 & 0 & 1822 & 0 & 46 & 4148 & NA & 97.54 & 0.00 & 100.00 \\ \cline{2-18}
& \textbf{3} & 923 & 3144 & 1949 & 6016 & 15.34 & 52.26 & 32.40 & 417 & 1822 & 391 & 46 & 3340 & 51.61 & 97.54 & 90.06 & 82.33 \\ \cline{2-18}
& \textbf{4} & \textbf{1403} & \textbf{3144} & \textbf{1469} & \textbf{6016} & \textbf{24.32} & \textbf{52.26} & \textbf{24.42} & \textbf{467} & \textbf{1822} & \textbf{448} & \textbf{46} & \textbf{3233} & \textbf{51.04} & \textbf{97.54} & \textbf{91.03} & \textbf{80.26} \\ \hline
\multirow{4}{*}{\textbf{Gantt}} & \textbf{1} & 0 & 55535 & 34699 & 90234 & 0.00 & 61.55 & 38.45 & 0 & 22365 & 0 & 0 & 67869 & NA & 100 & NA & 100.00 \\ \cline{2-18}
& \textbf{2} & 0 & 66609 & 23625 & 90234 & 0.00 & 73.49 & 26.51 & 0 & 22592 & 0 & 57 & 67582 & NA & 99.75 & 0.00 & 100.00 \\ \cline{2-18}
& \textbf{3} & 908 & 66609 & 22717 & 90234 & 1.01 & 73.49 & 25.50 & 97 & 22592 & 124 & 57 & 67364 & 43.89 & 99.75 & 62.99 & 99.45 \\ \cline{2-18}
& \textbf{4} & \textbf{1527} & \textbf{66609} & \textbf{22098} & \textbf{90234} & \textbf{1.70} & \textbf{73.49} & \textbf{24.81} & \textbf{146} & \textbf{22592} & \textbf{184} & \textbf{57} & \textbf{64255} & \textbf{44.24} & \textbf{99.75} & \textbf{71.92} & \textbf{99.19} \\ \hline
\multirow{4}{*}{\textbf{iTrust}} & \textbf{1} & 0 & 17573 & 149469 & 166838 & 0.00 & 10.52 & 89.48 & 0 & 6572 & 0 & 0 & 160470 & NA & 100 & NA & 100.00 \\ \cline{2-18}
& \textbf{2} & 0 & 28657 & 138385 & 166838 & 0.00 & 17.16 & 82.84 & 0 & 6685 & 0 & 9 & 160348 & NA & 99.87 & 0.00 & 100.00 \\ \cline{2-18}
& \textbf{3} & 290 & 28657 & 138095 & 166838 & 0.17 & 17.16 & 82.67 & 81 & 6685 & 27 & 9 & 160240 & 75.00 & 99.87 & 90.00 & 99.60 \\ \cline{2-18}
& \textbf{4} & \textbf{1527} & \textbf{28657} & \textbf{137350} & \textbf{166838} & \textbf{0.62} & \textbf{17.16} & \textbf{82.22} & \textbf{93} & \textbf{6685} & \textbf{28} & \textbf{9} & \textbf{160227} & \textbf{76.86} & \textbf{99.87} & \textbf{91.18} & \textbf{99.58} \\ \hline
\multirow{4}{*}{\textbf{\begin{tabular}[c]{@{}c@{}}JHot\\ Draw\end{tabular}}} & \textbf{1} & 0 & 116787 & 20133 & 136920 & 0.00 & 85.30 & 14.70 & 0 & 12013 & 0 & 0 & 124907 & NA & 100 & NA & 100.00 \\ \cline{2-18}
& \textbf{2} & 0 & 125748 & 11172 & 136920 & 0.00 & 91.84 & 8.16 & 0 & 12066 & 0 & 86 & 124768 & NA & 99.29 & 0.00 & 100.00 \\ \cline{2-18}
& \textbf{3} & 1738 & 125748 & 9434 & 136920 & 1.27 & 91.84 & 6.89 & 98 & 12066 & 34 & 86 & 124636 & 74.24 & 99.29 & 53.26 & 99.72 \\ \cline{2-18}
& \textbf{4} & \textbf{2389} & \textbf{125748} & \textbf{8783} & \textbf{136920} & \textbf{1.74} & \textbf{91.84} & \textbf{6.41} & \textbf{132} & \textbf{12066} & \textbf{49} & \textbf{86} & \textbf{124587} & \textbf{72.93} & \textbf{99.29} & \textbf{60.55} & \textbf{99.60} \\ \hline
\textbf{Avg} & \textbf{4} & \textbf{1589} & \textbf{56040} & \textbf{42425} & \textbf{100002} & \textbf{6.98} & \textbf{58.69} & \textbf{34.47} & \textbf{210} & \textbf{10791} & \textbf{177} & \textbf{50} & \textbf{88076} & \textbf{61.27} & \textbf{99.11} & \textbf{78.67} & \textbf{94.66} \\ \hline
\end{tabular}}
\end{table*}
predictions in the case of JHotDraw). Indeed, No Trace precision is between 97\% and 99\% for each program at the end of step 4. However, high precision alone is meaningless if recall is low. We therefore discuss recall next:
\begin{itemize}[noitemsep,topsep=0pt]
\item \textit{Trace recall = TP/(TP+FN)}: the fraction of actual Traces that are correctly predicted as Traces (see the worked example below).
\item \textit{No Trace recall = TN/(TN+FP)}: the fraction of actual No Traces that are correctly predicted as No Traces.
\end{itemize}
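As a concrete illustration, consider JHotDraw at the end of step 4 in Table~\ref{Results}, where $TP = 132$, $TN = 12066$, $FP = 49$, and $FN = 86$:
\[
\textit{Trace recall} = \frac{132}{132+86} \approx 60.55\%, \qquad
\textit{No Trace recall} = \frac{12066}{12066+49} \approx 99.60\%,
\]
which are exactly the output recall values reported in columns 17 and 18.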
We notice in Table~\ref{Results} that Trace recall is 61--91\% and No Trace recall is 80--99\% (indeed 99\% for all systems except the small Chess). Trace recall is particularly important here because any Traces incorrectly predicted as No Traces require software engineers to investigate the rather large set of No Trace predictions to find them. With an average Trace recall of around 80\%, most Traces are indeed found by our approach or remain in the gray zone of Undefined Traces. Ideally, recall should be higher still, and future work will investigate this. We believe that the false negatives (which are at fault here) are likely close to the true positives in the code structure. Since No Trace precision and No Trace recall are both very high (99.11\% and 94.66\% on average, respectively), our approach is quite capable of identifying No Trace predictions correctly and completely. Software engineers can thus trust the No Trace predictions. Given that the No Trace predictions also cover the vast majority of the code, software engineers merely need to focus their attention on the remaining code. Take, for example, our largest case study, JHotDraw. Of the 136,920 comparable cells in its requirement-to-method RTM, 125,748 cells were predicted to contain No Traces with 99.3\% precision and 99.6\% recall. Hence, the much smaller remaining set of cells (the 2,389 Traces and 8,783 Undefined Traces) is a far more manageable subset for software engineers to focus on, saving them considerable effort.
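The derived columns of Table~\ref{Results} can be reproduced directly from the raw prediction counts. The following sketch (a minimal, illustrative Java fragment; the class and variable names are ours and do not appear in our tool) recomputes JHotDraw's step-4 row, including the number of cells left for engineers to inspect:
\begin{verbatim}
// Illustrative only: recomputes the derived columns of
// Table 1 from JHotDraw's raw step-4 counts.
public class RtmMetrics {
  public static void main(String[] args) {
    long t = 2389, n = 125748, u = 8783; // T/N/U predictions
    long tp = 132, tn = 12066;           // validated predictions
    long fp = 49, fn = 86;
    long total = t + n + u;              // 136,920 RTM cells

    System.out.printf("Completeness T/N/U: %.2f %.2f %.2f%n",
        100.0 * t / total, 100.0 * n / total, 100.0 * u / total);
    System.out.printf("Trace precision/recall: %.2f %.2f%n",
        100.0 * tp / (tp + fp), 100.0 * tp / (tp + fn));
    System.out.printf("NoTrace precision/recall: %.2f %.2f%n",
        100.0 * tn / (tn + fn), 100.0 * tn / (tn + fp));
    System.out.printf("Cells left to inspect: %d of %d%n",
        t + u, total);
  }
}
\end{verbatim}
The printed values match columns 7--9 and 15--18 of the JHotDraw step-4 row in Table~\ref{Results}, and the 11,172 cells left to inspect are the 2,389 Traces plus the 8,783 Undefined Traces discussed above.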
\subsection{Applicability and Generalization}
After demonstrating our approach's benefits in terms of completeness and correctness, we also want to make a few final observations about its applicability. \mh{added a line saying that our findings can be generalized to other applications}First and foremost, given the high number of LOC in our case studies and given that we considered the key requirements of the systems as specified by their developers, we conjecture that our findings generalize to other applications. Moreover, the four case study systems were very diverse and built by different people. One system, iTrust, even included network communication, which obscures method calls (i.e., there are no observable calls between client and server). Still, our approach delivered very good results. However, all case studies had Java in common. Hence, one might ask whether our approach is limited to Java. Given that very little code structure is used as part of our approach's reasoning, we do not believe so. Methods and method calls are commonly found in non-Java and even non-OO languages, including C, Python, C\#, and many more. The implements/inherits constructs are also not limited to Java; however, in this paper they are a source of complexity rather than a benefit. If a language lacks interface/implementation constructs, then our approach simply does not need the extended caller/callee analysis that accounts for their effect; the same holds for inheritance. The extended caller/callee analysis is thus an optional part of our algorithm. Overall, therefore, we see no major limitation in applying our approach to other languages, and future work will do so.
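To illustrate why such an extended caller/callee analysis is needed in the first place, consider the following minimal Java fragment (our own illustrative example, not taken from the case-study code): a call made through an interface does not name a concrete callee, so the extended analysis must conservatively consider every implementer (and, analogously, every overriding subclass when methods are inherited).
\begin{verbatim}
// Our own minimal example (not from the case studies):
// r.check() names the interface, not a concrete method,
// so an extended caller/callee analysis must treat both
// implementers as potential callees of validate().
interface Rule { boolean check(); }

class MoveRule implements Rule {
  public boolean check() { return true; }   // candidate callee 1
}
class CaptureRule implements Rule {
  public boolean check() { return false; }  // candidate callee 2
}

class Validator {
  boolean validate(Rule r) {
    return r.check();  // static target is Rule.check() only
  }
}
\end{verbatim}
In a language where every call names its concrete target directly, this disambiguation is unnecessary, which is why the extended caller/callee analysis remains an optional part of our algorithm.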
\subsection{Precision/Recall Comparison with Other Techniques}
\mh{added this subsection since reviewers were complaining that we did not discuss how our technique compares to others}
To evaluate the performance of our technique, we collected the precision and recall values obtained after applying other major
\end{document}