What is the limit on save_size? I am using BaKoMa TeX, and the largest value I can set save_size to in TEXMF.INI is 7000; anything above that and the program crashes.
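For comparison, in a Web2C-based distribution such as TeX Live the same parameter is set in texmf.cnf, and values well above 7000 are routine there; the practical ceiling is bounded by available memory rather than by a fixed constant. A sketch, assuming a stock TeX Live layout (the file location and the default value vary between versions):

% texmf.cnf (TeX Live) -- illustrative value, not a hard maximum
save_size = 100000

Here is the document that triggers the crash: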
%
\documentclass[10pt,letter,notitlepage]{article}
% Page layout
\usepackage[left=2cm, right=2cm, lines=45, top=0.8in, bottom=0.7in]{geometry}
\usepackage{fancyhdr}
\usepackage{fancybox}
\usepackage{pdfpages}
\renewcommand{\headrulewidth}{1.5pt}
\renewcommand{\footrulewidth}{1.5pt}
\pagestyle{fancy}
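% Frames below are drawn by mdframed as TikZ pictures (framemethod=TikZ)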
\newcommand\Loadedframemethod{TikZ}
\usepackage[framemethod=\Loadedframemethod]{mdframed}
\usepackage{tikz}
\usepackage[linesnumbered,ruled,vlined]{algorithm2e}
\usepackage{url}
\usepackage{dsfont}
\usepackage{amssymb,amsmath}
\usepackage{xspace}
\newcommand{\RR}{\mathds{R}}
\newcommand{\sign}{\mathop{\mathrm{sign}}}
\newcommand{\argmin}{\mathop{\mathrm{argmin}}}
\newcommand{\zero}{\mathbf{0}}
\newcommand{\one}{\mathbf{1}}
\newcommand{\bv}{\mathbf{b}}
\newcommand{\wv}{\mathbf{w}}
\newcommand{\xv}{\mathbf{x}}
\newcommand{\yv}{\mathbf{y}}
\newcommand{\rv}{\mathbf{r}}
\newcommand{\inner}[2]{\langle #1, #2 \rangle}
\newcommand{\red}[1]{{\color{red}#1}}
\newcommand{\blue}[1]{{\color{blue}#1}}
\newcommand{\magenta}[1]{{\color{magenta}#1}}
\newcommand{\ea}{{et al.}\xspace}
\newcommand{\eg}{{e.g.}\xspace}
\newcommand{\ie}{{i.e.}\xspace}
\newcommand{\iid}{{i.i.d.}\xspace}
\newcommand{\cf}{{cf.}\xspace}
\newcommand{\wrt}{{w.r.t.}\xspace}
\newcommand{\aka}{{a.k.a.}\xspace}
\newcommand{\etc}{{etc.}\xspace}
\newcommand{\ans}[1]{{\color{orange}\textsf{Ans}: #1}}
%================================
%================================
\setlength{\parskip}{1cm}
\setlength{\parindent}{1cm}
\tikzset{titregris/.style={draw=gray, fill=white, shading=exersicetitle,
  text=gray, rectangle, rounded corners, right, minimum height=.3cm}}
\pgfdeclarehorizontalshading{exersicebackground}{100bp}%
  {color(0bp)=(green!40); color(100bp)=(black!5)}
\pgfdeclarehorizontalshading{exersicetitle}{100bp}%
  {color(0bp)=(red!40); color(100bp)=(black!5)}
\newcounter{exercise}
\renewcommand*\theexercise{\textbf{Exercise}~\arabic{exercise}}
\makeatletter
\def\mdf@@exercisepoints{}% new mdframed key (empty = no points shown)
\define@key{mdf}{exercisepoints}{%
\def\mdf@@exercisepoints{#1}%
}
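% Exercise box style: shaded background, with the exercise title (and the
% optional point value) drawn as TikZ nodes sitting on the frame.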
\mdfdefinestyle{exercisestyle}{%
outerlinewidth=1em,outerlinecolor=white,%
leftmargin=-1em,rightmargin=-1em,%
middlelinewidth=0.5pt,roundcorner=3pt,linecolor=black,
apptotikzsetting={\tikzset{mdfbackground/.append style ={%
shading = exersicebackground}}},
innertopmargin=0.1\baselineskip,
skipabove={\dimexpr0.1\baselineskip+0\topskip\relax},
skipbelow={-0.1em},
needspace=0.5\baselineskip,
frametitlefont=\sffamily\bfseries,
settings={\global\stepcounter{exercise}},
singleextra={%
\node[titregris,xshift=0.5cm] at (P-|O) %
{~\mdf@frametitlefont{\theexercise}~};
\ifdefempty{\mdf@@exercisepoints}%
{}%
{\node[titregris,left,xshift=-1cm] at (P)%
{~\mdf@frametitlefont{\mdf@@exercisepoints points}~};}%
},
firstextra={%
\node[titregris,xshift=1cm] at (P-|O) %
{~\mdf@frametitlefont{\theexercise}~};
\ifdefempty{\mdf@@exercisepoints}%
{}%
{\node[titregris,left,xshift=-1cm] at (P)%
{~\mdf@frametitlefont{\mdf@@exercisepoints points}~};}%
},
}
\makeatother
%%%%%%%%%
%%%%%%%%%%%%%%%
\mdfdefinestyle{theoremstyle}{%
outerlinewidth=0.01em,linecolor=black,middlelinewidth=0.5pt,%
frametitlerule=true,roundcorner=2pt,%
apptotikzsetting={\tikzset{mdfframetitlebackground/.append style={%
shade,left color=white, right color=blue!20}}},
frametitlerulecolor=black,
innerbottommargin=0.5\baselineskip,
frametitlerulewidth=0.1pt,
innertopmargin=0.7\topskip,skipabove={\dimexpr0.2\baselineskip+0.1\topskip\relax},
frametitleaboveskip=1pt,
frametitlebelowskip=1pt
}
\setlength{\parskip}{0mm}
\setlength{\parindent}{10mm}
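% `exercise': numbered, framed, theorem-like environment used in the body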
\mdtheorem[style=theoremstyle]{exercise}{\textbf{Exercise}}
%================ List definitions: numList and alphList =============
\newcounter{alphListCounter}
\newenvironment
{alphList}
{\begin{list}
{\alph{alphListCounter})}
{\usecounter{alphListCounter}
\setlength{\rightmargin}{0cm}
\setlength{\leftmargin}{0.5cm}
\setlength{\itemsep}{0.2cm}
\setlength{\partopsep}{0cm}
\setlength{\parsep}{0cm}}
}
{\end{list}}
\newcounter{numListCounter}
\newenvironment
{numList}
{\begin{list}
{\arabic{numListCounter})}
{\usecounter{numListCounter}
\setlength{\rightmargin}{0cm}
\setlength{\leftmargin}{0.5cm}
\setlength{\itemsep}{0cm}
\setlength{\partopsep}{0cm}
\setlength{\parsep}{0cm}}
}
{\end{list}}
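% Note: hyperref's linkcolor/urlcolor/citecolor take effect only together with
% colorlinks=true; cleveref must be loaded after hyperref (as done here).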
\usepackage[letterpaper=true,linkcolor=magenta,urlcolor=magenta,citecolor=black]{hyperref}
\usepackage{cleveref}
%===========================================================
\begin{document}
\begin{exercise}[Perceptron Implementation (5 pts)]
\blue{\textbf{Convention:} All algebraic operations, when applied to a vector
or matrix, are understood to be element-wise (unless otherwise stated).}
\begin{algorithm}[H]
\DontPrintSemicolon
\KwIn{$X\in\RR^{n\times d}$, $\yv\in \{-1,1\}^n$, $\wv=\zero_d$,
$b=0$, $\mathsf{max\_pass} \in \mathds{N}$}
\KwOut{$\wv, b, \mathit{mistake}$}
\For{$t=1, 2, \ldots, \mathsf{max\_pass}$ }{
$\mathit{mistake}(t) \gets 0$\;
\For{$i=1, 2, \ldots, n$}{
\If{$y_i (\inner{\xv_i}{\wv}+b) \leq 0$}{
$\wv \gets \wv + y_i\xv_i$ \tcp*{$\xv_i$
is the $i$-th row of $X$}
$b \gets b + y_i$\;
$\mathit{mistake}(t) \gets \mathit{mistake}(t) + 1$\;
}
}
}
\caption{The perceptron algorithm.}
\label{alg:perceptron}
\end{algorithm}
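For concreteness: with the initialization $\wv=\zero_d$ and $b=0$, the first
point tested satisfies $y_1(\inner{\xv_1}{\wv}+b)=0\leq 0$, so it always counts
as a mistake and triggers the update $\wv \gets y_1\xv_1$, $b \gets y_1$.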
Implement the perceptron in \Cref{alg:perceptron}. Your implementation should
take as input $X = [\xv_1^\top, \ldots, \xv_n^\top]^\top \in \RR^{n \times d}$,
$\yv \in \{-1,1\}^{n}$, an initialization of the hyperplane parameters
$\wv\in\RR^{d}$ and $b\in \RR$, and the maximum number of passes over the
training set [suggested $\mathsf{max\_pass} = 500$]. Run your perceptron algorithm
on the \href{https://archive.ics.uci.edu/ml/datasets/spambase}{\magenta{\textsf{spambase}}}
dataset (available on \href{https://cs.uwaterloo.ca/~y328yu/mycourses/480/assignment.html}{\magenta{course
website}}), and plot the number of mistakes ($y$-axis) \wrt the number of
passes ($x$-axis).
\ans{ }
\end{exercise}
\begin{exercise}[Linear Regression (5 pts)]
Recall that ridge regression refers to the following $\ell_2$ norm regularized
linear regression problem:
\begin{align}
\label{eq:rr}
\min_{\wv} ~ \tfrac12\|X\wv - \yv\|_2^2 + \lambda \|\wv\|_2^2,
\end{align}
where $X \in \RR^{n\times d}$, $\yv \in \RR^n$, and $\wv \in \RR^d$. Obviously,
setting $\lambda = 0$ recovers ordinary linear regression.
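For reference, setting the gradient $X^\top(X\wv - \yv) + 2\lambda\wv$ of
\eqref{eq:rr} to zero yields the closed-form minimizer
$\wv^\star = (X^\top X + 2\lambda I_{d\times d})^{-1} X^\top \yv$ whenever
$\lambda > 0$.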
\begin{enumerate}
\item (3 pts) Prove that ridge regression with any $\lambda > 0$ is equivalent
to ordinary linear regression after performing the following data augmentation
(see the identity sketched after this list):
\begin{align}
\label{eq:da1}
X &\gets \begin{bmatrix} X \\ \sqrt{2\lambda} I_{d \times d} \end{bmatrix}
\\
\label{eq:da2}
\yv &\gets \begin{bmatrix} \yv \\ 0_{d} \end{bmatrix},
\end{align}
where $I_{d\times d}$ is the $d\times d$ identity matrix and $0_d$ is the
$d$-dimensional zero vector.
\ans{}
\item (2 pts) Explain the data augmentation step \eqref{eq:da1}-\eqref{eq:da2}.
[E.g., what kind of data are we adding to the training set? How many points,
and of what dimension? What effect do they have on the weight vector $\wv$?]
\ans{}
\end{enumerate}
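For reference, writing $\tilde{X}$ and $\tilde{\yv}$ for the augmented data in
\eqref{eq:da1}-\eqref{eq:da2}, the key identity is
\begin{align*}
\tfrac12\big\|\tilde{X}\wv - \tilde{\yv}\big\|_2^2
= \tfrac12\|X\wv - \yv\|_2^2 + \tfrac12\big\|\sqrt{2\lambda}\,\wv\big\|_2^2
= \tfrac12\|X\wv - \yv\|_2^2 + \lambda\|\wv\|_2^2,
\end{align*}
i.e., ordinary least squares on the augmented data minimizes exactly the ridge
objective \eqref{eq:rr}.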
\end{exercise}
\end{document}