%\documentstyle[10pt,twoside]{article}
%\documentstyle[twoside]{article}
\documentclass[twoside]{article}
\setlength{\oddsidemargin}{0 in}
\setlength{\evensidemargin}{0 in}
\setlength{\topmargin}{-0.6 in}
\setlength{\textwidth}{6.7 in}
\setlength{\textheight}{8.5 in}
\setlength{\headsep}{0.75 in}
\setlength{\parindent}{0 in}
\setlength{\parskip}{0.1 in}
\usepackage{amsmath,amssymb,enumerate,ifthen}
\usepackage{algorithm,algorithmic}% NOTE: the CTAN bundle is called "algorithms", but the loadable packages are algorithm/algorithmic
\usepackage{graphicx}% for \includegraphics (modern replacement for the obsolete epsf interface)
% The following commands sets up the lecnum (lecture number)
% counter and make various numbering schemes work relative
% to the lecture number.
%
\newcounter{lecnum}
\renewcommand{\thepage}{\thelecnum-\arabic{page}}
\renewcommand{\thesection}{\thelecnum.\arabic{section}}
\renewcommand{\theequation}{\thelecnum.\arabic{equation}}
\renewcommand{\thefigure}{\thelecnum.\arabic{figure}}
\renewcommand{\thetable}{\thelecnum.\arabic{table}}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{claim}{Claim}
\newtheorem{proposition}{Proposition}
\newtheorem{prob}{Problem}
\newtheorem{corollary}{Corollary}
\newtheorem{question}{Question}
\newtheorem{conjecture}{Conjecture}
\newtheorem{example}{Example}
\newtheorem{definition}{Definition}
\newtheorem{remarka}{Remark}
% Upright math operators for complexity classes, usable in sub/superscripts
% (\nolimits keeps subscripts to the side rather than underneath).
% NOTE(review): \def silently overrides LaTeX's existing \P (the pilcrow
% symbol); apparently intentional, since the notes never need a pilcrow.
\def\P{\mathop{\rm P}\nolimits}
\def\NP{\mathop{\rm NP}\nolimits}
\def\DTIME{\mathop{\rm DTIME}\nolimits}
\def\BPTIME{\mathop{\rm BPTIME}\nolimits}
\def\ZPTIME{\mathop{\rm ZPTIME}\nolimits}
% polylog is not predefined by amsmath; this makes it behave like \log.
\def\polylog{\mathop{\rm polylog}\nolimits}
% remark: numbered Remark whose body is set upright (roman), not italic.
\newenvironment{remark}{\begin{remarka}\rm}{\end{remarka}}
% proof: unnumbered "Proof." block ended with a small filled-box QED mark.
\newenvironment{proof}{{\bf Proof.}}{\hfill\rule{2mm}{2mm}}
% pproof{X}: proof of a named statement, e.g. \begin{pproof}{Lemma 1.2}.
\newenvironment{pproof}[1]{\noindent{\textbf{Proof of #1.}}}{\hfill\rule{2mm}{2mm}}
\newcommand{\calI}{{\cal I}}
\newcommand{\calT}{{\cal T}}
\newcommand{\calP}{{\cal P}}
\newcommand{\opt}{\mbox{\sc opt}}
\newcommand{\OPT}{\mbox{\sc OPT}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\ZZ}{\mathbb{Z}}
%
% The following macro is used to generate the header.
%
% \lecture{number}{date}{title}{lecturer}{scribe}:
% typesets the boxed lecture header, sets the lecnum counter (which drives
% the page/section/equation/figure/table numbering schemes above), and
% resets the page counter for the new lecture.
\newcommand{\lecture}[5]{
\pagestyle{myheadings}
\thispagestyle{plain}
\newpage
% All \the... redefinitions above reference this counter.
\setcounter{lecnum}{#1}
\setcounter{page}{1}
\noindent
\begin{center}
\framebox{
\vbox{\vspace{2mm}
\hbox to 6.28in { {\bf CMPUT 675: Approximation Algorithms
\hfill Fall 2011} }
\vspace{4mm}
\hbox to 6.28in { {\Large \hfill Lecture #1 (#2): #3 \hfill} }
\vspace{2mm}
\hbox to 6.28in { {\it Lecturer: #4 \hfill Scribe: #5} }
\vspace{2mm}}
}
\end{center}
% Running heads on subsequent pages show "Lecture <n>: <title>".
\markboth{Lecture #1: #3}{Lecture #1: #3}
\vspace*{4mm}
}
%
% Convention for citations is authors' initials followed by the year.
% For example, to cite a paper by Leighton and Maggs you would type
% \cite{LM89}, and to cite a paper by Strassen you would type \cite{S69}.
% (To avoid bibliography problems, for now we redefine the \cite command.)
%
\renewcommand{\cite}[1]{[#1]}
\input{epsf}
%Use this command for a figure; it puts a figure in wherever you want it.
%usage: \fig{NUMBER}{FIGURE-SIZE}{CAPTION}{FILENAME}
% \fig{NUMBER}{WIDTH}{CAPTION}{FILENAME}: place a figure exactly here (no
% floating), scaled to WIDTH, captioned "Figure <lecnum>.<NUMBER>: CAPTION".
% Uses graphicx's \includegraphics instead of the obsolete epsf interface
% (\epsfxsize/\epsfbox), which only handled .eps files; same four-argument
% interface as before, so existing \fig calls are unaffected.
\newcommand{\fig}[4]{%
\vspace{0.2 in}%
\centerline{\includegraphics[width=#2]{#4}}%
\begin{center}
Figure \thelecnum.#1:~#3
\end{center}
}
% Use these for theorems, lemmas, proofs, etc.
% Some useful equation alignment commands, borrowed from TeX
\makeatletter
% Plain-TeX equation-alignment macros borrowed from The TeXbook.
% They use internal names (\m@th, \displ@y, \z@skip), hence \makeatletter.
% \eqalign{a&=b\cr ...}: multi-line aligned equations, vertically centered
% within the surrounding display; produces no equation numbers.
\def\eqalign#1{\,\vcenter{\openup\jot\m@th
\ialign{\strut\hfil$\displaystyle{##}$&$\displaystyle{{}##}$\hfil
\crcr#1\crcr}}\,}
% \eqalignno: like \eqalign but spans the full display width and accepts an
% optional equation number in a third &-column, set flush right via \llap.
\def\eqalignno#1{\displ@y \tabskip\@centering
\halign to\displaywidth{\hfil$\displaystyle{##}$\tabskip\z@skip
&$\displaystyle{{}##}$\hfil\tabskip\@centering
&\llap{$##$}\tabskip\z@skip\crcr
#1\crcr}}
% \leqalignno: same as \eqalignno, but the number is set flush left
% (\kern-\displaywidth\rlap shifts it to the left margin).
\def\leqalignno#1{\displ@y \tabskip\@centering
\halign to\displaywidth{\hfil$\displaystyle{##}$\tabskip\z@skip
&$\displaystyle{{}##}$\hfil\tabskip\@centering
&\kern-\displaywidth\rlap{$##$}\tabskip\displaywidth\crcr
#1\crcr}}
\makeatother
% **** IF YOU WANT TO DEFINE ADDITIONAL MACROS FOR YOURSELF, PUT THEM HERE:
\begin{document}
%FILL IN THE RIGHT INFO.
%\lecture{**LECTURE-NUMBER**}{**DATE**}{**TITLE**}{**LECTURER**}{**SCRIBE**}
\lecture{1}{Sep 8, 2011}{A sample scribe}{Mohammad R. Salavatipour}{Mohammad R. Salavatipour}
% **** YOUR NOTES GO HERE:
% Some general latex examples and examples making use of the
% macros follow.
%**** IN GENERAL, BE BRIEF. LONG SCRIBE NOTES, NO MATTER HOW WELL WRITTEN,
%**** ARE NEVER READ BY ANYBODY.
%This lecture's notes illustrate some uses of
%various \LaTeX\ macros.
%Take a look at this and imitate.
%
%\section{Some theorems and stuff} % Don't be this informal in your notes!
%
%We now delve right into the proof.
%
%\begin{lemma}
%This is the first lemma of the lecture.
%\end{lemma}
%
%\begin{proof}
%The proof is by induction on \ldots.
%For fun, we throw in a figure.
%%%%NOTE USAGE !
%\fig{1}{1in}{A Fun Figure}{funfig.eps}
%
%This is the end of the proof, which is marked with a little box.
%\end{proof}
%
%\subsection{A few items of note}
%
%Here is an itemized list:
%\begin{itemize}
%\item this is the first item;
%\item this is the second item.
%\end{itemize}
%
%Here is an enumerated list:
%\begin{enumerate}
%\item this is the first item;
%\item this is the second item.
%\end{enumerate}
%
%Here is an exercise:
%
%{\bf Exercise:} Show that ${\rm P}\ne{\rm NP}$.
%
%Here is how to define things in the proper mathematical style.
%Let $f_k$ be the $AND-OR$ function, defined by
%
%\[ f_k(x_1, x_2, \ldots, x_{2^k}) = \left\{ \begin{array}{ll}
%
% x_1 & \mbox{if $k = 0$;} \\
%
% AND(f_{k-1}(x_1, \ldots, x_{2^{k-1}}),
% f_{k-1}(x_{2^{k-1} + 1}, \ldots, x_{2^k}))
% & \mbox{if $k$ is even;} \\
%
% OR(f_{k-1}(x_1, \ldots, x_{2^{k-1}}),
% f_{k-1}(x_{2^{k-1} + 1}, \ldots, x_{2^k}))
% & \mbox{otherwise.}
% \end{array}
% \right. \]
%
%\begin{theorem}
%This is the first theorem.
%\end{theorem}
%
%\begin{proof}
%This is the proof of the first theorem. We show how to write pseudo-code now.
%%*** USE PSEUDO-CODE ONLY IF IT IS CLEARER THAN AN ENGLISH DESCRIPTION
%
%Consider a comparison between $x$ and~$y$:
%\begin{tabbing}
%\hspace*{.25in} \= \hspace*{.25in} \= \hspace*{.25in} \= \hspace*{.25in} \= \hspace*{.25in} \=\kill
%\>{\bf if} $x$ or $y$ or both are in $S$ {\bf then } \\
%\>\> answer accordingly \\
%\>{\bf else} \\
%\>\> Make the element with the larger score (say $x$) win the comparison \\
%\>\> {\bf if} $F(x) + F(y) < \frac{n}{t-1}$ {\bf then} \\%
%\>\>\> $F(x) \leftarrow F(x) + F(y)$ \\
%\>\>\> $F(y) \leftarrow 0$ \\
%\>\> {\bf else} \\
%\>\>\> $S \leftarrow S \cup \{ x \} $ \\
%\>\>\> $r \leftarrow r+1$ \\
%\>\> {\bf endif} \\
%\>{\bf endif}
%\end{tabbing}
%
%This concludes the proof.
%\end{proof}
%
%
%\section{Next topic}
%
%Here is a citation, just for fun \cite{CW87}.
%
% **** THIS ENDS THE EXAMPLES. DON'T DELETE THE FOLLOWING LINE:
\section{Introduction}
What is a randomized algorithm? Put simply, it is any algorithm that makes random choices
during its execution. We assume these algorithms have access to a source of fair random bits.
Why do we study and use randomized algorithms? There are several factors, some of the more
important ones are:
\begin{description}
\item{\bf Speed:} Randomized algorithms are usually fast, sometimes much faster than any
deterministic algorithm for the same problem. Even if the asymptotic running time is the
same as the deterministic one, it can be faster in practice. For example, QuickSort,
although its worst-case running time is worse than that of MergeSort or Heapsort, is the
fastest algorithm in practice. The disadvantage of some randomized algorithms (like QuickSort)
is that sometimes the running time is more than usual. Also, some randomized algorithms produce
a solution that is correct with high (but smaller than 1) probability, so there is a chance
that the solution is not correct.
\item{\bf Simplicity:} Even if the known randomized algorithm is not faster than the deterministic one,
it is often simpler.
\item{\bf Only thing we can do:} In some situations, randomized algorithms are the only algorithms we know.
For example, in some protocols or online algorithms (in the presence of an adversary) randomness helps to
defeat the adversary, whereas any deterministic algorithm can be defeated by an adversary.
Or, to check if a multivariable polynomial is identical to zero we only know randomized algorithms.
Note that the description of the polynomial might be implicit, e.g. the determinant of a matrix whose entries
contain different variables.
\end{description}
Randomized algorithms should not be confused with probabilistic analysis of algorithms and average case analysis:
in the average case analysis we assume some probability distribution (e.g. uniform) on input and then analyze the
running time based on the assumption that the input is drawn from that probability distribution (e.g. average
case analysis of QuickSort). However, when we analyze randomized algorithms we do not assume a particular (random)
distribution on input. The analysis must hold for all inputs.
\section{An Example: Comparing strings with communication costs}
Suppose that Alice and Bob each have an $n$-bit binary string, call them $a$ (for Alice) and $b$ (for Bob).
They want to find out if these two strings are equal. There is a communication link between them over which they
can send bits. It is enough if one of them figures out whether $a=b$ or not.
However, there is a cost associated with each bit transmission. Clearly, any deterministic algorithm
needs to use $n$ bits of communication (Why?). Here we give a randomized algorithm which uses only $O(\log n)$ bits
and has a very high success probability. The idea behind the solution is general: instead of comparing two objects
from a large universe, map them into two objects from a smaller universe and then compare those two. These two
smaller objects behave as the fingerprint of the two larger ones.
We will fix the number $t$ later. Alice picks a prime number $p\leq t$ uniformly at random and then
sends both $p$ and $a\;{\rm mod}\; p$ to Bob. Bob computes $b\;{\rm mod}\;p$ and if it is equal to
$a\;{\rm mod}\;p$ then says they are equal. Otherwise he concludes that they are not equal.
Clearly if $a=b$ then this protocol gives the correct answer (since $a\;{\rm mod}\;p=b\;{\rm mod}\;p$).
However, if $a\not=b$ there is a chance that this algorithm gives a wrong answer. We upper bound this probability
(and therefore find an upper bound on the error probability of the algorithm).
\begin{lemma}
With $t=c\cdot n\ln n$ for some large constant $c$: $\Pr[a\;{\rm mod}\; p=b\;{\rm mod}\;p]\in O(\frac{1}{c})$.
\end{lemma}
\begin{proof}
Since $|a-b|\leq 2^n$ and since every prime is at least 2, there are at most $n$ primes that divide $|a-b|$.
This is an upper bound on the number of ``dangerous'' primes, i.e., those primes that, if selected, will cause the algorithm to fail.
The Prime Number Theorem says that the number of primes up to $x$ is about $\frac{x}{\ln x}$. Therefore, there
are about $\frac{t}{\ln t}$ primes that we can choose from, out of which at most $n$ are dangerous. Thus:
\begin{eqnarray*}
\Pr[\mbox{failure}]=\Pr[a\;{\rm mod}\; p=b\;{\rm mod}\;p]&=&\Pr[p\;{\rm divides}\;|a-b|] \\
&\leq& \frac{n}{\frac{t}{\ln t}}\\
&=&\frac{n[\ln n + \ln\ln n + \ln c]}{c\cdot n\ln n} \in O(\frac{1}{c})
\end{eqnarray*}
\end{proof}
Therefore, the algorithm succeeds with probability at least $1-\epsilon$ where $\epsilon$ is made
arbitrarily close to $0$ by making $c$ large enough.
\section{Some formulas and Algorithms}
Here is a linear program:
\begin{figure}[ht]
\[\begin{array}{rrrclr}
{\mbox{\bf LP-SLST}} \ \ \ \mbox{min}~~ & \sum_{e} c(e)\cdot x_e & & & &\\
\mbox{s.t.}~ & \sum_{p \in P_t|e\in p} f(p)&\leq& x_e & \forall e \in E, \ \ t\in T & (1)\\
~~& \sum_{p \in {\cal P}_t} f(p) &\geq& 1 & t\in T & (2)\\
~~& x_e,f(p) &\geq& 0 & \forall e\in E, \ \ p\in\cup_t{\cal P}_t & (3)
\end{array}\]
\end{figure}
\subsection{Set Cover: Greedy and randomized rounding}
Here is a greedy algorithm for set cover:
\begin{figure}[h]
\noindent
\fbox{\parbox{\textwidth}{
{\bf SetCover Greedy Algorithm}
\begin{tabbing}
\hspace*{.25in} \= \hspace*{.25in} \= \hspace*{.25in} \= \hspace*{.25in} \= \hspace*{.25in} \=\kill
\> {\bf Input:} Universal set $U=\{e_1,e_2,\ldots,e_n\}$, subsets ${\cal S}=\{S_1,S_2,\ldots,S_m\}$, with each $S_i\subseteq U$
having cost $C_i$. \\
\> {\bf Output:} A minimum cost subset $S'\subseteq {\cal S}$ such that each $e_i\in U$ is in some $S_j\in S'$.\\
\>1. $C \leftarrow \emptyset$ \\
\>2. $S' \leftarrow \emptyset$ \\
\>3. {\bf while} $C \neq U$ {\bf do} \\
\>4. \>select $S_i \in S$ with minimum cost effectiveness $\alpha = \frac{c(S_i)}{|S_i - C|}$ with respect to $C$ \\
\>5. \>for each $e \in S_i$, define $price(e)$ as $\alpha$ \\
\>6. \>$S' \leftarrow S' \cup \{S_i\}$ \\
\>7. \>$C \leftarrow C \cup S_i$ \\
\>8. {\bf return} $S'$ \\
\end{tabbing}
}}
\caption{Algorithm SetCover}
\end{figure}
Now we use randomized rounding algorithm to solve Set Cover.
First consider the following simple algorithm:
\begin{enumerate}
\item Compute an optimal solution $x^*$ to the LP relaxation. Let $z^*$ be the value of this
optimal solution.
\item For each set $S_i$, pick $S_i$ with probability $x^*_i$; in other words, let $\hat{x}_i$ be
$1$ with probability $x^*_i$ and $0$ with probability $1-x^*_i$.
Therefore, $\hat{x}_i$ is the rounded value of $x^*_i$.
\item Return the collection $C = \{S_j| \hat{x}_j = 1\}$
\end{enumerate}
The expected value of the cost of the returned collection of selected sets is computed as
follows:
\begin{eqnarray}
E\left[cost(C)\right] &=& \sum_{i=1}^{m}
{\rm Pr}\left[\mbox{$S_i$ being selected}\right]\cdot c(S_i)\nonumber\\
& = & \sum_{i=1}^{m} x^*_i\cdot c(S_i) \nonumber \\
& = & z^* \nonumber
\end{eqnarray}
So the expected cost of the solution is the same as the optimum. However,
this algorithm randomly picks sets of $S_j$ with a probability that is given by solving the
problem using LP, so the solution might not be feasible.
To address this, it's enough to repeat Step 2 $4\ln n$
times, and return the union of the collections of
all sets selected in all these repetitions.
That is, if $C_i$ is the collection of sets that are
selected at the $i^{th}$ iteration of Step 2, then in Step 3 we would return
$C = \cup_{i=1}^{4 \ln n} C_i$.
We show that this new algorithm has a good chance of returning a feasible solution
with approximation ratio $O(\log n)$.
Consider an arbitrary element $e_i$ and WLOG assume that it belongs to
sets $S_1,\ldots,S_k$. Since we start from a feasible solution, we must have
$x^*_1 + x^*_2 + \ldots + x^*_k \ge 1$; in the worst case we should have
$x^*_1 + x^*_2 + \ldots + x^*_k = 1$. What is
the probability that none of the corresponding $k$ sets be selected given the probabilities of
$x^*_i$'s? The following lemma (whose proof is straightforward)
suggests that this probability is the highest when all the $x^*_i$'s are equal.
\begin{lemma}\label{lemma:worst-case}
Given $x^*_1+\ldots+x^*_k = 1$, if we select each $S_i$ with probability $x^*_i$, the
probability that no $S_i$ will be selected is highest (worst case) when all $x^*_i =
\frac{1}{k}$.
\end{lemma}
Using Lemma~\ref{lemma:worst-case}, we would like to compute the probability that
randomized rounding would not be able to find a feasible solution. We take the worst case
conditions into account in order to obtain an upper bound probability.
\begin{eqnarray}
{\rm Pr}\left[e_i \notin C_j\right] \le \left(1-\frac{1}{k}\right)^k \le e^{-1} \nonumber
\end{eqnarray}
Therefore, for $4\ln n$ iterations, we would have:
\[{\rm Pr}\left[e_i \notin \cup_{j=1}^{4 \ln n} C_j\right] \le \left(e^{-1}\right)^{4 \ln n}
= \frac{1}{n^4} \le \frac{1}{4n}\]
Therefore, using union bound and the fact that there are $n$ elements:
\begin{eqnarray}
{\rm Pr}\left[\mbox{some $e_i$ is not covered}\right]
\le n\cdot\frac{1}{4n} \le \frac{1}{4}. \nonumber
\end{eqnarray}
Using the analysis done earlier, the expected cost of this solution is bounded by:
\begin{displaymath}
{\rm E}\left[cost(C)\right] \le 4\ln n\cdot z^*
\end{displaymath}
Using Markov's inequality, ${\rm Pr}[X\ge t] \le \frac{E[X]}{t}$:
\begin{displaymath}
{\rm Pr}\left[cost(C)> 4\ln n\cdot z^*\right] \le \frac{1}{4}
\end{displaymath}
Therefore, with a probability greater than or equal to $\frac{1}{2}$, we have a feasible
solution with cost less than or equal to $4\ln n\cdot z^*$ which is
in $O(\log n\cdot \OPT)$, where $\OPT$ is the cost of the optimal solution.
To get better probability it's enough to repeat the algorithm a constant number of times
and get the best answer.
\end{document}