\documentclass[amssymb,twocolumn,pra,10pt,aps]{revtex4-1}
\usepackage{mathptmx,amsmath}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\CC}{\mathbb{C}}
\DeclareMathOperator{\lcm}{lcm}
\DeclareMathOperator{\sgn}{sgn}
\begin{document}
\title{Solutions to the 64th William Lowell Putnam Mathematical Competition \\
Saturday, December 6, 2003}
\author{Manjul Bhargava, Kiran Kedlaya, and Lenny Ng}
\noaffiliation
\maketitle
\begin{itemize}
\item[A1]
There are $n$ such sums. More precisely, there is exactly one such sum
with $k$ terms for each of $k=1, \dots, n$ (and clearly no others).
To see this, note that if $n = a_1 + a_2 + \cdots + a_k$ with
$a_1 \leq a_2 \leq \cdots \leq a_k \leq a_1 + 1$, then
\begin{align*}
ka_1 &= a_1 + a_1 + \cdots + a_1 \\
&\leq n \leq a_1 + (a_1 + 1) + \cdots + (a_1 + 1) \\
&= ka_1 + k-1.
\end{align*}
However, there is a unique integer $a_1$ satisfying these inequalities,
namely $a_1 = \lfloor n/k \rfloor$. Moreover, once $a_1$ is fixed,
there are $k$ different possibilities for the sum $a_1 + a_2 + \cdots + a_k$:
if $i$ is the last integer such that $a_i = a_1$, then the sum equals
$ka_1 + (i-1)$. The possible values of $i$ are $1, \dots, k$,
and exactly one of these sums comes out equal to $n$, proving
our claim.
\textbf{Note:}
In summary, there is a unique partition of $n$ with $k$ terms that is
``as equally spaced as possible''.
One can also obtain essentially the same construction inductively: except
for the all-ones sum, each partition of $n$ is obtained by ``augmenting''
a unique partition of $n-1$.
\item[A2]
\textbf{First solution:}
Assume without loss of generality that $a_i + b_i > 0$
for each $i$ (otherwise both sides of the desired inequality are zero).
Then the AM-GM inequality gives
\begin{multline*}
\left( \frac{a_1\cdots a_n}{(a_1+b_1)\cdots(a_n+b_n)} \right)^{1/n} \\
\leq \frac{1}{n} \left( \frac{a_1}{a_1 + b_1} + \cdots + \frac{a_n}{a_n+b_n}
\right),
\end{multline*}
and likewise with the roles of $a$ and $b$ reversed. Adding these two
inequalities and clearing denominators yields the desired result.
\textbf{Second solution:}
Write the desired inequality in the form
\[
(a_1 + b_1)\cdots(a_n+b_n) \geq
[(a_1\cdots a_n)^{1/n} + (b_1\cdots b_n)^{1/n}]^n,
\]
expand both sides, and compare the terms on both sides
in which $k$ of the terms are among the $a_i$. On the left,
one has the product of each $k$-element subset of $\{1, \dots, n\}$;
on the right, one has
\[
\binom{n}{k} (a_1\cdots a_n)^{k/n} (b_1 \cdots b_n)^{(n-k)/n},
\]
which is precisely $\binom{n}{k}$ times the geometric mean of the terms
on the left. Thus AM-GM shows that the terms under consideration on the
left exceed those on the right; adding these inequalities over all $k$
yields the desired result.
\textbf{Third solution:}
Since both sides are continuous in each $a_i$, it is sufficient to
prove the claim with $a_1, \dots, a_n$ all positive (the general case
follows by taking limits as some of the $a_i$ tend to zero).
Put $r_i = b_i/a_i$; then the given inequality is equivalent to
\[
(1 + r_1)^{1/n} \cdots (1+r_n)^{1/n} \geq 1 + (r_1\cdots r_n)^{1/n}.
\]
In terms of the function
\[
f(x) = \log(1 + e^x)
\]
and the quantities $s_i = \log r_i$,
we can rewrite the desired inequality as
\[
\frac{1}{n}(f(s_1) + \cdots + f(s_n)) \geq f\left( \frac{s_1 + \cdots +
s_n}{n} \right).
\]
This will follow from Jensen's inequality if we can verify that $f$
is a convex function; it is enough to check that $f''(x) > 0$ for all $x$.
In fact,
\[
f'(x) = \frac{e^x}{1+e^x} = 1 - \frac{1}{1+e^x}
\]
is an increasing function of $x$, so $f''(x) > 0$ and Jensen's inequality
thus yields the desired result. (As long as the $a_i$ are all positive,
equality holds when $s_1 = \cdots = s_n$, i.e., when the vectors
$(a_1, \dots, a_n)$ and $(b_1, \dots, b_n)$ are proportional. Of course
other equality cases crop up if some of the $a_i, b_i$ vanish, i.e., if
$a_i = b_i = 0$ for some $i$.)
\textbf{Fourth solution:}
We apply induction on $n$, the case $n=1$ being evident.
First we verify the auxiliary inequality
\[
(a^n + b^n)(c^n + d^n)^{n-1} \geq (ac^{n-1} + b d^{n-1})^n
\]
for $a,b,c,d \geq 0$.
The left side can be written as
\begin{align*}
a^n c^{n(n-1)} &+ b^n d^{n(n-1)} \\
&+ \sum_{i=1}^{n-1} \binom{n-1}{i} b^n c^{ni} d^{n(n-1-i)} \\
&+ \sum_{i=1}^{n-1} \binom{n-1}{i-1} a^n c^{n(i-1)} d^{n(n-i)}.
\end{align*}
Applying the weighted AM-GM inequality between matching terms in the two
sums yields
\begin{multline*}
(a^n + b^n)(c^n + d^n)^{n-1} \geq a^n c^{n(n-1)} + b^n d^{n(n-1)} \\
+ \sum_{i=1}^{n-1} \binom{n}{i} a^i b^{n-i} c^{(n-1)i} d^{(n-1)(n-i)},
\end{multline*}
proving the auxiliary inequality.
Now given the auxiliary inequality and the $n-1$ case of the desired
inequality, we apply the auxiliary inequality with $a = a_1^{1/n}$,
$b = b_1^{1/n}$, $c = (a_2 \cdots a_n)^{1/n(n-1)}$,
$d = (b_2 \dots b_n)^{1/n(n-1)}$. The right side will be the $n$-th
power of the desired inequality. The left side comes out to
\[
(a_1 + b_1)((a_2 \cdots a_n)^{1/(n-1)} + (b_2 \cdots b_n)^{1/(n-1)})^{n-1},
\]
and by the induction hypothesis, the second factor is less than or equal
to $(a_2 + b_2)\cdots(a_n+b_n)$. This yields the desired result.
\textbf{Note:}
Equality holds if and only if $a_i=b_i=0$ for some $i$ or if the vectors
$(a_1, \dots, a_n)$ and $(b_1, \dots, b_n)$ are proportional.
As pointed out by Naoki Sato, the problem also appeared on the 1992
Irish Mathematical Olympiad.
It is also a special case of a classical inequality,
known as H\"older's inequality, which generalizes the
Cauchy-Schwarz inequality (this is visible from the $n=2$ case); the
first solution above is adapted from the standard proof of H\"older's
inequality.
We don't know whether the declaration
``Apply H\"older's inequality'' by itself is considered
an acceptable solution to this problem.
\item[A3]
\textbf{First solution:}
Write
\begin{align*}
f(x) &= \sin x + \cos x + \tan x + \cot x + \sec x + \csc x \\
&= \sin x + \cos x + \frac{1}{\sin x \cos x} + \frac{\sin x + \cos x}{\sin x
\cos x}.
\end{align*}
We can write $\sin x + \cos x = \sqrt{2} \cos(\pi/4 - x)$; this suggests
making the substitution $y = \pi/4 - x$. In this new coordinate,
\[
\sin x \cos x = \frac{1}{2} \sin 2x = \frac{1}{2} \cos 2y,
\]
and writing $c = \sqrt{2} \cos y$, we have
\begin{align*}
f(y) &= (1 + c)\left(1 + \frac{2}{c^2 -1} \right) - 1 \\
&= c + \frac{2}{c - 1}.
\end{align*}
We must analyze this function of $c$ in the range $[-\sqrt{2}, \sqrt{2}]$.
Its value at $c=-\sqrt{2}$ is $2 - 3\sqrt{2} < -2.24$, and at
$c = \sqrt{2}$ is $2 + 3\sqrt{2}>6.24$.
Its derivative is $1 - 2/(c-1)^2$, which vanishes when
$(c-1)^2 = 2$, i.e., where $c = 1 \pm \sqrt{2}$. Only the value
$c = 1 - \sqrt{2}$ is in bounds, at which
the value of $f$ is $1-2\sqrt{2} > -1.83$. As for the pole at $c=1$,
we observe that $f$ decreases as $c$ approaches 1 from below
(so takes negative values for all $c<1$) and increases as $c$
approaches 1 from above (so takes positive values for all $c>1$); from
the data collected so far, we see that $f$ has no sign crossings, so
the minimum of $|f|$ is achieved at a critical point of $f$.
We conclude that the minimum of $|f|$ is $2 \sqrt{2} - 1$.
Alternate derivation (due to Zuming Feng): We can also minimize $|c + 2/(c-1)|$
without calculus (or worrying about boundary conditions). For $c>1$, we have
\[
1 + (c-1) + \frac{2}{c-1} \geq 1 + 2 \sqrt{2}
\]
by AM-GM on the last two terms, with equality for $c-1 = \sqrt{2}$
(which is out of range).
For $c<1$, we similarly have
\[
-1 + 1-c + \frac{2}{1-c} \geq -1 + 2\sqrt{2},
\]
here with equality for $1-c = \sqrt{2}$.
\textbf{Second solution:}
Write
\[
f(a,b) = a+b + \frac{1}{ab} + \frac{a+b}{ab}.
\]
Then the problem is to minimize $|f(a,b)|$ subject to the constraint
$a^2+b^2-1 = 0$. Since the constraint region has no boundary, it is
enough to check the value at each critical point and each potential
discontinuity (i.e., where $ab=0$) and select the smallest value
(after checking that $f$ has no sign crossings).
We locate the critical points using the Lagrange multiplier condition:
the gradient of $f$ should be parallel to that of the constraint, which is
to say, to the vector $(a,b)$. Since
\[
\frac{\partial f}{\partial a}
= 1 - \frac{1}{a^2 b} - \frac{1}{a^2}
\]
and similarly for $b$, the proportionality yields
\[
a^2 b^3 - a^3 b^2 + a^3 - b^3 + a^2 - b^2 = 0.
\]
The irreducible factors of the left side are $1+a$, $1+b$,
$a-b$, and $ab-a-b$. So we must check what happens when any of
those factors, or $a$ or $b$, vanishes.
If $1+a = 0$, then $b=0$, and the singularity of $f$ becomes removable
when restricted to the circle. Namely, we have
\[
f = a + b + \frac{1}{a} + \frac{a+1}{ab}
\]
and $a^2+b^2-1 = 0$ implies $(1+a)/b = b/(1-a)$. Thus we have
$f = -2$; the same occurs when $1+b=0$.
If $a-b=0$, then $a=b=\pm \sqrt{2}/2$ and either
$f = 2 + 3 \sqrt{2} > 6.24$, or $f = 2 - 3 \sqrt{2} < -2.24$.
If $a=0$, then either $b = -1$ as discussed above, or $b=1$. In the
latter case, $f$ blows up as one approaches this point, so there cannot
be a global minimum there.
Finally, if $ab-a-b = 0$, then
\[
a^2b^2 = (a + b)^2 = 2ab + 1
\]
and so $ab = 1 \pm \sqrt{2}$. The plus sign is impossible since
$|ab| \leq 1$, so $ab = 1 - \sqrt{2}$ and
\begin{align*}
f(a,b) &= ab + \frac{1}{ab} + 1 \\
&= 1 - 2 \sqrt{2} > -1.83.
\end{align*}
This yields the smallest value of $|f|$ in the list (and indeed no sign
crossings are possible), so $2\sqrt{2}-1$ is the desired minimum of $|f|$.
\textbf{Note:} Instead of using the geometry of the graph of $f$ to rule
out sign crossings, one can verify explicitly that $f$ cannot
take the value 0. In the first solution, note that $c + 2/(c-1)=0$
implies $c^2 - c + 2 = 0$, which has no real roots.
In the second solution, we would have
\[
a^2 b + ab^2 + a + b = -1.
\]
Squaring both sides and simplifying yields
\[
2a^3b^3 + 5a^2b^2 + 4ab = 0,
\]
whose only real root is $ab=0$. But the cases with $ab=0$ do not yield
$f=0$, as verified above.
\item[A4]
We split into three cases.
Note first that $|A| \geq |a|$, by applying the condition for large $x$.
\textbf{Case 1: $B^2 - 4AC > 0$.}
In this case $Ax^2 + Bx + C$ has two distinct real roots $r_1$ and $r_2$.
The condition implies that $ax^2 + bx + c$ also vanishes at $r_1$ and $r_2$,
so $b^2 - 4ac > 0$.
Now
\begin{align*}
B^2 - 4AC &= A^2(r_1-r_2)^2 \\
&\geq a^2(r_1 - r_2)^2 \\
&= b^2 - 4ac.
\end{align*}
\textbf{Case 2: $B^2 - 4AC \leq 0$ and $b^2 - 4ac \leq 0$.}
Assume without loss of generality that $A \geq a > 0$, and that $B=0$
(by shifting $x$). Then $Ax^2 + Bx + C \geq ax^2 + bx + c \geq 0$ for all $x$;
in particular, $C \geq c \geq 0$. Thus
\begin{align*}
4AC - B^2 &= 4AC \\
&\geq 4ac \\
&\geq 4ac - b^2.
\end{align*}
Alternate derivation (due to Robin Chapman): the ellipse
$Ax^2 + Bxy + Cy^2 = 1$ is contained within
the ellipse $ax^2 + bxy + cy^2 = 1$,
and their respective enclosed areas are $2\pi/\sqrt{4AC-B^2}$ and
$2\pi/\sqrt{4ac-b^2}$.
\textbf{Case 3: $B^2 - 4AC \leq 0$ and $b^2 - 4ac > 0$.}
Since $Ax^2 + Bx + C$ has a graph not crossing the $x$-axis,
so do $(Ax^2 + Bx + C) \pm (ax^2 + bx + c)$. Thus
\begin{gather*}
(B-b)^2 - 4(A-a)(C-c) \leq 0, \\
(B+b)^2 - 4(A+a)(C+c) \leq 0
\end{gather*}
and adding these together yields
\[
2(B^2 - 4AC) + 2(b^2 - 4ac) \leq 0.
\]
Hence $b^2 - 4ac \leq 4AC - B^2$, as desired.
\item[A5]
\textbf{First solution:}
We represent a Dyck $n$-path by a sequence $a_1\cdots a_{2n}$, where
each $a_i$ is either $(1,1)$ or $(1,-1)$.
Given an $(n-1)$-path $P=a_1\cdots a_{2n-2}$, we distinguish two cases.
If $P$ has no returns of even length, then let $f(P)$ denote the $n$-path
$(1,1)(1,-1)P$. Otherwise, let $a_ia_{i+1}\cdots a_{j}$ denote the
rightmost even-length return in $P$, and let $f(P)=(1,1)a_1a_2\cdots
a_j(1,-1)a_{j+1}\cdots a_{2n-2}$. Then $f$ clearly maps the set of Dyck
$(n-1)$-paths to the set of Dyck $n$-paths having no even return.
We claim that $f$ is bijective; to see this, we simply construct the
inverse mapping. Given an $n$-path $P$, let $R=a_ia_{i+1}\cdots a_j$
denote the leftmost return in $P$, and let $g(P)$ denote the
path obtained by removing $a_1$ and $a_j$ from $P$. Then evidently
$f \circ g$ and $g \circ f$ are identity maps, proving the claim.
\textbf{Second solution:} (by Dan Bernstein)
Let $C_n$ be the number of Dyck paths of length $n$, let $O_n$ be the number
of Dyck paths whose final return has odd length, and let $X_n$ be the number
of Dyck paths with no return of even length.
We first exhibit a recursion for $O_n$; note that $O_0 = 0$.
Given a Dyck $n$-path
whose final return has odd length, split it just after its next-to-last return.
For some $k$ (possibly zero), this yields
a Dyck $k$-path, an upstep, a Dyck $(n-k-1)$-path whose final return
has even length, and a downstep. Thus for $n \geq 1$,
\[
O_n = \sum_{k=0}^{n-1} C_k (C_{n-k-1} - O_{n-k-1}).
\]
We next exhibit a similar recursion for $X_n$; note that $X_0 = 1$.
Given a Dyck $n$-path with no even return,
splitting as above yields for some $k$
a Dyck $k$-path with no even return,
an upstep, a Dyck $(n-k-1)$-path whose final return has even length,
then a downstep. Thus for $n \geq 1$,
\[
X_n = \sum_{k=0}^{n-1} X_k (C_{n-k-1} - O_{n-k-1}).
\]
To conclude, we verify that $X_n = C_{n-1}$ for $n \geq 1$,
by induction on $n$. This is
clear for $n=1$ since $X_1 = C_0 = 1$. Given $X_k = C_{k-1}$ for $k < n$,
we have
\begin{align*}
X_n &= \sum_{k=0}^{n-1} X_k (C_{n-k-1} - O_{n-k-1}) \\
&= (C_{n-1} - O_{n-1}) + \sum_{k=1}^{n-1} C_{k-1} (C_{n-k-1} - O_{n-k-1}) \\
&= C_{n-1} - O_{n-1} + O_{n-1} = C_{n-1},
\end{align*}
where the last step uses the recursion for $O_{n-1}$. This completes the
induction.
%% NOTE(review): a span of the source is missing at this point (the
%% remainder of A5, apparently problems A6 and B1--B5, and the beginning
%% of B6). The text below resumes in the middle of a solution to B6,
%% which reduces the integral inequality to the discrete analogue
%% $\frac{1}{n^2} \sum_{i,j} |x_i + x_j| \geq \frac{1}{n} \sum_i |x_i|$,
%% proved by induction on the number of distinct nonzero values among
%% the $|x_i|$.
For the induction step, let $s$ be the smallest nonzero value among the
$|x_i|$; we may assume that $x_1 = \cdots = x_a = s$,
$x_{a+1} = \cdots = x_{a+b} = -s$, and that
for $i > a+b$, either $x_i = 0$ or $|x_i| > s$. (One of $a,b$ might be
zero.)
Now consider
\[
f(\overbrace{t, \cdots,t}^{\mbox{$a$ terms}},\overbrace{-t,\cdots,-t}^{\mbox{$b$ terms}},x_{a+b+1},\cdots, x_n)
\]
as a function of $t$.
It is piecewise linear near $s$; in fact, it is
linear between 0 and
the smallest nonzero value among $|x_{a+b+1}|, \dots, |x_n|$
(which exists by hypothesis). Thus its minimum is achieved by one (or both)
of those two endpoints. In other words, we can reduce the
number of distinct nonzero absolute values among the $x_i$ without
increasing $f$. This
yields the induction, pending verification of the base case.
As for the base case, suppose that $x_1 = \cdots = x_a = s > 0$,
$x_{a+1} = \cdots = x_{a+b} = -s$, and $x_{a+b+1} = \cdots = x_n = 0$.
(Here one or even both of $a,b$ could be zero, though the latter case
is trivial.)
Then
\begin{multline*}
f(x_1, \dots, x_n) = \frac{s}{n^2} (2a^2 + 2b^2 + 2(a+b)(n-a-b)) \\
- \frac{s}{n} (a+b) = \frac{s}{n^2} ((a-b)^2 + (a+b)(n-a-b)) \geq 0.
\end{multline*}
This proves the base case of the induction, completing the solution
of the discrete analogue.
To deduce the original statement from the discrete analogue,
approximate both integrals
by equally-spaced Riemann sums and take limits. This works because
given a continuous function
on a product of closed intervals,
any sequence of Riemann sums with mesh size tending to zero
converges to the integral. (The domain is compact, so
the function is uniformly continuous. Hence for any $\epsilon > 0$
there is a cutoff below
which any mesh size forces the
discrepancy between the Riemann sum and the integral to be less than
$\epsilon$.)
Alternate derivation (based on a solution by Dan Bernstein):
from the discrete analogue, we have
\[
\sum_{1 \leq i < j \leq n} |f(x_i) + f(x_j)| \geq \frac{n-2}{2}
\sum_{i=1}^{n} |f(x_i)|
\]
for all $x_1, \dots, x_n \in [0,1]$. Integrating both sides as
$(x_1, \dots, x_n)$ runs over $[0,1]^n$ yields
\[
\frac{n(n-1)}{2} \int_{0}^{1} \int_{0}^{1} |f(x) + f(y)|\,dx\,dy \geq
\frac{n(n-2)}{2} \int_{0}^{1} |f(x)|\,dx,
\]
or
\[
\int_{0}^{1} \int_{0}^{1} |f(x)+f(y)|\,dx\,dy \geq \frac{n-2}{n-1}
\int_{0}^{1} |f(x)|\,dx.
\]
Taking the limit as $n \to \infty$ now yields the desired result.
\textbf{Third solution:} (by David Savitt)
We give an argument which yields the following improved result.
Let $\mu_p$ and $\mu_n$ be the measures of the sets
$\{ x \ : \ f(x) > 0\}$ and
$\{ x \ : \ f(x) < 0\}$ respectively, and let $\mu \le 1/2$ be
$\min(\mu_p,\mu_n)$. Then
\begin{align*}
& \int_{0}^{1} \int_{0}^{1} |f(x) + f(y)|\,dx\,dy \\
&\ge (1 + (1-2\mu)^2)
\int_{0}^{1} |f(x)|\,dx.
\end{align*}
Note that the constant can be seen to be best possible by considering a
sequence of functions tending towards the step function which is $1$ on
$[0,\mu]$ and $-1$ on $(\mu,1]$.
Suppose without loss of generality that $\mu = \mu_p$. As in the second
solution, it suffices to prove a
strengthened discrete analogue, namely
\[
\frac{1}{n^2} \sum_{i,j} |a_i + a_j| \ge
\left(1 + \left(1 - \frac{2p}{n}\right)^2\right)\left(\frac{1}{n}
\sum_{i=1}^{n} |a_i| \right),
\]
where $p \le n/2$ is the number of $a_1,\ldots,a_n$ which are positive.
(We need only make sure to choose meshes so that $p/n \to \mu$ as
$n \to \infty$.)
An equivalent inequality is
\[
\sum_{1 \le i < j \le n} | a_i + a_j | \ge \left(n - 1 - 2p +
\frac{2p^2}{n}\right) \sum_{i=1}^{n} | a_i |.
\]
Write $r_i = |a_i|$, and assume without loss of generality that $r_i \ge
r_{i+1}$ for each $i$. Then for $i 2p$, then
\[
-2 p_k (k-p_k) + \binom{k}{2} \ge -2 p(k-p) + \binom{k}{2}
\]
since $p_k$ is at most $p$. Define $Q_k$ to
be $- \lfloor \frac{k}{2} \rfloor$ if $k \le
2p$ and $-2 p(k-p) + \binom{k}{2}$ if $k \ge 2p$, so that $P_k \ge Q_k$. Note
that $Q_1=0$.
Partial summation gives
\begin{align*}
\sum_{j = 1}^n r_j C_j & = r_n P_n + \sum_{j=2}^{n} (r_{j-1} - r_j)
P_{j-1} \\
& \ge r_n Q_n + \sum_{j=2}^{n} (r_{j-1} - r_j) Q_{j-1} \\
& = \sum_{j=2}^{n} r_j (Q_j - Q_{j-1}) \\
& = - r_2 - r_4 - \cdots - r_{2p} + \sum_{j = 2p+1}^{n} (j-1-2p) r_j.
\end{align*}
It follows that
\begin{align*}
\sum_{1 \le i < j \le n} | a_i + a_j |
&= \sum_{i = 1}^n (n - i) r_i + \sum_{j = 1}^n r_j C_j \\
& \ge \sum_{i = 1}^{2p} (n - i - [ i \ \text{even}]) r_i \\
&\quad + \sum_{i = 2p+1}^{n} (n - 1 - 2p) r_i \\
& = (n-1-2p) \sum_{i=1}^{n} r_i \\
&\quad + \sum_{i=1}^{2p} (2p + 1 - i - [i \ \text{even}]) r_i \\
& \ge (n-1-2p) \sum_{i=1}^{n} r_i + p \sum_{i=1}^{2p} r_i \\
& \ge (n-1-2p) \sum_{i=1}^{n} r_i + p\frac{2p}{n} \sum_{i=1}^{n} r_i\,,
\end{align*}
as desired. The next-to-last and last inequalities each follow from the
monotonicity of the $r_i$'s, the former by pairing the $i^{\textrm{th}}$
term with the $(2p+1-i)^{\textrm{th}}$.
\textbf{Note:}
Compare the closely related Problem 6 from the 2000 USA Mathematical
Olympiad: prove that
for any nonnegative real numbers $a_1, \dots, a_n, b_1, \dots, b_n$,
one has
\[
\sum_{i,j=1}^n \min\{a_i a_j,b_i b_j\} \leq
\sum_{i,j=1}^n \min\{a_i b_j,a_j b_i\}.
\]
\end{itemize}
\end{document}