From 6ce1db1f867c545ebdb1afb705580514b356f883 Mon Sep 17 00:00:00 2001
From: Holden Rohrer
Date: Wed, 5 Jan 2022 00:02:12 -0500
Subject: more math stuff
---
li/Makefile | 19 +++
li/hw10.tex | 157 +++++++++++++++++
li/hw9.tex | 185 ++++++++++++++++++++
li/matlab_hw.py | 512 ++++++++++++++++++++++++++++++++++++++++++++++----------
4 files changed, 781 insertions(+), 92 deletions(-)
create mode 100644 li/Makefile
create mode 100644 li/hw10.tex
create mode 100644 li/hw9.tex
diff --git a/li/Makefile b/li/Makefile
new file mode 100644
index 0000000..76cbdb8
--- /dev/null
+++ b/li/Makefile
@@ -0,0 +1,19 @@
+.POSIX:
+
+final.pdf: matlab_hw.pdf output.pdf
+ pdftk matlab_hw.pdf output.pdf output final.pdf
+
+matlab_hw.pdf: matlab_hw.ps
+ ps2pdf matlab_hw.ps
+
+output.pdf: output.ps
+ ps2pdf output.ps
+
+matlab_hw.ps: matlab_hw.py
+ vim -R matlab_hw.py -c "hardcopy > matlab_hw.ps | q"
+
+output.ps: output.txt
+ vim -R output.txt -c "hardcopy > output.ps | q"
+
+output.txt: matlab_hw.py
+ python3 matlab_hw.py > output.txt
diff --git a/li/hw10.tex b/li/hw10.tex
new file mode 100644
index 0000000..2b372de
--- /dev/null
+++ b/li/hw10.tex
@@ -0,0 +1,157 @@
+\def\bmatrix#1{\left[\matrix{#1}\right]}
+\def\dmatrix#1{\left|\matrix{#1}\right|}
+\def\fr#1#2{{#1\over #2}}
+
+ {\bf Section 6.1}
+
+\noindent{\bf 5.}
+
+$$A = \bmatrix{1&b\cr b&9}.$$
+
+{\it (a)}
+$a_{11} = 1 > 0,$
+and $\det A = 9 - b^2 > 0$ when $-3 < b < 3.$
+
+{\it (b)}
+
+$$A = \bmatrix{1&0\cr b&1}\bmatrix{1&b\cr 0&9-b^2}
+ = \bmatrix{1&0\cr b&1}\bmatrix{1&0\cr 0&9-b^2}\bmatrix{1&b\cr 0&1}
+$$
+
+{\it (c)}
+
+$(x-v)^TA(x-v) + c = x^TAx - 2x^TAv + v^TAv + c$ lets us generate
+arbitrary first- and zeroth-order terms while retaining the same
+second-order terms ($v^TAx = x^TAv$ by symmetry), since $x^TAx$ holds
+the only second-order terms.
+
+$$\fr12(x^2+2bxy+9y^2)-y = \fr12(x^TAx - 2x^TAv + v^TAv + c) \Rightarrow
+y = x^TAv - \fr12(v^TAv + c) \Rightarrow
+y = x^TAv,\ c = -v^TAv.$$
+
+$$y = x^TAv \Rightarrow \bmatrix{0\cr1} = Av \Rightarrow
+v = \bmatrix{-b/(9-b^2)\cr 1/(9-b^2)},$$
+by row reduction.
+
+The minimum is
+$$\fr12c = -\fr12v^TAv = -\fr12v^T\bmatrix{0\cr1} = \fr1{2(b^2-9)},$$
+when $A$ is positive definite ($|b| < 3$).
+
+{\it (d)}
+
+There is no minimum if $b=3$ because $x^2+2bxy+9y^2 = x^2+6xy+9y^2 =
+(x+3y)^2$ is zero where $y = -x/3,$ giving nonzero values of $y$ with a
+zero quadratic component and therefore arbitrarily small values of
+$\fr12(x^2+2bxy+9y^2) - y.$
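+For example, along $(x,y) = (-3t,t)$ the function is
+$\fr12(x+3y)^2 - y = -t,$ which is unbounded below as $t\to\infty.$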
+
+ {\bf Section 6.2}
+
+\noindent{\bf 4.}
+
+Since $A$ is positive definite, each eigenvalue $\lambda > 0$ of $A$
+with eigenvector $v$ gives $A^2v = \lambda^2v,$ so $A^2$ has eigenvalue
+$\lambda^2 > 0$ and no others (because the complete set of eigenvectors,
+guaranteed by the symmetry of $A^2,$ spans $R^n$).
+
+Similarly, $A^{-1}$ is symmetric ($(A^{-1}A)^T = A^T(A^{-1})^T =
+A(A^{-1})^T = I,$ using $A^T = A,$ so $(A^{-1})^T = A^{-1}$), so its
+eigenvectors also span $R^n,$
+and $Av = \lambda v \to A^{-1}Av = A^{-1}\lambda v \to (1/\lambda)v =
+A^{-1}v,$ and $\lambda > 0 \to 1/\lambda > 0,$ so it is also
+positive-definite.
+
+\noindent{\bf 24.}
+
+Letting $s=0$ gives us a set of eigenvalues with minimum
+$\lambda_{\rm min},$ and the set of eigenvalues for arbitrary $s$ has
+minimum $\lambda_{\rm min}+s > 0 \to s > -\lambda_{\rm min}.$
+$$
+\bmatrix{0&-4&-4\cr -4&0&-4\cr -4&-4&0} \to \lambda = 4,-8 \to s > 8.$$
+$$
+\bmatrix{0&3&0\cr 3&0&4\cr 0&4&0} \to \lambda = -5,0,5 \to s > 5.$$
+
+\noindent{\bf 27.}
+
+$$A = CC^T = \bmatrix{3&0\cr1&2}\bmatrix{3&1\cr0&2} = \bmatrix{9&3\cr
+3&5}.$$
+
+$$A = \bmatrix{4&8\cr8&25} = \bmatrix{2&0\cr 4&3}\bmatrix{2&4\cr 0&3}.$$
+(This comes from $A = LDL^T$: elimination gives
+$U = \bmatrix{4&8\cr0&9}$ in the LU decomposition, so $D =
+\bmatrix{4&0\cr0&9}$ and $\sqrt D = \bmatrix{2&0\cr0&3}.$)
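+Explicitly,
+$$C = L\sqrt D = \bmatrix{1&0\cr2&1}\bmatrix{2&0\cr0&3}
+= \bmatrix{2&0\cr4&3}.$$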
+
+\noindent{\bf 31.}
+
+$A$ has eigenvalue $3$ with eigenvectors $(1,-1,0)^T$ and
+$(1,1,-2)^T$ (the third eigenvalue is $0$), giving
+$$x^TAx = \fr32(x-y)^2+\fr12(x+y-2z)^2.$$
+
+$$x^TBx = x^T\bmatrix{x+y+z\cr x+y+z\cr x+y+z} = (x+y+z)^2.$$
+
+\goodbreak
+ {\bf Section 6.3}
+
+\noindent{\bf 12.}
+
+{\it (a)}
+
+If $A' = 4A,$ then $\Sigma' = 4\Sigma,$ because $(A')^TA' = 4A^T\cdot4A =
+16A^TA,$ meaning the diagonal of $\Sigma'$ has values $4$ times those
+of $\Sigma.$
+
+{\it (b)}
+
+The SVD of $A^T$ is $V\Sigma U^T,$ by $(AB)^T = B^TA^T$ and the fact
+that $\Sigma$ is diagonal, hence symmetric ($U$ and $V$ remain
+orthogonal, because the transpose of an orthogonal matrix is
+orthogonal).
+The SVD of $A^{-1} = A^+$ is $V\Sigma^{-1} U^T,$ because the
+pseudoinverse is the complete inverse for an invertible matrix.
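+Explicitly, $A^T = (U\Sigma V^T)^T = V\Sigma^T U^T = V\Sigma U^T,$ and
+$$(U\Sigma V^T)(V\Sigma^{-1}U^T) = U\Sigma\Sigma^{-1}U^T = UU^T = I.$$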
+
+\noindent{\bf 15.}
+
+$$A = \bmatrix{1&1&1&1} = \bmatrix{1}\bmatrix{2&0&0&0}
+ \bmatrix{1/2&1/2&1/2&1/2\cr
+ 1/2&-1/2&1/2&-1/2\cr
+ 1/2&1/2&-1/2&-1/2\cr
+ 1/2&-1/2&-1/2&1/2\cr}$$
+(the last three rows are one orthonormal completion of the first row).
+
+$$A^+ = \bmatrix{1/2&1/2&1/2&1/2\cr
+ 1/2&-1/2&1/2&-1/2\cr
+ 1/2&1/2&-1/2&-1/2\cr
+ 1/2&-1/2&-1/2&1/2\cr}\bmatrix{1/2\cr 0\cr 0\cr 0}
+ \bmatrix{1}
+ = \bmatrix{1/4\cr1/4\cr1/4\cr1/4}.
+$$
+
+$$B = \bmatrix{0&1&0\cr 1&0&0} = \bmatrix{1&0\cr0&1}\bmatrix{1&0&0\cr0&1&0}
+ \bmatrix{0&1&0\cr1&0&0\cr0&0&1}
+$$
+
+$$B^+ = \bmatrix{0&1&0\cr 1&0&0\cr 0&0&1}\bmatrix{1&0\cr0&1\cr0&0}
+ \bmatrix{1&0\cr0&1}
+= \bmatrix{0&1\cr1&0\cr0&0}$$
+
+$$C = \bmatrix{1&1\cr 0&0} =
+\bmatrix{1&0\cr0&1}\bmatrix{\sqrt2&0\cr0&0}\bmatrix{1/\sqrt2&1/\sqrt2\cr1/\sqrt2&-1/\sqrt2}$$
+
+$$C^+ =
+\bmatrix{1/\sqrt2&1/\sqrt2\cr1/\sqrt2&-1/\sqrt2}\bmatrix{1/\sqrt2&0\cr0&0}
+\bmatrix{1&0\cr0&1} =
+\bmatrix{1/2&0\cr1/2&0}$$
+
+\noindent{\bf 18.}
+
+With $r_i$ as the $i$th row of $A,$ $\hat x = c_1r_1 + c_3r_3.$
+
+$$A^TA\hat x = A^Tb =
+A^T\bmatrix{1&0&0\cr1&0&0\cr1&1&1}(c_1\bmatrix{1\cr0\cr0} +
+c_3\bmatrix{1\cr1\cr1}) =
+\bmatrix{1&1&1\cr0&0&1\cr0&0&1}(c_1\bmatrix{1\cr1\cr1} +
+c_3\bmatrix{1\cr1\cr3}) = $$$$
+c_1\bmatrix{3\cr1\cr1} + c_3\bmatrix{5\cr3\cr3}
+= \bmatrix{1&1&1\cr0&0&1\cr0&0&1}\bmatrix{0\cr2\cr2} =
+\bmatrix{4\cr2\cr2} = (1/2)\bmatrix{3\cr1\cr1} +
+(1/2)\bmatrix{5\cr3\cr3} \to \hat x = \bmatrix{1\cr1/2\cr1/2}
+$$
+
+\bye
diff --git a/li/hw9.tex b/li/hw9.tex
new file mode 100644
index 0000000..ee99439
--- /dev/null
+++ b/li/hw9.tex
@@ -0,0 +1,185 @@
+\def\bmatrix#1{\left[\matrix{#1}\right]}
+\def\dmatrix#1{\left|\matrix{#1}\right|}
+\def\fr#1#2{{#1\over #2}}
+
+ {\bf Section 5.4}
+
+\noindent{\bf 8.}
+
+$${dr\over dt} = 4r-2w.$$
+$${dw\over dt} = r+w.$$
+$${du\over dt} = \bmatrix{4&-2\cr1&1}u.$$
+
+The system has eigenvalues $\lambda = 2, 3,$ with eigenvectors
+$(1, 1)^T$ and $(2, 1)^T.$
+
+{\it (a)} This system is unstable because all eigenvalues are greater
+than zero.
+
+{\it (b)} $$\bmatrix{300\cr 200} = 100\bmatrix{1\cr 1} +
+100\bmatrix{2\cr 1},$$
+giving
+$$u = \bmatrix{1&2\cr 1&1}\bmatrix{e^{2t}&0\cr 0&e^{3t}}\bmatrix{100\cr100} =
+\bmatrix{100e^{2t} + 200e^{3t}\cr 100e^{2t} + 100e^{3t}}.$$
+
+{\it (c)}
+
+The dominant growth is $e^{3t},$ so $u\approx100e^{3t}(2,1)^T$ for
+large $t$ and the ratio will eventually be $2$ to $1.$
+
+\noindent{\bf 24.}
+
+$v+w$ is constant if $${d\over dt}(v+w) = {dv\over dt} + {dw\over dt} =
+w-v + v-w = 0.$$
+
+$${du\over dt} = \bmatrix{-1&1\cr1&-1}u,$$
+with eigenvalues $-2$ and $0$ and corresponding eigenvectors $(1, -1)^T$
+and $(1, 1)^T$ respectively. This gives a particular solution (with
+$v(0) = 30$ and $w(0) = 10$)
+$$u = \bmatrix{1&1\cr1&-1}\bmatrix{20\cr10e^{-2t}} = \bmatrix{1\cr
+1}20 + \bmatrix{1\cr -1}10e^{-2t}$$
+
+\noindent{\bf 42.}
+
+The reason this (falsely) appears to work is that the right-hand side
+is still multiplying
+$$u = \bmatrix{x\cr y},$$ while the left side has become
+$${du\over dt} = \bmatrix{dy/dt\cr dx/dt}.$$
+To make this correct, there should also be a column exchange
+(changing $u$ to represent $(y, x)^T$),
+giving
+$$\bmatrix{2&-2\cr-4&0},$$
+which is unstable.
+
+ {\bf Section 5.5}
+
+\noindent{\bf 11.}
+
+$$P = 0\bmatrix{1/\sqrt2\cr -1/\sqrt2}\bmatrix{1/\sqrt2&-1/\sqrt2}
+ + 1\bmatrix{1/\sqrt2\cr 1/\sqrt2}\bmatrix{1/\sqrt2&1/\sqrt2}.$$
+
+$$Q = 1\bmatrix{1/\sqrt2\cr -1/\sqrt2}\bmatrix{1/\sqrt2&-1/\sqrt2}
+ -1\bmatrix{1/\sqrt2\cr 1/\sqrt2}\bmatrix{1/\sqrt2&1/\sqrt2}.$$
+
+$$R = 5\bmatrix{2/\sqrt5\cr 1/\sqrt5}\bmatrix{2/\sqrt5&1/\sqrt5}
+ -5\bmatrix{1/\sqrt5\cr -2/\sqrt5}\bmatrix{1/\sqrt5&-2/\sqrt5}.$$
+
+\noindent{\bf 13.}
+
+{\it (a)}
+
+The eigenvectors $u,$ $v,$ and $w$ are pairwise orthogonal because $A$
+is symmetric.
+
+{\it (b)}
+The null space is $\mathop{\rm sp}(u)$ because $u$ is, by definition,
+in $\mathop{\rm null}(A-0I),$ and the column space is $\mathop{\rm
+sp}(v,w)$ because an eigenvector with a nonzero eigenvalue is a
+multiple of an output of the matrix (e.g.\ $v = A(v/1)$).
+The left null space is also $\mathop{\rm sp}(u)$ by
+orthogonality to the column space, and the row space is $\mathop{\rm
+sp}(v,w)$ by orthogonality to the null space.
+
+{\it (c)}
+
+No: $x+u$ is also a solution, since $A(x+u) = Ax + Au = v + w + 0 =
+v + w.$
+
+{\it (d)}
+
+If $b\in\mathop{\rm sp}(v,w),$ this equation has a solution, by
+definition of the column space.
+
+{\it (e)}
+
+$S^TS = I$ because the columns are orthonormal: the dot product of
+distinct columns is zero, and the dot product of each column with
+itself is one. Thus, $S^{-1} = S^T.$
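+With columns $q_i,$ entrywise
+$$(S^TS)_{ij} = q_i^Tq_j = \cases{1&if $i=j$\cr 0&otherwise}.$$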
+
+$$S^{-1}AS = \Lambda,$$
+where $\Lambda$ is the diagonal matrix of eigenvalues
+$$\bmatrix{0&0&0\cr 0&1&0\cr 0&0&2}.$$
+
+\noindent{\bf 14.}
+
+$A$ is orthogonal (all columns have norm one and are orthogonal to each
+other), permutation (all rows and columns have exactly one non-zero
+entry which is a one) and therefore invertible, diagonalizable (on the
+complex numbers), and Markov.
+By row reducing $A-\lambda I,$ we get
+$$\bmatrix{-\lambda&1&0&0\cr
+ 0&-\lambda&1&0\cr
+ 0&0&-\lambda&1\cr
+ 0&0&0&-\lambda+\lambda^{-3}},$$
+which has determinant equal to the product of the diagonal,
+$(-\lambda)^3(-\lambda+\lambda^{-3}),$ so the characteristic polynomial
+is
+$$\lambda^4 - 1 = 0,$$
+giving $A$ eigenvalues at the fourth roots of unity, $\{1,-1,i,-i\}.$
+
+$B$ is a projection (because $B^2 = B$ and $B$ is symmetric), Hermitian
+(real symmetric), rank-1 (because all columns are identical),
+diagonalizable (symmetric matrices always are), and Markov (all columns
+sum to one).
+Rank-1/singular excludes orthogonal and invertible.
+$B$ has eigenvalues $0$ and $1.$
+
+ {\bf Section 5.6}
+
+\noindent{\bf 30.}
+
+$M$ is the matrix which transforms the eigenvectors of $A$ to the
+corresponding eigenvectors (based on eigenvalues) of $B.$
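+If $Mx_A = x_B$ whenever $Ax_A = \lambda x_A$ and $Bx_B = \lambda x_B,$
+then
+$$(MAM^{-1})x_B = MAx_A = \lambda Mx_A = \lambda x_B,$$
+so $B = MAM^{-1}$ (equivalently, $M^{-1}BM = A$).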
+
+{\it (a)}
+$$M = M^{-1} = \bmatrix{0&1\cr1&0}.$$
+$$B = M^{-1}AM =
+\bmatrix{0&1\cr1&0}\bmatrix{0&1\cr0&1}\bmatrix{0&1\cr1&0} =
+\bmatrix{0&1\cr0&1}\bmatrix{0&1\cr1&0} = \bmatrix{1&0\cr1&0}.$$
+
+{\it (b)}
+
+$$M = M^{-1} = \bmatrix{1&0\cr0&-1}.$$
+$$B = M^{-1}AM =
+\bmatrix{1&0\cr0&-1}\bmatrix{1&1\cr1&1}\bmatrix{1&0\cr0&-1} =
+\bmatrix{1&0\cr0&-1}\bmatrix{1&-1\cr1&-1} =
+\bmatrix{1&-1\cr-1&1}.$$
+
+{\it (c)}
+
+$$A = \bmatrix{1&2\cr3&4}$$
+has characteristic polynomial $\lambda^2 - 5\lambda - 2 = 0,$ so
+$\lambda = 5/2\pm\sqrt{33}/2,$
+corresponding to eigenvectors $(\fr16(-3+\sqrt33), 1)^T$ and
+$(\fr16(-3-\sqrt33),1)^T$ in decreasing order.
+$$B = \bmatrix{4&3\cr2&1},$$
+with the same eigenvalues but corresponding eigenvectors (also in
+decreasing order of eigenvalue)
+$(\fr14(3+\sqrt33),1)$ and
+$(\fr14(3-\sqrt33),1).$
+
+This corresponds to
+$$M = \bmatrix{3/2&3/2\cr0&1}.$$
+For this $M,$ $B = MAM^{-1}.$
+
+\noindent{\bf 32.}
+
+There is the zero family (with one matrix), the family with all ones
+(with one matrix), the permutation matrix $\bmatrix{0&1\cr1&0}$ (with
+a family of one), and the identity matrix (with one in its family).
+
+Then, there are the families with two members: those with one nonzero
+entry on the antidiagonal, like
+$$\bmatrix{0&1\cr0&0},$$
+those with a full diagonal and one entry on the antidiagonal, like
+$$\bmatrix{1&1\cr0&1},$$
+and those with a full antidiagonal and one entry on the diagonal, like
+$$\bmatrix{1&1\cr1&0}.$$
+
+Then, there is the largest family, with six members, consisting of one
+entry on the diagonal and zero or one entry on the antidiagonal, like
+$$\bmatrix{1&1\cr0&0}.$$
+
+I found these distinct families by counting the possible
+characteristic polynomials, based on the trace taking a value in
+$\{0,1,2\}$ and the determinant in $\{-1,0,1\},$ and then splitting
+matrices with repeated eigenvalues by whether they are diagonalizable.
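+The polynomials that occur are $\lambda^2$ (the zero matrix and the
+one-antidiagonal-entry family), $\lambda^2-2\lambda$ (all ones),
+$\lambda^2-\lambda$ (the six-member family), $\lambda^2-1$ (the
+permutation), $\lambda^2-\lambda-1$ (full antidiagonal plus one
+diagonal entry), and $(\lambda-1)^2$ (the identity, and full diagonal
+plus one antidiagonal entry).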
+
+\bye
diff --git a/li/matlab_hw.py b/li/matlab_hw.py
index 3473934..cf20709 100644
--- a/li/matlab_hw.py
+++ b/li/matlab_hw.py
@@ -1,12 +1,22 @@
import numpy as np
+from time import perf_counter
import scipy.linalg
+import pandas as pd
+import sympy
+
+timer_problems = True
+regression_problems = True
+np.set_printoptions(precision=3, suppress=True)
+# if you want to run the regression problems, you need this dataset:
+# `insurance.csv` in the current directory from
+# https://www.kaggle.com/mirichoi0218/insurance/version/1
-print("Section 1.3: #30")
+print("\nSection 1.3: #30")
coeff = (
- [[ 1, 1, 1 ],
- [ 1, 2, 2 ],
- [ 2, 3, -4]]
+ ( 1, 1, 1 ),
+ ( 1, 2, 2 ),
+ ( 2, 3, -4)
)
print(
np.linalg.solve(
@@ -20,7 +30,7 @@ np.linalg.solve(
[7, 10, 3])
)
-print("Section 1.3: #32")
+print("\nSection 1.3: #32")
dimension = 3
tot = [0]*3
@@ -31,7 +41,7 @@ for x in range(ct):
tot[i] += U[i][i]
print([x/ct for x in tot]);
-print("Section 1.4: #21")
+print("\nSection 1.4: #21")
A = [[.5, .5], [.5, .5]]
B = [[1, 0], [0, -1]]
@@ -52,20 +62,66 @@ print("C^3")
print(np.linalg.matrix_power(C, 3))
print("C^k = 0")
-print("Section 1.4: #59")
+print("\nSection 1.4: #59")
+
+v = [3,4,5]
+A = np.identity(3)
+print(f"A * v = {np.matmul(A, v)}")
+print(f"v' * v = {np.dot(v,v)}")
+print(f"v * A = {np.matmul(v, A)}")
+
+print("\nSection 1.4: #60")
+
+print(f"Av = {np.matmul(np.ones((4,4)), np.ones((4,1)))}")
+print(f"Bw = {np.matmul(np.identity(4)+np.ones((4,4)), np.zeros((4,1))+2*np.ones((4,1)))}")
+
+print("\nSection 1.6: #12")
+
+tridiag_mask = np.identity(11)
+i,j = np.indices(tridiag_mask.shape)
+tridiag_mask[i==j+1] = 1
+tridiag_mask[i==j-1] = 1
+tridiag_preserved = True
+triangle_preserved = True
+sym_preserved = True
+
+for x in range(1000):
+ a = np.random.rand(11,11)
+ inv = np.linalg.inv(a*tridiag_mask)
+ if (inv != inv*tridiag_mask).any():
+ tridiag_preserved = False
+
+ triu = np.triu(a)
+ inv = np.linalg.inv(triu)
+ if (inv != np.triu(inv)).any() and (inv != np.tril(inv)).any():
+ triangle_preserved = False
+
+ sym = np.transpose(triu) + triu - triu*np.identity(11)
+ inv = np.linalg.inv(sym)
+ if not np.allclose(inv, np.transpose(inv)):
+ sym_preserved = False
-print("A * v = [3, 4, 5]")
-print("v' * v = 50")
-print(f"v * A = {np.matmul([3,4,5], np.identity(3))}")
+if triangle_preserved:
+ print("Triangular matrices' inverses are triangular.")
+else:
+ print("Triangular matrices' inverses are not triangular.")
-print("Section 1.4: #60")
+if sym_preserved:
+ print("Symetric matrices' inverses are symmetric.")
+else:
+ print("Symmetric matrices' inverses are not symmertic.")
-print("Av = [4. 4. 4. 4.]")
-print("Bw = [10. 10. 10. 10.]")
+if tridiag_preserved:
+ print("Tridiagonal matrices' inverses are tridiagonal.")
+else:
+ print("Tridiagonal matrices' inverses are not tridiagonal.")
-print("Section 1.6: #12")
+print("Entries being integers isn't preserved. This is trivially shown"
+" the 1x1 [ 2. ] matrix's inverse [ 1/2. ]")
+print("Entries being fractions (rationals) is preserved because the"
+" matrix augmented with an identity and then reduced only requires"
+" rational multiplications.")
-print("Section 1.6: #32")
+print("\nSection 1.6: #32")
print("the inverse of the first matrix is")
print(np.linalg.inv(5*np.identity(4) - np.ones((4,4))))
@@ -75,9 +131,9 @@ print(np.matmul(np.ones((4,4)),np.ones((4,4))))
print("so we can solve this like the following equation: (a*eye +\n\
b*ones)(c*eye + d*ones) = ac*eye + (ad+bc+dimension*bd)*ones = eye")
print("c = 1/a. d = -b/a/(a+dimension*b), giving for the next matrix")
-print("a = 1/6, d = 6/(1/6+5*-1)") # WRONG
+print("a = 1/6, d = 6/(1/6+5*-1)") # WRONG (TODO?)
-print("Section 1.6: #47")
+print("\nSection 1.6: #47")
print("I'm not sure. This is Python. It says (in either case):")
print("numpy.linalg.LinAlgError: Singular matrix")
@@ -86,54 +142,49 @@ print(np.linalg.solve(np.ones((4, 4)), np.random.rand(4, 1)))
print(np.linalg.solve(np.ones((4, 4)), np.ones((4, 1))))
'''
-''' #68
-dimension = 500
-northwest = np.random.rand(dimension, dimension)
-for x in range(0, dimension):
- for y in range(x+1, dimension):
- northwest[y][x] = 0
-print(northwest)
-'''
-print("Section 1.6: #69")
-
-
-from time import perf_counter
-matrix = np.random.rand(500, 500)
-start = perf_counter()
-np.linalg.inv(matrix)
-first_time = perf_counter() - start
-print("500x500 inverse:", str(first_time) + "s")
-matrix = np.random.rand(1000, 1000)
-start = perf_counter()
-np.linalg.inv(matrix)
-second_time = perf_counter() - start
-print("1000x1000 inverse:", str(second_time) + "s")
-print("factor:", second_time / first_time)
-
-print("Section 1.6: #70")
-
-dimension = 1000
-I = np.identity(dimension)
-A = np.random.rand(dimension, dimension)
-U = np.random.rand(dimension, dimension)
-for x in range(0, dimension):
- for y in range(x+1, dimension):
- U[y][x] = 0
-
-start = perf_counter()
-np.linalg.inv(U)
-print("inverse of U:", str(perf_counter() - start) + "s")
-start = perf_counter()
-np.linalg.solve(U, I)
-print("U\I:", str(perf_counter() - start) + "s")
-start = perf_counter()
-np.linalg.inv(A)
-print("inverse of A:", str(perf_counter() - start) + "s")
-start = perf_counter()
-np.linalg.solve(A, I)
-print("A\I:", str(perf_counter() - start) + "s")
-
-print("Section 1.6: #71")
+if timer_problems:
+ print("\nSection 1.6: #69")
+
+ matrix = np.random.rand(500, 500)
+ start = perf_counter()
+ np.linalg.inv(matrix)
+ first_time = perf_counter() - start
+ print("500x500 inverse:", str(first_time) + "s")
+ matrix = np.random.rand(1000, 1000)
+ start = perf_counter()
+ np.linalg.inv(matrix)
+ second_time = perf_counter() - start
+ print("1000x1000 inverse:", str(second_time) + "s")
+ print("factor:", second_time / first_time)
+ print("This matches theory. We also expect that almost every random"
+ " matrix is invertible")
+
+ print("\nSection 1.6: #70")
+
+ dimension = 1000
+ I = np.identity(dimension)
+ A = np.random.rand(dimension, dimension)
+ U = np.random.rand(dimension, dimension)
+ for x in range(0, dimension):
+ for y in range(x+1, dimension):
+ U[y][x] = 0
+
+ start = perf_counter()
+ np.linalg.inv(U)
+ print("inverse of U:", str(perf_counter() - start) + "s")
+ start = perf_counter()
+ np.linalg.solve(U, I)
+ print("U\I:", str(perf_counter() - start) + "s")
+ start = perf_counter()
+ np.linalg.inv(A)
+ print("inverse of A:", str(perf_counter() - start) + "s")
+ start = perf_counter()
+ np.linalg.solve(A, I)
+ print("A\I:", str(perf_counter() - start) + "s")
+
+ print("There is no significant performance deviation in Python.")
+
+print("\nSection 1.6: #71")
def proof(dim):
L = np.identity(dim) - np.diag([x/(1+x) for x in range(1,dim)], -1)
@@ -141,19 +192,17 @@ def proof(dim):
Linv = np.linalg.inv(L)
print("L^{-1}:")
print(Linv)
- right = True
for j in range(1,dim+1):
for i in range(1,j+1):
if not np.isclose(Linv[j-1][i-1], i/j):
print("the assertion in the book is wrong at",i,j)
- right = False
- if right:
- print("The book is right")
+ return
+ print("The book is right")
proof(4)
proof(5)
-print("Section 2.2: #33")
+print("\nSection 2.2: #33")
print("For the first matrix:")
A = [[1, 3, 3], [2, 6, 9], [-1, -3, 3]]
@@ -170,17 +219,36 @@ print(scipy.linalg.null_space(A))
print("particular solution:")
print(np.linalg.lstsq(A, b, rcond=None)[0])
-print("Section 2.2: #35")
-print("TODO")
+print("\nSection 2.2: #35")
+# b is, trivially, in the column space.
+
+def restrictions(A):
+ # Solvability conditions on b come from the left null space: each
+ # y with y^T A = 0 forces y^T b = 0 for Ax = b to be solvable.
+ for y in np.transpose(scipy.linalg.null_space(np.transpose(A))):
+ print(' + '.join(
+ str(round(float(c), 3)) + '*b_' + str(i + 1)
+ for i, c in enumerate(y)
+ if not np.isclose(c, 0)
+ )
+ + ' = 0.')
+
print("For the first system:")
A = [[1, 2], [2, 4], [2, 5], [3, 9]]
+restrictions(A)
-print("Section 2.2: #36")
+print("For the second system:")
+A = [[1,2,3], [2,4,6], [2,5,7], [3,9,12]]
+restrictions(A)
+
+print("\nSection 2.2: #36")
print("(a)")
A = np.array([[1, 2, 1], [2, 6, 3], [0, 2, 5]])
print("Basis of the column space:")
-print(scipy.linalg.orth(A))
+print(np.linalg.matrix_rank(A), "= full rank, so the columns of A"
+ " themselves form a basis")
print("Multiplying the rows by this gives zero (for this matrix, \
equivalent to [0 0 0]^T)")
print(scipy.linalg.null_space(A.transpose()))
@@ -188,12 +256,12 @@ print(scipy.linalg.null_space(A.transpose()))
print("(b)")
A = np.array([[1, 1, 1], [1, 2, 4], [2, 4, 8]])
print("Basis of the column space:")
-print(scipy.linalg.orth(A))
+print(scipy.linalg.orth(A)) # an orthonormal basis, not the book's pivot columns
print("Multiplying the rows by this gives zero (for this matrix, \
equivalent to [0 2 -1]^T)")
print(scipy.linalg.null_space(A.transpose()))
-print("Section 2.3: #2")
+print("\nSection 2.3: #2")
print("Largest possible number of independent vectors is the dimension \
of the space spanned by these vectors, or the number of vectors in the \
@@ -202,9 +270,9 @@ v = [[ 1, 1, 1, 0, 0, 0],
[-1, 0, 0, 1, 1, 0],
[ 0, -1, 0, -1, 0, 1],
[ 0, 0, -1, 0, -1, -1]]
-print(len(scipy.linalg.orth(v)))
+print(np.linalg.matrix_rank(v))
-print("Section 2.3: #5")
+print("\nSection 2.3: #5")
print("(a)")
@@ -222,17 +290,277 @@ v = [[ 1, 2, -3],
if np.linalg.matrix_rank(v) == 3: print("they are independent")
else: print("they are not independent")
-print("Section 2.3: #13")
-# probably out of spirit of the matlab hw
-print("This one can be done easily without a calculator because the \
- echelon matrix has already been computed.")
-print("The dimensions of all of these is 2, and the row spaces are the \
- same.")
-
-print("Section 2.3: #16")
-print("TODO")
-
-print("Section 2.3: #18")
-print("TODO")
-
-print("Section 2.3: #24")
+print("\nSection 2.3: #13")
+print("U is the echelon martix of A, and for any matrix and its echelon"
+" form, the dimensions of these spacees are the same, and the row"
+" spaces are equal.")
+
+A = [[1,1,0], [1,3,1], [3,1,-1]]
+U = [[1,1,0], [0,2,1], [0,0,0]]
+print(f"(a) dim col A = {np.linalg.matrix_rank(A)}")
+print(f"(b) dim row A = {np.linalg.matrix_rank(A)}")
+print(f"(c) dim col U = {np.linalg.matrix_rank(U)}")
+print(f"(d) dim row U = {np.linalg.matrix_rank(U)}")
+stack = np.vstack((A, U))
+print("Since the row spaces are the same, the dimension of the space"
+" both their rows span should be 2, like the dimension of each matrix's"
+f" own row space. The dimension is {np.linalg.matrix_rank(stack)}.")
+
+print("\nSection 2.3: #16")
+v = np.transpose([[1,1,0,0], [1,0,1,0], [0,0,1,1], [0,1,0,1]])
+rank = np.linalg.matrix_rank(v)
+if rank == v.shape[0] and rank == v.shape[1]:
+ print("The matrix is full rank, so the rows are linearly "
+ "independent.")
+else:
+ print("The matrix is not full rank, so the rows are linearly "
+ "dependent.")
+# Finding the generic solution to Ax = 0 on a computer can also be
+# written as scipy.linalg.null_space
+if scipy.linalg.null_space(v).shape[1] == 0:
+ print("The matrix only has the trivial solution, so its columns are"
+ " linearly independent, by definition.")
+else:
+ print("Ax = 0 has nontrivial solutions, so the columns are linearly"
+ " dependent.")
+
+print("\nSection 2.3: #18")
+w = np.transpose([[1,1,0], [2,2,1], [0,0,2]])
+b = [3,4,5]
+lstsq = np.linalg.lstsq(w, b, rcond=None)[0]
+print("We get least squares solution:")
+print(lstsq)
+print("Multiplying back out:")
+mul = np.matmul(w, lstsq)
+print(mul)
+if np.allclose(mul, b):
+ print("which is b, so there is a solution.")
+else:
+ print("which isn't b, so there isn't a solution.")
+
+w = [[1,2,0], [2,5,0], [0,0,2], [0,0,0]]
+rank = np.linalg.matrix_rank(w)
+if rank == 3:
+ print("The vectors span R^3, so a solution exists for all b.")
+else:
+ print("The vectors don't span R^3, so some b don't have a solution")
+
+print("\nSection 2.3: #24")
+v = [1,-2,3] # vT x = 0
+
+print("Basis for the solution set:")
+print(scipy.linalg.null_space([v]))
+
+print("Intersection with the xy plane:") # xy plane -> z = 0
+u = [0,0,1] # uT x = 0
+print(scipy.linalg.null_space([v,u]))
+
+print(f"Basis of V-perp: {v}")
+
+if regression_problems:
+ df = pd.read_csv('insurance.csv')
+ df['smoker'] = df['smoker'].replace({'yes': 1, 'no': 0})
+ df['sex'] = df['sex'].replace({'female': 1, 'male': 0})
+ regions = df['region'].unique()
+ for region in df['region'].unique():
+ df[region] = df.region.apply(lambda r: r == region)
+ del df['region']
+
+ vals = df.to_numpy()
+ # Index(['age', 'sex', 'bmi', 'children', 'smoker', 'charges', 'southwest', 'southeast', 'northwest', 'northeast'],
+ A = [(1, row[0], 1.1**row[0], row[1], row[2], row[3], row[4],
+ row[6], row[7], row[8], row[9]) for row in vals]
+ # exponential and linear model of age, and linear models of
+ # everything else
+ b = [row[5] for row in vals]
+ x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+ print(f"With error {rank},\n"
+ f"charges = {x[0]} + {x[1]}*age + {x[2]}*1.1^age + {x[3]}*female + "
+ f"{x[4]}*bmi + {x[5]}*children + {x[6]}*smoker + "
+ f"{x[7]}*southwest + {x[8]}*southeast + {x[9]}*northwest + "
+ f"{x[10]}*northeast")
+
+ print("Expected charges for a 45-year-old woman in the northeast "
+ "who doesn't smoke and has no children at BMI of 27 is "
+ f"{x[0] + x[1]*45 + x[2]*1.1**45 + x[3] + x[4]*27 + x[10]}")
+
+
+print("\nSection 3.3: #32")
+b = (0,8,8,20)
+t = np.array((0,1,3,4))
+# mostly given by np linalg lstsq
+A = np.transpose((t, np.ones(t.shape)))
+x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+hatb = np.matmul(A,x)
+print(f"b = {x[0]}t + {x[1]}")
+print(f"Approximations {hatb} have error {hatb-b} with min. error "
+ f"E^2 = {e}")
+
+print("\nSection 3.3: #33")
+p = (1,5,13,17)
+for arow, prow in zip(A, p):
+ print(f"{arow[0]}*C + {arow[1]}*D = {prow}")
+x, e, rank, s = np.linalg.lstsq(A, p, rcond=None)
+print(f"We get exact solution p = {x[0]}t + {x[1]} with error {e}.")
+
+print("\nSection 3.3: #34")
+e = np.array(b) - np.array(p)
+print(f"A^Te = {np.matmul(np.transpose(A), e)}, so e "
+ "is orthogonal to every column of A.")
+print(f"The error ||e|| is {np.sqrt(np.dot(e,e))}.")
+
+print("\nSection 3.3: #35")
+def buildcoeff(coeff):
+ return ' + '.join(f"{c}*x_{i}" for i,c in enumerate(coeff))
+
+A = np.transpose((t*t, t, np.ones(t.shape)))
+eq = np.matmul( np.transpose(A), b )
+for coeff, of in zip(np.matmul(np.transpose(A), A), eq):
+ print(f"{buildcoeff(coeff)} = {of}")
+
+print("\nSection 3.3: #36")
+A = np.transpose((t*t*t, t*t, t, np.ones(t.shape)))
+x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+p = np.matmul(A,x)
+print(f"p = b = {p}")
+print(f"e = p-b = 0 = {p-b}")
+
+print("\nSection 3.3: #37")
+st = sum(t)/len(t)
+sb = sum(b)/len(b)
+A = np.transpose((t, np.ones(t.shape)))
+x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+print(f"hat b = {sb} = {np.dot((st, 1), x)} = C(hat t) + D")
+
+print("This is true because the first equation in A^TAp = A^Tb is "
+"(1,1,1,1)Ap = (1,1,1,1)b, so (1,1,1,1)Ap = (1,1,1,1) (Ct + D) = C(hat "
+"t) + D")
+
+print("\nSection 5.2: #29")
+A = ((.6, .4), (.4, .6))
+Aw,Av = np.linalg.eig(A)
+if all(l < 1 for l in Aw):
+ print("A^k converges.")
+else:
+ print("B^k doesn't converge to zero")
+B = ((.6, .9), (.1, .6))
+Bw,Bv = np.linalg.eig(B)
+if all(l < 1 for l in Bw):
+ print("B^k converges.")
+else:
+ print("B^k doesn't converge to zero.")
+
+print("\nSection 5.2: #30")
+if any(l > 1 for l in Aw):
+ print("A^k diverges")
+else:
+ l = [0 if l < 1 else 1 for l in Aw] #Lambda has diagonal = l, 0 else
+ print(f"A^k converges to\n{np.matmul( np.matmul(Av, np.diag(l)), np.linalg.inv(Av) )}")
+
+print("\nSection 5.2: #31")
+for u in ((3, 1), (3, -1), (6,0)):
+ L = np.diag(Aw**10)
+ lim = np.matmul(np.matmul( np.matmul(Av, L), np.linalg.inv(Av) ), u)
+ print(f"Limit of u0 = {u}: {lim}")
+
+print("\nSection 5.2: #45")
+A = np.array( ((.6, .2),(.4,.8)) )
+Alim = np.array( ((1/3, 1/3),(2/3, 2/3)) )
+
+Aw,Av = np.linalg.eig(A)
+Alimw,Alimv = np.linalg.eig(Alim)
+
+print(f"These matrices have the same eigenvectors\n{Aw}\nand\n{Alimw},")
+print("The limit of eigenvalues of A^k is A^\infty, so A^100 is close "
+ "(exponents quickly approach their asymptotes).\n"
+ f"Eigenvalues of A are {Av}, and of A^\infty are {Alimv}")
+
+print("Section 5.3: #3")
+f = np.array(((1,1), (1,0)))
+k = f
+for i in range(1,5):
+ print(f"F^{i} =\n{k}")
+ k = np.matmul(k, f)
+
+n = 20
+fn = round(((1+np.sqrt(5))/2)**n/np.sqrt(5))
+print(f"F_{n} = {fn} by Binet's formula")
+
+print("\nSection 5.3: #7")
+
+l = (2, 1)
+for x in range(9):
+ l = (l[1], l[0] + l[1])
+print(f"Exactly, {l[1]}")
+
+l1 = (1+np.sqrt(5))/2
+print(f"Approximately, L_{10} = {round(l1**10)}")
+
+print("Section 5.3: #16")
+A = np.array( ((0, -1), (1, 0)) )
+stdeig = np.linalg.eig(A)
+print(stdeig)
+I = np.eye(A.shape[0])
+est = ( I+A, np.linalg.inv(I-A), np.matmul( np.linalg.inv(I - A/2),
+ I+A/2 ) )
+names = ( "Forward", "Backward", "Centered" )
+for f,name in zip(est, names):
+ w,v = np.linalg.eig(f)
+ print(f"{name} approximation has eigenvalues {w}.")
+ if all(np.isclose(abs(e), 1) for e in w):
+ print(f"{name} stays on the circle because all the eigenvalues "
+ "have magnitude 1.")
+
+print("\nSection 5.3: #29")
+
+B = ((3, 2), (-5, -3))
+C = ((5, 7), (-3, -4))
+
+w,v = np.linalg.eig(B)
+print(f"B ({B}) has eigenvalues\n{w}, and B^4 has eigenvalues\n{w**4} ="
+ " (1, 1), meaning that it is the identity.")
+w,v = np.linalg.eig(C)
+print(f"C ({C}) has eigenvalues\n{w}, and C^3 has eigenvalues\n{w**3} ="
+ " (-1, -1), meaning that it is the skew identity -I.")
+
+print("\nSection 5.4: #28")
+stable = True
+for x in range(1000):
+# my'' + by' + ky = 0
+# u = (y,y')^T
+# u' = (y',y'')^T
+# my'' = - (ky + by')
+ m,b,k = (np.random.rand(), np.random.rand(), np.random.rand())
+ A = (( 0, 1),
+ (-k,-b))
+ M = (( 1, 0),
+ ( 0, m))
+ # Mu' = Au => u' = Bu
+ B = np.matmul( np.linalg.inv(M), A )
+ w,v = np.linalg.eig(B)
+ if any(e.real > 0 for e in w):
+ stable = False
+print("Over 1000 random positive m, b, k, every eigenvalue had"
+ " non-positive real part, so the motion is stable."
+ if stable else "Found an eigenvalue with positive real part.")
+
+print("\nChecking that roots of characteristic polynomial = eigenvalues")
+lamda = sympy.symbols('lamda')
+for x in range(50):
+ npmat = np.random.rand(10, 10)
+ npmat += np.transpose(npmat) # make it symmetric for ease of proof
+ mat = sympy.Matrix(npmat)
+
+ poly = mat.charpoly(lamda)
+
+ w,v = np.linalg.eig(npmat)
+ roots = sorted(np.array(poly.nroots(), dtype='float64'))
+ w = sorted(w)
+ if not np.allclose(w, roots, atol=.001):
+ print("This property doesn't hold for all matrices")
+
+# Power method for eigenvalues and characteristic polynomials (see doc)
+
+print("\nPower Method for estimating the dominant eigen{vector,value}")
+matrix = np.random.rand(20, 20)
+w,v = np.linalg.eig(matrix)
+truedominant = max(abs(w))
+
+# a random vector will almost never be in the span of a subset of
+# eigenvectors, so we will use np.random.rand
+vec = np.random.rand(20)
+for x in range(50): # 50 iterations of the power method
+ vec = vec / np.linalg.norm(vec)
+ prev = vec
+ vec = matrix @ vec
+ val = np.linalg.norm(vec) / np.linalg.norm(prev)
+print(f"power method gives a dominant value {val}, compared to true {truedominant}")