path: root/li/matlab_hw.py
Diffstat (limited to 'li/matlab_hw.py')
-rw-r--r--  li/matlab_hw.py  512
1 file changed, 420 insertions(+), 92 deletions(-)
diff --git a/li/matlab_hw.py b/li/matlab_hw.py
index 3473934..cf20709 100644
--- a/li/matlab_hw.py
+++ b/li/matlab_hw.py
@@ -1,12 +1,22 @@
import numpy as np
+from time import perf_counter
import scipy.linalg
+import pandas as pd
+import sympy
+
+timer_problems = True
+regression_problems = True
+np.set_printoptions(precision=3, suppress=True)
+# if you want to run the regression problems, you need this dataset:
+# `insurance.csv` in the current directory from
+# https://www.kaggle.com/mirichoi0218/insurance/version/1
-print("Section 1.3: #30")
+print("\nSection 1.3: #30")
coeff = (
- [[ 1, 1, 1 ],
- [ 1, 2, 2 ],
- [ 2, 3, -4]]
+ ( 1, 1, 1 ),
+ ( 1, 2, 2 ),
+ ( 2, 3, -4)
)
print(
np.linalg.solve(
@@ -20,7 +30,7 @@ np.linalg.solve(
[7, 10, 3])
)
-print("Section 1.3: #32")
+print("\nSection 1.3: #32")
dimension = 3
tot = [0]*3
@@ -31,7 +41,7 @@ for x in range(ct):
tot[i] += U[i][i]
print([x/ct for x in tot]);
-print("Section 1.4: #21")
+print("\nSection 1.4: #21")
A = [[.5, .5], [.5, .5]]
B = [[1, 0], [0, -1]]
@@ -52,20 +62,66 @@ print("C^3")
print(np.linalg.matrix_power(C, 3))
print("C^k = 0")
-print("Section 1.4: #59")
+print("\nSection 1.4: #59")
+
+v = [3,4,5]
+A = np.identity(3)
+print(f"A * v = {np.matmul(A, v)}")
+print(f"v' * v = {np.dot(v,v)}")
+print(f"v * A = {np.matmul(v, A)}")
+
+print("\nSection 1.4: #60")
+
+print(f"Av = {np.matmul(np.ones((4,4)), np.ones((4,1)))}")
+print(f"Bw = {np.matmul(np.identity(4)+np.ones((4,4)), np.zeros((4,1))+2*np.ones((4,1)))}")
+
+print("\nSection 1.6: #12")
+
+tridiag_mask = np.identity(11)
+i,j = np.indices(tridiag_mask.shape)
+tridiag_mask[i==j+1] = 1
+tridiag_mask[i==j-1] = 1
+tridiag_preserved = True
+triangle_preserved = True
+sym_preserved = True
+
+for x in range(1000):
+ a = np.random.rand(11,11)
+ inv = np.linalg.inv(a*tridiag_mask)
+ if (inv != inv*tridiag_mask).any():
+ tridiag_preserved = False
+
+ triu = np.triu(a)
+ inv = np.linalg.inv(triu)
+    if not np.allclose(inv, np.triu(inv)):
+        triangle_preserved = False
+
+    sym = np.transpose(triu) + triu - triu*np.identity(11)
+    inv = np.linalg.inv(sym)
+    if not np.allclose(inv, np.transpose(inv)):
+        sym_preserved = False
-print("A * v = [3, 4, 5]")
-print("v' * v = 50")
-print(f"v * A = {np.matmul([3,4,5], np.identity(3))}")
+if triangle_preserved:
+ print("Triangular matrices' inverses are triangular.")
+else:
+ print("Triangular matrices' inverses are not triangular.")
-print("Section 1.4: #60")
+if sym_preserved:
+    print("Symmetric matrices' inverses are symmetric.")
+else:
+    print("Symmetric matrices' inverses are not symmetric.")
-print("Av = [4. 4. 4. 4.]")
-print("Bw = [10. 10. 10. 10.]")
+if tridiag_preserved:
+ print("Tridiagonal matrices' inverses are tridiagonal.")
+else:
+ print("Tridiagonal matrices' inverses are not tridiagonal.")
-print("Section 1.6: #12")
+print("Entries being integers isn't preserved. This is trivially shown"
+" the 1x1 [ 2. ] matrix's inverse [ 1/2. ]")
+print("Entries being fractions (rationals) is preserved because the"
+" matrix augmented with an identity and then reduced only requires"
+" rational multiplications.")
-print("Section 1.6: #32")
+print("\nSection 1.6: #32")
print("the inverse of the first matrix is")
print(np.linalg.inv(5*np.identity(4) - np.ones((4,4))))
@@ -75,9 +131,9 @@ print(np.matmul(np.ones((4,4)),np.ones((4,4))))
print("so we can solve this like the following equation: (a*eye +\n\
b*ones)(c*eye + d*ones) = ac*eye + (ad+bc+dimension*bd)*ones = eye")
print("c = 1/a. d = -b/a/(a+dimension*b), giving for the next matrix")
-print("a = 1/6, d = 6/(1/6+5*-1)") # WRONG
+print("a = 1/6, d = 6/(1/6+5*-1)") # WRONG (TODO?)
-print("Section 1.6: #47")
+print("\nSection 1.6: #47")
print("I'm not sure. This is Python. It says (in either case):")
print("numpy.linalg.LinAlgError: Singular matrix")
@@ -86,54 +142,49 @@ print(np.linalg.solve(np.ones((4, 4)), np.random.rand(4, 1)))
print(np.linalg.solve(np.ones((4, 4)), np.ones((4, 1))))
'''
-''' #68
-dimension = 500
-northwest = np.random.rand(dimension, dimension)
-for x in range(0, dimension):
- for y in range(x+1, dimension):
- northwest[y][x] = 0
-print(northwest)
-'''
-print("Section 1.6: #69")
-
-
-from time import perf_counter
-matrix = np.random.rand(500, 500)
-start = perf_counter()
-np.linalg.inv(matrix)
-first_time = perf_counter() - start
-print("500x500 inverse:", str(first_time) + "s")
-matrix = np.random.rand(1000, 1000)
-start = perf_counter()
-np.linalg.inv(matrix)
-second_time = perf_counter() - start
-print("1000x1000 inverse:", str(second_time) + "s")
-print("factor:", second_time / first_time)
-
-print("Section 1.6: #70")
-
-dimension = 1000
-I = np.identity(dimension)
-A = np.random.rand(dimension, dimension)
-U = np.random.rand(dimension, dimension)
-for x in range(0, dimension):
- for y in range(x+1, dimension):
- U[y][x] = 0
-
-start = perf_counter()
-np.linalg.inv(U)
-print("inverse of U:", str(perf_counter() - start) + "s")
-start = perf_counter()
-np.linalg.solve(U, I)
-print("U\I:", str(perf_counter() - start) + "s")
-start = perf_counter()
-np.linalg.inv(A)
-print("inverse of A:", str(perf_counter() - start) + "s")
-start = perf_counter()
-np.linalg.solve(A, I)
-print("A\I:", str(perf_counter() - start) + "s")
-
-print("Section 1.6: #71")
+if timer_problems:
+ print("\nSection 1.6: #69")
+
+ matrix = np.random.rand(500, 500)
+ start = perf_counter()
+ np.linalg.inv(matrix)
+ first_time = perf_counter() - start
+ print("500x500 inverse:", str(first_time) + "s")
+ matrix = np.random.rand(1000, 1000)
+ start = perf_counter()
+ np.linalg.inv(matrix)
+ second_time = perf_counter() - start
+ print("1000x1000 inverse:", str(second_time) + "s")
+ print("factor:", second_time / first_time)
+ print("This matches theory. We also expect that almost every random"
+ " matrix is invertible")
+
+ print("\nSection 1.6: #70")
+
+ dimension = 1000
+ I = np.identity(dimension)
+ A = np.random.rand(dimension, dimension)
+ U = np.random.rand(dimension, dimension)
+ for x in range(0, dimension):
+ for y in range(x+1, dimension):
+ U[y][x] = 0
+
+ start = perf_counter()
+ np.linalg.inv(U)
+ print("inverse of U:", str(perf_counter() - start) + "s")
+ start = perf_counter()
+ np.linalg.solve(U, I)
+ print("U\I:", str(perf_counter() - start) + "s")
+ start = perf_counter()
+ np.linalg.inv(A)
+ print("inverse of A:", str(perf_counter() - start) + "s")
+ start = perf_counter()
+ np.linalg.solve(A, I)
+ print("A\I:", str(perf_counter() - start) + "s")
+
+ print("There is no significant performance deviation in Python.")
+
+print("\nSection 1.6: #71")
def proof(dim):
L = np.identity(dim) - np.diag([x/(1+x) for x in range(1,dim)], -1)
@@ -141,19 +192,17 @@ def proof(dim):
Linv = np.linalg.inv(L)
print("L^{-1}:")
print(Linv)
- right = True
for j in range(1,dim+1):
for i in range(1,j+1):
if not np.isclose(Linv[j-1][i-1], i/j):
print("the assertion in the book is wrong at",i,j)
- right = False
- if right:
- print("The book is right")
+ return
+ print("The book is right")
proof(4)
proof(5)
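+
+# symbolic spot-check (sketch): sympy inverts L exactly for dim = 4, and
+# the lower-triangular entries of the inverse are exactly i/j
+Lsym = sympy.Matrix(4, 4, lambda i, j:
+    1 if i == j else (-sympy.Rational(i, i + 1) if i == j + 1 else 0))
+print("exact inverse entries are i/j:",
+      all(Lsym.inv()[i, j] == sympy.Rational(j + 1, i + 1)
+          for i in range(4) for j in range(i + 1)))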
-print("Section 2.2: #33")
+print("\nSection 2.2: #33")
print("For the first matrix:")
A = [[1, 3, 3], [2, 6, 9], [-1, -3, 3]]
@@ -170,17 +219,36 @@ print(scipy.linalg.null_space(A))
print("particular solution:")
print(np.linalg.lstsq(A, b, rcond=None)[0])
-print("Section 2.2: #35")
-print("TODO")
+print("\nSection 2.2: #35")
+# b is, trivially, in the column space.
+
+def restrictions(A):
+ p,l,u = scipy.linalg.lu(A)
+ # PLU = A \to col(A) = col(PL) because U is full rank
+ # PLx = b
+ order = np.arange(len(A)) # [0,1,2,3]
+ for col in reversed(np.transpose(p@l)):
+ print(' + '.join(
+ str(col[int(pos)]) + '*x_' + str(int(pos))
+ for pos in order
+ if not np.isclose(col[int(pos)], 0)
+ )
+ + ' = 0.')
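+
+# cross-check (sketch, hypothetical helper): a basis y of the left null
+# space gives the same restrictions, since Ax = b is solvable iff
+# y . b = 0 for every y with y^T A = 0
+def restrictions_lns(A):
+    for y in np.transpose(scipy.linalg.null_space(np.transpose(A))):
+        print(' + '.join(f"{c:.3f}*b_{i}" for i, c in enumerate(y)
+                         if not np.isclose(c, 0)) + ' = 0.')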
+
print("For the first system:")
A = [[1, 2], [2, 4], [2, 5], [3, 9]]
+restrictions(A)
-print("Section 2.2: #36")
+print("For the second system:")
+A = [[1,2,3], [2,4,6], [2,5,7], [3,9,12]]
+restrictions(A)
+
+print("\nSection 2.2: #36")
print("(a)")
A = np.array([[1, 2, 1], [2, 6, 3], [0, 2, 5]])
print("Basis of the column space:")
-print(scipy.linalg.orth(A))
+print(A[:, :np.linalg.matrix_rank(A)])  # full rank: all columns form a basis
print("Multiplying the rows by this gives zero (for this matrix, \
equivalent to [0 0 0]^T)")
print(scipy.linalg.null_space(A.transpose()))
@@ -188,12 +256,12 @@ print(scipy.linalg.null_space(A.transpose()))
print("(b)")
A = np.array([[1, 1, 1], [1, 2, 4], [2, 4, 8]])
print("Basis of the column space:")
-print(scipy.linalg.orth(A))
+print(scipy.linalg.orth(A))  # an orthonormal basis, not columns of A
print("Multiplying the rows by this gives zero (for this matrix, \
equivalent to [0 2 -1]^T)")
print(scipy.linalg.null_space(A.transpose()))
-print("Section 2.3: #2")
+print("\nSection 2.3: #2")
print("Largest possible number of independent vectors is the dimension \
of the space spanned by these vectors, or the number of vectors in the \
@@ -202,9 +270,9 @@ v = [[ 1, 1, 1, 0, 0, 0],
[-1, 0, 0, 1, 1, 0],
[ 0, -1, 0, -1, 0, 1],
[ 0, 0, -1, 0, -1, -1]]
-print(len(scipy.linalg.orth(v)))
+print(np.linalg.matrix_rank(v))
-print("Section 2.3: #5")
+print("\nSection 2.3: #5")
print("(a)")
@@ -222,17 +290,277 @@ v = [[ 1, 2, -3],
if len(scipy.linalg.orth(v)) == 3: print("they are independent")
else: print("they are not independent")
-print("Section 2.3: #13")
-# probably out of spirit of the matlab hw
-print("This one can be done easily without a calculator because the \
- echelon matrix has already been computed.")
-print("The dimensions of all of these is 2, and the row spaces are the \
- same.")
-
-print("Section 2.3: #16")
-print("TODO")
-
-print("Section 2.3: #18")
-print("TODO")
-
-print("Section 2.3: #24")
+print("\nSection 2.3: #13")
+print("U is the echelon martix of A, and for any matrix and its echelon"
+" form, the dimensions of these spacees are the same, and the row"
+" spaces are equal.")
+
+A = [[1,1,0], [1,3,1], [3,1,-1]]
+U = [[1,1,0], [0,2,1], [0,0,0]]
+print(f"(a) dim col A = {np.linalg.matrix_rank(A)}")
+print(f"(b) dim row A = {np.linalg.matrix_rank(A)}")
+print(f"(c) dim col U = {np.linalg.matrix_rank(U)}")
+print(f"(d) dim row U = {np.linalg.matrix_rank(U)}")
+stack = np.vstack((A, U))
+print("Since the row spaces are the same, the dimension of the space"
+" both their rows span should be 2, like the dimension of each matrix's"
+f" own row space. The dimension is {np.linalg.matrix_rank(stack)}.")
+
+print("\nSection 2.3: #16")
+v = np.transpose([[1,1,0,0], [1,0,1,0], [0,0,1,1], [0,1,0,1]])
+rank = np.linalg.matrix_rank(v)
+if rank == v.shape[0] and rank == v.shape[1]:
+    print("The matrix is full rank, so the rows are linearly "
+        "independent.")
+else:
+    print("The matrix is not full rank, so the rows are linearly "
+        "dependent.")
+# Finding the generic solution to Ax = 0 on a computer can also be
+# written as scipy.linalg.null_space
+if scipy.linalg.null_space(v).shape[1] == 0:
+    print("The matrix only has the trivial solution, so its columns are"
+        " linearly independent, by definition.")
+else:
+    print("Ax = 0 has nontrivial solutions, so the columns are linearly"
+        " dependent.")
+
+print("\nSection 2.3: #18")
+w = np.transpose([[1,1,0], [2,2,1], [0,0,2]])
+b = [3,4,5]
+lstsq = np.linalg.lstsq(w, b, rcond=None)[0]
+print("We get least squares solution:")
+print(lstsq)
+print("Multiplying back out:")
+mul = np.matmul(w, lstsq)
+print(mul)
+if np.allclose(mul, b):
+ print("which is b, so there is a solution.")
+else:
+ print("which isn't b, so there isn't a solution.")
+
+w = [[1,2,0], [2,5,0], [0,0,2], [0,0,0]]
+rank = np.linalg.matrix_rank(w)
+if rank == 3:
+ print("The vectors span R^3, so a solution exists for all b.")
+else:
+ print("The vectors don't span R^3, so some b don't have a solution")
+
+print("\nSection 2.3: #24")
+v = [1,-2,3] # vT x = 0
+
+print("Basis for the solution set:")
+print(scipy.linalg.null_space([v]))
+
+print("Intersection with the xy plane:") # xy plane -> z = 0
+u = [0,0,1] # uT x = 0
+print(scipy.linalg.null_space([v,u]))
+
+print(f"Basis of V-perp: {v}")
+
+if regression_problems:
+ df = pd.read_csv('insurance.csv')
+ df['smoker'] = df['smoker'].replace({'yes': 1, 'no': 0})
+ df['sex'] = df['sex'].replace({'female': 1, 'male': 0})
+ regions = df['region'].unique()
+ for region in df['region'].unique():
+ df[region] = df.region.apply(lambda r: r == region)
+ del df['region']
+
+ vals = df.to_numpy()
+ # Index(['age', 'sex', 'bmi', 'children', 'smoker', 'charges', 'southwest', 'southeast', 'northwest', 'northeast'],
+ A = [(1, row[0], 1.1**row[0], row[1], row[2], row[3], row[4],
+ row[6], row[7], row[8], row[9]) for row in vals]
+ # exponential and linear model of age, and linear models of
+ # everything else
+ b = [row[5] for row in vals]
+ x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+ print(f"With error {rank},\n"
+ f"charges = {x[0]} + {x[1]}*age + {x[2]}*1.1^age + {x[3]}*female + "
+ f"{x[4]}*bmi + {x[5]}*children + {x[6]}*smoker + "
+ f"{x[7]}*southwest + {x[8]}*southeast + {x[9]}*northwest + "
+ f"{x[10]}*northeast")
+
+ print("Expected charges for a 45-year-old woman in the northeast "
+ "who doesn't smoke and has no children at BMI of 27 is "
+ f"{x[0] + x[1]*45 + x[2]*1.1**45 + x[3] + x[4]*27 + x[10]}")
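+
+    # sanity check (sketch): A is rank-deficient (the four region dummies
+    # sum to the intercept column), so lstsq returns the minimum-norm
+    # solution; its fitted values should still match the pseudoinverse fit
+    An = np.array(A, dtype=float)
+    print("pinv fit agrees with lstsq fit:",
+          np.allclose(An @ x, An @ (np.linalg.pinv(An) @ b)))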
+
+
+print("\nSection 3.3: #32")
+b = (0,8,8,20)
+t = np.array((0,1,3,4))
+# mostly given by np linalg lstsq
+A = np.transpose((t, np.ones(t.shape)))
+x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+hatb = np.matmul(A,x)
+print(f"b = {x[0]}t + {x[1]}")
+print(f"Approximations {hatb} have error {hatb-b} with min. error "
+ f"E^2 = {e}")
+
+print("\nSection 3.3: #33")
+p = (1,5,13,17)
+for arow, brow in zip(A, p):
+    print(f"{arow[0]}*C + {arow[1]}*D = {brow}")
+x, e, rank, s = np.linalg.lstsq(A, p, rcond=None)
+print(f"We get exact solution p = {x[0]}t + {x[1]} with error {e}.")
+
+print("\nSection 3.3: #34")
+e = np.array(b) - np.array(p)
+print(f"A^Te = {np.matmul(np.transpose(A), e)}, so e "
+ "is orthogonal to every column of A.")
+print(f"The error ||e|| is {np.sqrt(np.dot(e,e))}.")
+
+print("\nSection 3.3: #35")
+def buildcoeff(coeff):
+ return ' + '.join(f"{c}*x_{i}" for i,c in enumerate(coeff))
+
+A = np.transpose((t*t, t, np.ones(t.shape)))
+eq = np.matmul( np.transpose(A), b )
+for coeff, of in zip(np.matmul(np.transpose(A), A), eq):
+ print(f"{buildcoeff(coeff)} = {of}")
+
+print("\nSection 3.3: #36")
+A = np.transpose((t*t*t, t*t, t, np.ones(t.shape)))
+x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+p = np.matmul(A,x)
+print(f"p = b = {p}")
+print(f"e = p-b = 0 = {p-b}")
+
+print("\nSection 3.3: #37")
+st = sum(t)/len(t)
+sb = sum(b)/len(b)
+A = np.transpose((t, np.ones(t.shape)))
+x, e, rank, s = np.linalg.lstsq(A, b, rcond=None)
+print(f"hat b = {sb} = {np.dot((st, 1), x)} = C(hat t) + D")
+
+print("This is true because the first equation in A^TAp = A^Tb is "
+"(1,1,1,1)Ap = (1,1,1,1)b, so (1,1,1,1)Ap = (1,1,1,1) (Ct + D) = C(hat "
+"t) + D")
+
+print("\nSection 5.2: #29")
+A = ((.6, .4), (.4, .6))
+Aw,Av = np.linalg.eig(A)
+if all(abs(l) < 1 for l in Aw):
+    print("A^k converges to zero.")
+else:
+    print("A^k doesn't converge to zero.")
+B = ((.6, .9), (.1, .6))
+Bw,Bv = np.linalg.eig(B)
+if all(abs(l) < 1 for l in Bw):
+    print("B^k converges to zero.")
+else:
+    print("B^k doesn't converge to zero.")
+
+print("\nSection 5.2: #30")
+if any(abs(l) > 1 for l in Aw):
+    print("A^k diverges")
+else:
+    l = [0 if l < 1 else 1 for l in Aw]  # diagonal of the limit of Lambda^k
+    print(f"A^k converges to\n{np.matmul( np.matmul(Av, np.diag(l)), np.linalg.inv(Av) )}")
+
+print("\nSection 5.2: #31")
+for u in ((3, 1), (3, -1), (6,0)):
+ L = np.diag(Aw**10)
+ lim = np.matmul(np.matmul( np.matmul(Av, L), np.linalg.inv(Av) ), u)
+ print(f"Limit of u0 = {u}: {lim}")
+
+print("\nSection 5.2: #45")
+A = np.array( ((.6, .2),(.4,.8)) )
+Alim = np.array( ((1/3, 1/3),(2/3, 2/3)) )
+
+Aw,Av = np.linalg.eig(A)
+Alimw,Alimv = np.linalg.eig(Alim)
+
+print(f"These matrices have the same eigenvectors\n{Aw}\nand\n{Alimw},")
+print("The limit of eigenvalues of A^k is A^\infty, so A^100 is close "
+ "(exponents quickly approach their asymptotes).\n"
+ f"Eigenvalues of A are {Av}, and of A^\infty are {Alimv}")
+
+print("Section 5.3: #3")
+f = np.array(((1,1), (1,0)))
+k = f
+for i in range(1,5):
+ print(f"F^{i} =\n{k}")
+ k = np.matmul(k, f)
+
+n = 20
+fn = round(((1+np.sqrt(5))/2)**n/np.sqrt(5))
+print(f"F_{n} = {fn}")
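+
+# cross-check (sketch): compute F_20 by iteration to compare with the
+# Binet-formula estimate above
+fa, fb = 0, 1
+for _ in range(n):
+    fa, fb = fb, fa + fb
+print(f"iterative F_{n} = {fa}")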
+
+print("\nSection 5.3: #7")
+
+l = (2, 1)
+for x in range(9):
+ l = (l[1], l[0] + l[1])
+print(f"Exactly, {l[1]}")
+
+l1 = (1+np.sqrt(5))/2
+print(f"Approximately, L_{10} = {round(l1**10)}")
+
+print("Section 5.3: #16")
+A = np.array( ((0, -1), (1, 0)) )
+stdeig = np.linalg.eig(A)
+print(stdeig)
+I = np.eye(A.shape[0])
+est = ( I+A, np.linalg.inv(I-A), np.matmul( np.linalg.inv(I - A/2),
+ I+A/2 ) )
+names = ( "Forward", "Backward", "Centered" )
+for f,name in zip(est, names):
+ w,v = np.linalg.eig(f)
+ print(f"{name} approximation has eigenvalues {w}.")
+    if all(np.isclose(abs(e), 1) for e in w):
+        print(f"{name} stays on the circle because all the eigenvalues "
+            "have magnitude 1.")
+    elif all(abs(e) < 1 for e in w):
+        print(f"{name} spirals in: the eigenvalue magnitudes are < 1.")
+    else:
+        print(f"{name} spirals out: the eigenvalue magnitudes are > 1.")
+
+print("\nSection 5.3: #29")
+
+B = ((3, 2), (-5, -3))
+C = ((5, 7), (-3, -4))
+
+w,v = np.linalg.eig(B)
+print(f"B ({B}) has eigenvalues\n{w}, and B^4 has eigenvalues\n{w**4} ="
+ " (1, 1), meaning that it is the identity.")
+w,v = np.linalg.eig(C)
+print(f"C ({C}) has eigenvalues\n{w}, and C^3 has eigenvalues\n{w**3} ="
+ " (-1, -1), meaning that it is the skew identity -I.")
+
+print("\nSection 5.4: #28")
+for x in range(1000):
+# my'' + by' + ky = 0
+# u = (y,y')^T
+# u' = (y',y'')^T
+# my'' = - (by' + ky)
+# y' = y
+ m,b,k = (np.random.rand(), np.random.rand(), np.random.rand())
+ A = (( 0, 1),
+ (-b,-k))
+ M = (( 1, 0),
+ ( 0, m))
+ # Mu' = Au => u' = Bu
+ B = np.matmul( np.linalg.inv(M), A )
+ w,v = np.linalg.eig(B)
+
+print("\nChecking that roots of characteristic polynomial = eigenvalues")
+lamda = sympy.symbols('lamda')
+for x in range(50):
+ npmat = np.random.rand(10, 10)
+ npmat += np.transpose(npmat) # make it symmetric for ease of proof
+ mat = sympy.Matrix(npmat)
+
+ poly = mat.charpoly(lamda)
+
+ w,v = np.linalg.eig(npmat)
+ roots = sorted(np.array(poly.nroots(), dtype='float64'))
+ w = sorted(w)
+ if not np.allclose(w, roots, atol=.001):
+ print("This property doesn't hold for all matrices")
+
+# Power method for eigenvalues and characteristic polynomials (see doc)
+
+print("\nPower Method for estimating the dominant eigen{vector,value}")
+matrix = np.random.rand(20, 20)
+w,v = np.linalg.eig(matrix)
+# max() on complex eigenvalues raises TypeError; take the one with the
+# largest magnitude (real and positive here, by Perron-Frobenius)
+truedominant = w[np.argmax(abs(w))].real
+
+# a random vector will almost never be in the span of a subset of
+# eigenvectors, so we will use np.random.rand
+vec = np.random.rand(20)
+for x in range(50): # 50 iterations of the power method
+ vec = vec / np.linalg.norm(vec)
+ prev = vec
+ vec = matrix @ vec
+ val = np.linalg.norm(vec) / np.linalg.norm(prev)
+print(f"power method gives a dominant value {val}, compared to true {truedominant}")