"""Importing the required modules"""
import numpy as np #NumPy provides most of the array and matrix operations we need
import matplotlib.pyplot as plt #for plotting
from math import * #exp, log, pow, trig functions, etc.
from numpy.linalg import * #linear algebra routines: det, eig, inv, matrix_rank, norm, ...
import scipy.linalg as sp #SciPy linear algebra (used here for the SVD)
import matplotlib.image as mpimg #for reading images
from sklearn.decomposition import * #for PCA
a = 2.5
b = 3e01
c = sqrt(2)+1.55555j
d = a + 2*b - np.conj(c) #linear combination of complex numbers
print(d) #plain print statement
print("Result : {}".format(np.round(d,3))) #formatted print statement
A list is one of the fundamental data types in Python. It is a finite sequence of Python objects (numbers, strings, other lists, etc.) and is commonly used to define vectors and matrices.
a = [1,2,3]
b = [1,'vector',3]
c = [[1,0],[0,1]]
print(a+b+c) #prints the concatenated list
print(type(a+b+c)) #outputs the data type
print("Number of elements in a : {}".format(len(a)))
#a[i] returns the element at index i (indexing starts at 0)
print(a[1]) #second element of a
print(b[1][0]) #first character of the string 'vector'
print(c[1][0]) #entry in row 1, column 0 of the nested list
#a + 1 raises a TypeError: lists do not support element-wise addition directly
np.add(a,1) #this serves the purpose; note it returns a different data type, a numpy array, described later
np.add(a,list(range(3))) #range(3) generates 0,1,2; element-wise addition of two lists
np.multiply(a,a[1]) #element-wise multiplication
np.multiply(a,[2,3,1/4])
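For comparison, the same kind of element-wise operation can be written with a plain list comprehension, which stays within the list data type; a minimal sketch (scaled is an illustrative name):
scaled = [2*v for v in a] #element-wise scaling; returns a list, not a numpy array
print(scaled)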
"""List methods"""
a = list(range(3,10))
print(np.sum(a))
print(np.mean(a))
print(sorted(a,reverse=True))
print(a.index(5)) #returns the index of the specified element in the list
a.append(100) #appends the list
print(a)
print(a.pop()) #removes the last element
print(a)
print(a.pop(a[2])) #pop(i) removes and returns the element at index i; here a[2] = 5, so the element at index 5 is removed
print(a)
NumPy arrays are the most useful data type for anyone who works with matrices. In Python, a matrix is represented as a 2-dimensional array.
A = np.array([[1,0],[0,1],[0,0]])
print(A)
print('\n',A.shape) #dimension of the array
A.reshape(2,3) #reshapes the array provided the new dim is compatible; does not alter the original array
A.reshape(2,1,3)
A.tolist()
B = 2*A #scalar multiplication
B
C = A + B #matrix addition
print(C)
C[1,0] = 2 #assign the entry in row 1, column 0 (equivalent to C[1][0])
C**2 #element-wise operation
C@C.T #Matrix multiplication
D = np.array([1,2]).reshape(2,1)
C@D
print('Outer product : {}'.format(D@D.T))
print('Inner product : {}'.format(D.T@D))
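Relatedly, the inner product of a vector with itself is its squared Euclidean norm; a one-line check using norm from numpy.linalg (available through the wildcard import above):
print(norm(D)**2) #equals the 1x1 inner product D.T @ D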
"""Numpy methods for linear algebraic computation"""
print('| I_3 | = {}'.format(det(np.identity(3))))
print('rank(A) = {}'.format(matrix_rank(A)))
lam, P = eig(C@C.T) #lam holds the eigenvalues, the columns of P the corresponding eigenvectors
print('eigenvalues of CC^T: {}'.format(lam))
print('eigenvector of CC^T corresponding to lambda = {}: {}'.format(lam[0],P[:,0]))
print('eigenvector of CC^T corresponding to lambda = {}: {}'.format(lam[1],P[:,1]))
print('eigenvector of CC^T corresponding to lambda = {}: {}'.format(lam[2],P[:,2]))
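As a quick sanity check, the decomposition can be verified by reconstructing CC^T from its eigenvalues and eigenvectors (a minimal sketch using lam and P from above):
print(np.allclose(C@C.T, P@np.diag(lam)@inv(P))) #CC^T should equal P diag(lam) P^{-1}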
"""Logical operations"""
a,b = 2,3
print('\n',a > b and a == b,'\n', a > b or a != b ,'\n',not(a==b))
"""if - else statements"""
if a < b:
    print('a < b')
elif a == b:
    print('a = b')
else:
    print('a > b')
#Such a conditional executes the block of the 'first' condition that evaluates to 'True'.
#Note that the indentation is necessary for the interpreter to understand the hierarchy.
if a <= b:
    print('a <= b')
elif a < b:
    print('a < b')
else:
    print('a > b') #The first two conditions are 'True', but only the first block is executed.
"""for loop""" #the statements are executed until the loop exhausts the list of elements in the iterable object
for j in 'Python':
    print(j)
print('\n')
for [k,l] in [[1,2],[2,3]]:
    print(k+l) #list unpacking in the loop header
"""while loop"""
n = [1,2,3]
while(len(n)): #loops until n is empty (len(n) == 0 is falsy)
    print(n.pop())
def IP(a,b): #computes the Euclidean inner product of two 1D arrays
    if len(a)==len(b):
        return np.sum([np.conj(a[k])*b[k] for k in range(len(a))])
    else:
        raise ValueError("The vectors should be of the same dimension")
IP(np.array([0+1j,2,3]),np.array([1,1+2j,1]))
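The same conjugate-linear inner product is available directly in NumPy; a one-line check with the vectors used above:
print(np.vdot(np.array([0+1j,2,3]),np.array([1,1+2j,1]))) #np.vdot conjugates its first argument, matching IP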
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
ax = axs[0]
x = np.random.normal(size=5000)
ax.hist(x,density=True,bins=50)
ax = axs[1]
n = np.arange(1,101) #start at 1 to avoid division by zero
f = 1/n
ax.plot(n,f,'r-',label='sequence')
ax.set_xlabel('n')
ax.set_ylabel('f(n)')
ax.hlines(y=0,xmin=0,xmax=100,colors='g',label='limit')
ax.set_title('$ f(n) = 1/n $')
ax.legend()
plt.show()
x = np.random.normal(5,2,size = 1000)
y = np.array([0.5 + 3*x[k] - 0.5*x[k]**2 + np.random.normal(0,1,size=1) for k in range(len(x))]) #data with additive Gaussian noise
#true parameters: b0 = 0.5, b1 = 3, b2 = -0.5; noise variance = 1
plt.plot(x,y,'ro')
x = x.reshape(-1,1)
ones = np.ones(len(y)).reshape(-1,1)
X = np.concatenate((ones,x,x**2),axis=1)
B = []
for i in range(1000):
    y = np.array([0.5 + 3*x[k] - 0.5*x[k]**2 + np.random.normal(0,1,size=1) for k in range(len(x))]) #fresh noisy sample each iteration
    b = inv(X.T@X)@X.T@y #ordinary least squares estimate
    B.append(b)
B = np.array(B)
B.shape
fig, axs = plt.subplots(1, 3, figsize=(16, 6))
axs[0].hist(B[:,0],density = True,bins=100)
axs[0].set_title('$b_0$')
axs[1].hist(B[:,1],density = True,bins=100)
axs[1].set_title('$b_1$')
axs[2].hist(B[:,2],density = True,bins=100)
axs[2].set_title('$b_2$')
#Note, however, the large variance of the estimates.
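To quantify that spread, one can look at the empirical mean and standard deviation of the 1000 estimates; a minimal sketch using the array B from above (B2 is an illustrative name):
B2 = B.reshape(1000,3) #drop the trailing singleton dimension
print('mean of estimates:', B2.mean(axis=0)) #should be close to (0.5, 3, -0.5)
print('std of estimates :', B2.std(axis=0))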
B = []
U,s,Vh = sp.svd(X) #full SVD: X = U diag(s) Vh
#truncate to rank r = 2, discarding the smallest singular value
Uhat = U[:,0:2]
s_hat = np.diag(s[0:2])
Vhat = Vh[0:2].T
Vhat.shape
for i in range(1000):
    y = np.array([0.5 + 3*x[k] - 0.5*x[k]**2 + np.random.normal(0,1,size=1) for k in range(len(x))])
    b = Vhat@inv(s_hat)@Uhat.T@y #estimate via the rank-2 truncated-SVD pseudo-inverse
    B.append(b)
B = np.array(B)
fig, axs = plt.subplots(1, 3, figsize=(16, 6))
axs[0].hist(B[:,0],density = True,bins=100)
axs[0].set_title('$b_0$')
axs[1].hist(B[:,1],density = True,bins=100)
axs[1].set_title('$b_1$')
axs[2].hist(B[:,2],density = True,bins=100)
axs[2].set_title('$b_2$')
#Notice the reduction in variance, although the estimators are no longer centred at the true values: a bias-variance trade-off.
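The same summary statistics make the trade-off concrete; a minimal sketch over the truncated-SVD estimates in B (B2 again an illustrative name):
B2 = B.reshape(1000,3)
print('mean of truncated-SVD estimates:', B2.mean(axis=0)) #biased away from (0.5, 3, -0.5)
print('std of truncated-SVD estimates :', B2.std(axis=0)) #smaller spread than the OLS estimates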
fig = plt.figure(figsize=(16,6))
img = mpimg.imread('12437-large.jpeg')
print (img.shape)
plt.axis('off')
plt.imshow(img)
img_r = np.reshape(img, (720, 1280*3)) #flatten the colour channels: (720, 1280, 3) -> (720, 3840)
print (img_r.shape )
pca = PCA(n_components=256) #keep the top 256 principal components
ipca = pca.fit(img_r)
img_c = pca.transform(img_r)
print (img_c.shape)
print (np.sum(ipca.explained_variance_ratio_))
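To see how quickly the variance is captured, one can plot the cumulative explained-variance ratio; a minimal sketch using the fitted pca object from above:
plt.plot(np.cumsum(ipca.explained_variance_ratio_)) #cumulative fraction of variance captured
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.show()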
fig = plt.figure(figsize=(16,6))
temp = pca.inverse_transform(img_c)
print (temp.shape)
temp = np.reshape(temp, (720,1280,3))
print (temp.shape)
plt.axis('off')
plt.imshow(np.clip(temp/255,0,1)) #scale to [0,1] and clip, since the reconstruction can overshoot
plt.show()
pca = PCA(n_components=64) #repeat with stronger compression
ipca = pca.fit(img_r)
img_c = pca.transform(img_r)
print (img_c.shape)
print (np.sum(ipca.explained_variance_ratio_))
fig = plt.figure(figsize=(16,6))
temp = pca.inverse_transform(img_c)
print (temp.shape)
temp = np.reshape(temp, (720,1280,3))
print (temp.shape)
plt.axis('off')
plt.imshow(np.clip(temp/255,0,1))
plt.show()
pca = PCA(n_components=16) #even stronger compression
ipca = pca.fit(img_r)
img_c = pca.transform(img_r)
print (img_c.shape)
print (np.sum(ipca.explained_variance_ratio_))
fig = plt.figure(figsize=(16,6))
temp = pca.inverse_transform(img_c)
print (temp.shape)
temp = np.reshape(temp, (720,1280,3))
print (temp.shape)
plt.axis('off')
plt.imshow(np.clip(temp/255,0,1))
plt.show()
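As a rough measure of compression, the rank-k PCA representation stores the scores (720 x k), the components (k x 3840), and the mean (3840 values); a minimal sketch of that arithmetic, assuming the 720 x 3840 flattened image above:
for k in (256, 64, 16):
    ratio = (720*k + k*3840 + 3840)/(720*3840) #approximate storage relative to the raw image
    print('k = {:3d}: storage ratio = {:.3f}'.format(k, ratio))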