Basic Linear Algebra Concepts

Examining some basic concepts of linear algebra using Python libraries
jupyter
maths
linear-algebra
tensors
matrices
Published: April 30, 2022

Intro to Linear Algebra

References: 1. https://github.com/jonkrohn/ML-foundations

import numpy as np
import math
import torch
import tensorflow as tf
import matplotlib.pyplot as plt

Scalar Tensors

PyTorch tensors, unlike NumPy arrays, can be used for operations on either the CPU or the GPU, by casting the tensor to a CUDA data type (i.e. moving it to a CUDA device).
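
A minimal sketch of moving a tensor to the GPU (this assumes PyTorch is installed; it falls back to the CPU when no CUDA device is available):

device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.tensor(10, dtype=torch.int64).to(device)  # cast/move to the chosen device
print(x.device)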

TensorFlow tensors are immutable, and are created using wrapper functions such as tf.constant.
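
Immutability means elements cannot be assigned in place; tf.Variable is the mutable counterpart. A small sketch:

x = tf.constant([1, 2])
# x[0] = 5 would raise a TypeError, since the tensor is immutable
v = tf.Variable([1, 2])  # mutable wrapper
v.assign([3, 4])
print(v)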

# Example scalar pytorch tensor
x = torch.tensor(10, dtype=torch.int64)
print(f"Type: {x.dtype}, Shape: {x.shape}")
Type: torch.int64, Shape: torch.Size([])
# Example scalar tensorflow tensor
x = tf.constant(10, dtype=tf.int64)
x
<tf.Tensor: shape=(), dtype=int64, numpy=10>

Vector Transposition

x = np.array([[5,6,7]]) # Need extra brackets, else shape will be (3,) (1D, not 2D)
print(f"x = {x} \nShape = {x.shape}")
x = [[5 6 7]] 
Shape = (1, 3)
x_t = x.T
print(f"x transposed:\n{x_t} \nShape = {x_t.shape}")
x transposed:
[[5]
 [6]
 [7]] 
Shape = (3, 1)
x = torch.tensor([5,6,7])
print(f"x = {x} \nShape = {x.shape}")
x = tensor([5, 6, 7]) 
Shape = torch.Size([3])
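
Transposing a 1-D tensor has no effect, so to get a column vector in PyTorch the tensor also needs a second dimension, e.g. via unsqueeze (a small sketch using the same values):

x = torch.tensor([5,6,7]).unsqueeze(0)  # add a leading dimension: shape (1, 3)
print(x.T)        # tensor([[5], [6], [7]])
print(x.T.shape)  # torch.Size([3, 1])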

Vector Norms

# L2 norm - the Euclidean length of the vector
x = np.array([[5,6,7]])
np.linalg.norm(x)
10.488088481701515
# L1 Norm
np.sum(np.absolute(x))
18
# Squared L2 Norm
x = np.array([5,6,7])
np.dot(x,x)
110
# Max norm
np.max(np.abs(x))
7
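
These norms are related: the L2 norm is the square root of the squared L2 norm, which a quick check confirms (math was imported above):

x = np.array([5,6,7])
print(math.sqrt(np.dot(x, x)))  # sqrt(110) ~= 10.488, matching np.linalg.norm(x)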
#Orthogonal
i = np.asarray([1,0])
j = np.asarray([0,1])
np.dot(i,j)
0
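
Two vectors are orthogonal when their dot product is zero. Since i and j also have unit length, they are orthonormal, which a quick check confirms:

print(np.linalg.norm(i), np.linalg.norm(j))  # both 1.0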

Matrices (2-Tensor)

X = np.array([[5,3],[6,8],[2,10]])
print(f"{X}\n Shape: {X.shape} Size: {X.size}")
[[ 5  3]
 [ 6  8]
 [ 2 10]]
 Shape: (3, 2) Size: 6
X[:,0] # left column
array([5, 6, 2])
X[1,:] # middle row
array([6, 8])
#pytorch
X = torch.tensor([[5,3],[6,8],[2,10]])
print(f"{X}\n Shape: {X.shape}")
tensor([[ 5,  3],
        [ 6,  8],
        [ 2, 10]])
 Shape: torch.Size([3, 2])
#Tensorflow
X = tf.constant([[5,3],[6,8],[2,10]])
print(f"Rank: {tf.rank(X)} Shape: {tf.shape(X)}")
Rank: 2 Shape: [3 2]

n-Tensors

X = torch.zeros([5,2,4,2])
X.shape
torch.Size([5, 2, 4, 2])
X = tf.zeros([5,2,4,2])
print(f"Rank: {tf.rank(X)}")
Rank: 4
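
NumPy follows the same pattern, with ndim giving the rank:

X = np.zeros([5,2,4,2])
print(f"Rank: {X.ndim} Shape: {X.shape}")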

Tensor Operations

X = np.array([[5,3],[6,8],[2,10]])
print(f"X*2 = \n{X*2}\n  X+2 =\n{X+2}")
X*2 = 
[[10  6]
 [12 16]
 [ 4 20]]
  X+2 =
[[ 7  5]
 [ 8 10]
 [ 4 12]]
X = torch.tensor([[5,3],[6,8],[2,10]])
print(f"X*2 = \n{torch.mul(X,2)}\n  X+2 =\n{torch.add(X,2)}")
X*2 = 
tensor([[10,  6],
        [12, 16],
        [ 4, 20]])
  X+2 =
tensor([[ 7,  5],
        [ 8, 10],
        [ 4, 12]])
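
TensorFlow provides the equivalent wrappers tf.multiply and tf.add (a sketch following the same pattern):

X = tf.constant([[5,3],[6,8],[2,10]])
print(f"X*2 = \n{tf.multiply(X,2)}\n  X+2 =\n{tf.add(X,2)}")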

Elementwise Product

If two tensors have the same shape, operations can be applied elementwise. This elementwise product is the Hadamard product, $\boldsymbol{X} \odot \boldsymbol{Y}$. It produces a tensor of the same shape as its inputs, unlike the dot product, which produces a scalar value.

X = np.array([[5,3],[6,8],[2,10]])
Y = X + 5
print(X*Y) # elementwise (Hadamard) product, not matrix multiplication
[[ 50  24]
 [ 66 104]
 [ 14 150]]
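
The same Hadamard product in PyTorch and TensorFlow; the * operator is overloaded for elementwise multiplication in both libraries (a small sketch):

X = torch.tensor([[5,3],[6,8],[2,10]])
print(X * (X + 5))  # same values as the NumPy result above
X = tf.constant([[5,3],[6,8],[2,10]])
print(X * (X + 5))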

Tensor Reduction

print(X.sum(), torch.sum(torch.tensor([[5,3],[6,8],[2,10]])))
print(tf.reduce_sum(tf.constant([[5,3],[6,8],[2,10]])))
print(X.sum(axis=0), X.sum(axis=1)) #along specific axes
print(torch.sum(torch.tensor([[5,3],[6,8],[2,10]]),0))
print(torch.sum(torch.tensor([[5,3],[6,8],[2,10]]),1))
print(tf.reduce_sum(tf.constant([[5,3],[6,8],[2,10]]),0))
print(tf.reduce_sum(tf.constant([[5,3],[6,8],[2,10]]),1))
34 tensor(34)
tf.Tensor(34, shape=(), dtype=int32)
[13 21] [ 8 14 12]
tensor([13, 21])
tensor([ 8, 14, 12])
tf.Tensor([13 21], shape=(2,), dtype=int32)
tf.Tensor([ 8 14 12], shape=(3,), dtype=int32)
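
Other reductions follow the same pattern, e.g. the mean (note that torch.mean requires a floating-point tensor):

X = np.array([[5,3],[6,8],[2,10]])
print(X.mean())
print(torch.mean(torch.tensor([[5.,3],[6,8],[2,10]])))
print(tf.reduce_mean(tf.constant([[5.,3],[6,8],[2,10]])))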

Dot Product

The vectors must be of the same length for the elementwise multiplication, and hence the dot product, to be defined.

X = np.array([5,3,6,8,2,10])
Y = X + 1
print(np.dot(X,Y))
272
print(torch.dot(torch.tensor([5,3,6,8,2,10]),torch.add(torch.tensor([5,3,6,8,2,10]),1)))
tensor(272)
print(tf.reduce_sum(tf.multiply(tf.constant([5,3,6,8,2,10]),tf.add(tf.constant([5,3,6,8,2,10]),1))))
tf.Tensor(272, shape=(), dtype=int32)
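
TensorFlow can also compute the dot product directly with tf.tensordot, summing over one axis:

print(tf.tensordot(tf.constant([5,3,6,8,2,10]), tf.add(tf.constant([5,3,6,8,2,10]), 1), axes=1))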

Matrices

Frobenius Norm

X = np.array([[5,3],[6,8],[2,10]])
print(np.linalg.norm(X))
print(torch.norm(torch.tensor([[5.,3],[6,8],[2,10]])))
print(tf.norm(tf.constant([[5.,3],[6,8],[2,10]])))
15.427248620541512
tensor(15.4272)
tf.Tensor(15.427248, shape=(), dtype=float32)
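
The Frobenius norm is the square root of the sum of the squared entries, so it can be checked by hand:

X = np.array([[5.,3],[6,8],[2,10]])
print(math.sqrt((X**2).sum()))  # sqrt(238) ~= 15.427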

Matrix Multiplication

A = np.array([[3,4],[5,6],[7,8]])
b = np.array([1,2])
print(np.dot(A,b)) # np.dot infers matrix-vector multiplication here from the shapes
C = torch.tensor([[3,4],[5,6],[7,8]])
d = torch.tensor([1,2])
print(torch.matmul(C,d))
E = tf.constant([[3,4],[5,6],[7,8]])
f = tf.constant([1,2])
print(tf.linalg.matvec(E,f))
[11 17 23]
tensor([11, 17, 23])
tf.Tensor([11 17 23], shape=(3,), dtype=int32)
A = np.array([[3,4],[5,6],[7,8]])
b = np.array([[1,9],[2,0]])
print(np.dot(A,b)) 
C = torch.tensor([[3,4],[5,6],[7,8]])
d = torch.tensor([[1,9],[2,0]])
print(torch.matmul(C,d))
E = tf.convert_to_tensor(A)
f = tf.convert_to_tensor(b)
print(tf.matmul(E,f))
[[11 27]
 [17 45]
 [23 63]]
tensor([[11, 27],
        [17, 45],
        [23, 63]])
tf.Tensor(
[[11 27]
 [17 45]
 [23 63]], shape=(3, 2), dtype=int64)
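
NumPy, PyTorch and TensorFlow all overload the @ operator for matrix multiplication, which reads more naturally than the function calls above (a brief sketch):

A = np.array([[3,4],[5,6],[7,8]])
b = np.array([[1,9],[2,0]])
print(A @ b)  # identical to np.dot(A, b) for 2-D arrays
print(torch.tensor([[3,4],[5,6],[7,8]]) @ torch.tensor([[1,9],[2,0]]))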

Matrix Inversion

X = np.array([[4,2],[-5,-3]])
X_inv = np.linalg.inv(X)
y = np.array([4,-7])
print(w := np.dot(X_inv, y))  # w = X^-1 y solves Xw = y
print(np.dot(X, w))  # check: Xw recovers y
[-1.  4.]
[ 4. -7.]
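
Here the inverse solves the linear system $Xw = y$ for $w$. In practice np.linalg.solve is preferred, as it avoids explicitly forming the inverse and is more numerically stable:

X = np.array([[4,2],[-5,-3]])
y = np.array([4,-7])
print(np.linalg.solve(X, y))  # [-1. 4.], matching the result above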