Compare commits

fa9f0faf6260ca484661ebfd917b4cb0d1887dfc..d5c072ba3f3e3813e3365e906956bb3f529e5044

No commits in common. "fa9f0faf6260ca484661ebfd917b4cb0d1887dfc" and "d5c072ba3f3e3813e3365e906956bb3f529e5044" have entirely different histories.

13 changed files with 83 additions and 1430 deletions


@@ -1 +0,0 @@
-,trygve,trygves-laptop,19.09.2023 21:12,file:///home/trygve/.config/libreoffice/4;

File diff suppressed because one or more lines are too long (five files)


@@ -1 +0,0 @@
-0.3120380938053131 0.057060789316892624 0.5383008718490601 -0.4334142804145813 0.20545503497123718 0.8229865431785583 0.26446419954299927 1.3929160833358765 0.40466466546058655 -0.06923668831586838


@@ -1,108 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 28 08:23:56 2023

@author: Mohamad Mohannad al Kawadri (mohamad.mohannad.al.kawadri@nmbu.no), Trygve Børte Nomeland (trygve.borte.nomeland@nmbu.no)
"""
from abc import ABC, abstractmethod
import numpy as np
from copy import deepcopy
from torchvision import datasets, transforms


class Network:
    def __init__(self, layers, W_file_list, b_file_list):
        self.layers = layers
        self.W_file_list = W_file_list
        self.b_file_list = b_file_list
        self.x = input

    def run(self, x):
        result = x
        for n, W_file, b_file in zip(self.layers, self.W_file_list, self.b_file_list):
            y = deepcopy(result)
            l = n(y, W_file = W_file, b_file = b_file)
            result = l.run()
        return result

    def evaluate(self, x, expected_value):
        result = list(self.run(x))
        max_value_index = result.index(max(result))
        return int(max_value_index) == expected_value


class Layer:
    def __init__(self, x, W_file, b_file):
        self.x = x
        files = read(W_file, b_file)
        self.W = files.get('W')
        self.b = files.get('b')

    @abstractmethod
    def run(self):
        pass


class SigmaLayer(Layer):
    def run(self):
        return layer(self.W, self.x, self.b)


class ReluLayer(Layer):
    def run(self):
        return relu_layer(self.W, self.x, self.b)


def read(W_file, b_file):
    return {'W': np.loadtxt(W_file), 'b': np.loadtxt(b_file)}


# define activation function
def sigma(y):
    if y > 0:
        return y
    else:
        return 0

sigma_vec = np.vectorize(sigma)


def relu_scalar(x):
    if x > 0:
        return x
    else:
        return 0

relu = np.vectorize(relu_scalar)


# define layer function for given weight matrix, input and bias
def layer(W, x, b):
    return sigma_vec(W @ x + b)


def relu_layer(W, x, b):
    return sigma_vec(W @ x + b)


# Function from example file "read.py"
def get_mnist():
    return datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)


# Function from example file "read.py"
def return_image(image_index, mnist_dataset):
    image, label = mnist_dataset[image_index]
    image_matrix = image[0].detach().numpy()  # Grayscale image, so we select the first channel (index 0)
    return image_matrix.reshape(image_matrix.size), image_matrix, label


def evalualte_on_mnist(image_index, expected_value):
    mnist_dataset = get_mnist()
    x, image, label = return_image(image_index, mnist_dataset)
    network = Network([ReluLayer, ReluLayer, ReluLayer], ['W_1.txt', 'W_2.txt', 'W_3.txt'], ['b_1.txt', 'b_2.txt', 'b_3.txt'])
    return network.evaluate(x, expected_value)


def run_on_mnist(image_index):
    mnist_dataset = get_mnist()
    x, image, label = return_image(image_index, mnist_dataset)
    network = Network([ReluLayer, ReluLayer, ReluLayer], ['W_1.txt', 'W_2.txt', 'W_3.txt'], ['b_1.txt', 'b_2.txt', 'b_3.txt'])
    return network.run(x)


def main():
    print(f'Check if network works on image 19961 (number 4): {evalualte_on_mnist(19961, 4)}')
    print(f'Check if network works on image 10003 (number 9): {evalualte_on_mnist(10003, 9)}')
    print(f'Check if network works on image 117 (number 2): {evalualte_on_mnist(117, 2)}')
    print(f'Check if network works on image 1145 (number 3): {evalualte_on_mnist(1145, 3)}')
    print(f'Values image 19961 (number 4): {run_on_mnist(19961)}')


if __name__ == '__main__':
    main()
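For reference, the deleted script boils down to a plain feed-forward pass: each layer computes relu(W @ x + b) with W and b loaded from the text files, and the predicted digit is the index of the largest of the ten outputs. A minimal stand-alone sketch of the same computation (the function name forward is illustrative, not part of the repository; it assumes the W_i.txt/b_i.txt files and the 784-512-256-10 dimensions used above):

import numpy as np

def forward(x, weight_files=('W_1.txt', 'W_2.txt', 'W_3.txt'),
            bias_files=('b_1.txt', 'b_2.txt', 'b_3.txt')):
    # x: flattened 28x28 MNIST image (784 values), as produced by return_image() above
    y = np.asarray(x, dtype=float)
    for w_file, b_file in zip(weight_files, bias_files):
        W = np.loadtxt(w_file)        # weight matrix, e.g. 512x784, 256x512, 10x256
        b = np.loadtxt(b_file)        # bias vector matching the layer's output size
        y = np.maximum(W @ y + b, 0)  # ReLU layer, equivalent to relu_layer()/sigma() above
    return y

# predicted digit = index of the largest output, as in Network.evaluate():
# prediction = int(np.argmax(forward(x)))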

W_1.txt (512)

File diff suppressed because one or more lines are too long (two files)


@@ -15,4 +15,4 @@ def main():
     print(table(n_list, sq_list, cube_list))
 
 if __name__ == "__main__":
     main()
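The rest of this file is not shown in the compare view, so the table() helper called in the context lines above is only visible by name. A hypothetical sketch of such a helper (the repository's actual implementation may differ):

def table(n_list, sq_list, cube_list):
    # hypothetical formatter: aligns n, n^2 and n^3 in three columns
    header = f'{"n":>5} {"n^2":>8} {"n^3":>10}'
    rows = [f'{n:>5} {sq:>8} {cube:>10}' for n, sq, cube in zip(n_list, sq_list, cube_list)]
    return '\n'.join([header] + rows)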


@@ -49,4 +49,5 @@ def print_imp_dir(path="./"):
     for f in files:
         print(f'{Path.cwd()}/{f}: {get_imp_file(f)}')
 
 print_imp_dir()
+# %%
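Only the tail of this file appears in the hunk, so get_imp_file() is likewise just a name here. A hedged sketch of what a function with that signature might do, given that print_imp_dir() prints its result per file (purely illustrative, not the repository's code):

def get_imp_file(filename):
    # hypothetical: collect the import statements found in a Python source file
    imports = []
    with open(filename, encoding='utf-8') as f:
        for line in f:
            stripped = line.strip()
            if stripped.startswith(('import ', 'from ')):
                imports.append(stripped)
    return imports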

uke6.py (105)

@@ -2,35 +2,46 @@ import numpy as np
 from copy import deepcopy
 
 class Network:
-    def __init__(self, layers, W_file_list, b_file_list):
-        self.layers = layers
-        self.W_file_list = W_file_list
-        self.b_file_list = b_file_list
+    def __init__(self):
+        self.layers = []
+
+class Layer:
+    def __init__(self, w_file=None, b_file=None):
+        # define dimensions
         self.n_layers = 4
         self.n_inputs = 784
         self.n_outputs = 10
         self.n = [self.n_inputs, 512, 256, self.n_outputs]
         self.x = np.random.rand(self.n_inputs)
 
-    def run(self):
-        result = self.x
-        for n, W_file, b_file in zip(self.layers, self.W_file_list, self.b_file_list):
-            y = deepcopy(result)
-            l = n(y, W_file = W_file, b_file = b_file)
-            result = l.run()
-        return result
-
-class Layer:
-    def __init__(self, x, W_file, b_file):
-        self.x = x
-        files = read(W_file, b_file)
-        self.W = files.get('W')
-        self.b = files.get('b')
-
-    def run(self):
-        return layer(self.W, self.x, self.b)
-
-def read(W_file, b_file):
-    return {'W': np.loadtxt(W_file), 'b': np.loadtxt(b_file)}
+        # define weights and biases
+        # generate random wheights if no file is provided. else read the file
+        if w_file == None:
+            self.W_list = []
+            for (self.n_cur, self.n_next) in zip(self.n[:-1], self.n[1:]):
+                self.W_list.append(np.random.rand(self.n_next, self.n_cur))
+        else:
+            with open(w_file) as f:
+                lines = f.readlines()
+                self.W_list = [x.split(' ') for x in lines]
+                self.W_list = [[float(n) for n in x] for x in self.W_list]
+                f.close()
+        if b_file == None:
+            self.b_list = []
+            for (self.n_cur, self.n_next) in zip(self.n[:-1], self.n[1:]):
+                self.b_list.append(np.random.rand(self.n_next))
+        else:
+            with open(b_file) as f:
+                lines = f.readlines()
+                self.b_list = [x.split(' ') for x in lines]
+                self.b_list = [[float(n) for n in x] for x in self.W_list]
+                f.close()
+        print(len(self.W_list[0]), len(self.b_list[0]), len(self.x))
+        print(len(self.W_list[-1]), len(self.b_list[-1]), len(self.x))
+
+    def run(self):
+        print(f(self.W_list, self.b_list, self.x))
 
 # define activation function
 def sigma(y):
@@ -44,9 +55,53 @@ sigma_vec = np.vectorize(sigma)
 def layer(W, x, b):
     return sigma_vec(W @ x + b)
 
+# define neural network with all weights W and all biases b in W_list and b_list
+def f(W_list, b_list, x):
+    y = deepcopy(x) # deepcopy so that input is not changed
+    for W, b in zip(W_list, b_list):
+        y = layer(W, y, b) # call layer multiple times with all weights and biases
+    return y
+
 def main():
-    network = Network([Layer, Layer, Layer], ['W_1.txt', 'W_2.txt', 'W_3.txt'], ['b_1.txt', 'b_2.txt', 'b_3.txt'])
-    print(network.run())
+    l = Layer()
+    l.run()
+
+    l2 = Layer(w_file = 'W_1.txt', b_file = 'b_1.txt')
+    l2.run()
+
+def gamle_greier():
+    # define dimensions
+    n_layers = 4
+    n_inputs = 64
+    n_outputs = 10
+    n = [n_inputs, 128, 128, n_outputs]
+
+    # define weights and biases
+    W_list = []
+    b_list = []
+    for (n_cur, n_next) in zip(n[:-1], n[1:]):
+        W_list.append(np.random.rand(n_next, n_cur))
+        b_list.append(np.random.rand(n_next))
+
+    # generate random input (this would usually be pixels of an image)
+    x = np.random.rand(n_inputs)
+
+    # call the network
+    print(f(W_list, b_list, x))
+    for W in W_list:
+        print(W.shape)
 
 if __name__ == '__main__':
     main()
+    gamle_greier()
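One note on the file-reading branch added in Layer.__init__: it parses the weight file by hand into nested Python lists, and the b_file branch builds self.b_list from self.W_list, which looks like a copy-and-paste slip. The deleted script earlier in this compare view instead loads the same files with np.loadtxt, which returns NumPy arrays that work directly with the @ product in layer(). A small sketch of that approach, assuming whitespace-separated W_1.txt and b_1.txt as referenced elsewhere in this diff:

import numpy as np

W = np.loadtxt('W_1.txt')       # 2-D array, e.g. shape (512, 784)
b = np.loadtxt('b_1.txt')       # 1-D array, e.g. shape (512,)
x = np.random.rand(W.shape[1])  # random stand-in for a flattened input image

y = np.maximum(W @ x + b, 0)    # one layer: same result as layer() with the ReLU-style sigma()
print(y.shape)                  # (512,)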