README.md update
MSadeghzadehG committed Dec 30, 2018
1 parent d37b6ee commit 7e0c4dd
Showing 2 changed files with 82 additions and 17 deletions.
50 changes: 49 additions & 1 deletion README.md
@@ -1 +1,49 @@
# simple_neural_network
# Simple Neural Network Maker
A neural network maker that uses perceptrons to build a network, with forward and back propagation implemented through the chain rule instead of a matrix-based approach.

## Create Network:
### Create Perceptron:
A perceptron applies an activation function to the weighted sum of its inputs.
Use the following command to create a perceptron `p` with initial weights `W` and activation function `f`:
```
p = Perceptron(W,f)
```
To calculate the perceptron's output and its derivatives for input `X`, use:
```
p.cal_output(X)
```
To update its weights after back-propagation:
```
p.update_weights()
```
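
A minimal usage sketch of these calls (assuming `neural.py` is importable as a module and that `cal_output` receives a single sample's feature vector, since the perceptron stores that vector as the derivative of its output with respect to the weights):
```
import numpy as np
from neural import Perceptron, relu_n  # relu_n is the leaky-ReLU-style activation defined in neural.py

W = np.random.normal(size=3)       # one initial weight per input
p = Perceptron(W, relu_n)

x = np.array([0.5, -1.2, 0.3])     # hypothetical input vector for one sample
out = p.cal_output(x)              # activation of the weighted input sum;
                                   # the derivatives are stored on p for back-propagation

# p.update_weights() is normally called by the network once back-propagation
# has computed the new weights for this perceptron.
```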
### Build Network:
To create a neural network with loss function `loss`:
```
n = Network(loss)
```
A neural network must have at least one `'input'` and one `'output'` perceptron, and can contain any number of `'hidden'` neurons.
There is almost no limit on the structure of the network: you can connect neurons from the inputs to the output in any way.
To add a perceptron `p` with `i` inputs and type `t` (`'input'`, `'hidden'`, or `'output'`) to the network, use:
```
n.add_node(p,t,i)
```
To connect neuron `p1` to the `i`-th input of `p2`, use:
```
n.connect_nodes(p1,p2,i)
```
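
As a concrete sketch, the demo at the bottom of `neural.py` wires up a small 2-2-1 network (two inputs, two hidden neurons, one output) roughly like this, with `W` an array of initial weights and `mse_loss` and `relu_n` taken from `neural.py`:
```
n = Network(mse_loss)

# two input perceptrons, each fed the 3 raw features
p1 = Perceptron(W, relu_n)
p2 = Perceptron(W, relu_n)
# two hidden perceptrons and one output perceptron, each with 2 inputs
p3 = Perceptron(W[0:2], relu_n)
p4 = Perceptron(W[0:2], relu_n)
p5 = Perceptron(W[0:2], relu_n)

n.add_node(p5, 'output', 2)
n.add_node(p4, 'hidden', 2)
n.add_node(p3, 'hidden', 2)
n.add_node(p2, 'input', 3)
n.add_node(p1, 'input', 3)

# inputs -> hidden -> output
n.connect_nodes(p1, p3, 0)
n.connect_nodes(p2, p3, 1)
n.connect_nodes(p1, p4, 0)
n.connect_nodes(p2, p4, 1)
n.connect_nodes(p3, p5, 0)
n.connect_nodes(p4, p5, 1)
```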
After designing the network, it's time to train it on the training dataset. Training consists of two parts: forward propagation and back propagation.
In each epoch we first run forward propagation and then back propagation.
Forward propagation feeds the data through the network. With training data `X`, use the following command:
```
n.forward_prop(X)
```
Back propagation updates the weights. With training labels `Y` and learning rate `alpha`, use the following command:
```
n.backward_prop(alpha,Y)
```
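
Putting the two steps together, a training loop modelled on the demo at the bottom of `neural.py` looks roughly like this (a sketch; the printed loss is optional monitoring):
```
alpha = 0.1      # learning rate
epochs = 1000

for i in range(epochs):
    n.forward_prop(X)           # forward pass over the whole training set
    loss = n.cal_loss(Y)        # mean loss over the output node, for monitoring
    n.backward_prop(alpha, Y)   # back-propagate and update every perceptron's weights
```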

## To-Do
- Add a `compile` function to the `Network` class that checks the connections between nodes and their inputs
- Build a higher-level API

49 changes: 33 additions & 16 deletions neural.py
@@ -2,7 +2,8 @@
import math


c = math.pow(10,-10)
# constant c used for calculating numerical derivatives
c = math.pow(10,-15)


def d1_cal(x,f): # calculates the derivative of f by x
@@ -13,27 +14,30 @@ def d2_cal(x1,x2,f): # calculates the derivative of f by x1
return float(f(x1,x2) - f(x1-c,x2))/float(c)


# loss functions
def mse_loss(y_pred,y):
return ((y_pred-y)**2)/2


#activation functions
# activation functions
def relu(input):
# print(input)
return max(0,input)


def relu_n(input):
if input>=0:
return input
else:
return 0.01*input


def step(input):
if input>=0:
return 1
else:
return -1


class Perceptron():
def __init__(self,W,activation):
self.output = []
@@ -55,7 +59,7 @@ def cal_output(self,X):
def output_i(w,x):
return w*x
# print(self.output)
self.d_output.append(X) #is equal X?
self.d_output.append(X)
# print(self.d_output)
return self.output

@@ -70,35 +74,33 @@ def cal_next_W(self,alpha,Y,i):

def update_weights(self):
self.W = self.W_next
# self.output = []
# self.d_output = []
return self.W


class Network:
def __init__(self):
def __init__(self,l):
self.nodes = []
self.types = []
self.loss = 1
self.loss_function = l


def add_node(self,p,t,n):
self.nodes.append(p)
self.types.append(t)


def connect_nodes(self,p1,p2,n_of_w_in):
p1.next.append((p2,n_of_w_in))
# print(p1.next)
p2.pre.append((p1,n_of_w_in))
# print(p2.pre)



def cal_loss(self,Y):
for p in self.nodes:
if self.types[self.nodes.index(p)] == 'output':
self.loss = sum([mse_loss(p.output[i],Y[i]) for i in range(len(Y))])/(len(Y))
self.loss = sum([self.loss_function(p.output[i],Y[i]) for i in range(len(Y))])/(len(Y))
print('====================')
return self.loss

@@ -131,7 +133,7 @@ def backward_prop(self,alpha,Y):
if self.types[self.nodes.index(p)] == 'output':
for pre in p.pre:
to_update.add(pre)
self.d_loss = [d2_cal(p.output[i],Y[i],mse_loss) for i in range(len(Y))]
self.d_loss = [d2_cal(p.output[i],Y[i],self.loss_function) for i in range(len(Y))]
for i in range(len(Y)):
p.c.append(self.d_loss[i])
p.cal_next_W(alpha,Y,i)
@@ -166,20 +168,22 @@ def backward_prop(self,alpha,Y):
p.c = []
p.update_weights()



m = 1000 #num of data
# testing...
m = 100 #num of data
n = 3 #num of input features
alpha = 1 #learning rate
alpha = 0.1 #learning rate
epoch = 1000 #num of epochs

# data generation
Y = [] #labels
X = np.zeros((m,3)) #inputs
for i in range(m):
for j in range(n):
X[i][j] = np.random.normal()
Y.append(sum([t**2 for t in X[i]]))

# initializing weights
W = np.zeros(n) #initial weights
for i in range(n):
W[i]=np.random.normal()
@@ -189,15 +193,28 @@ def backward_prop(self,alpha,Y):
# print(Y[0])
# input()

N = Network()
# building network
N = Network(mse_loss)
p1 = Perceptron(W,relu_n)
p2 = Perceptron(W,relu_n)
p3 = Perceptron(W[0:2],relu_n)
N.add_node(p3,'output',2)
p4 = Perceptron(W[0:2],relu_n)
p5 = Perceptron(W[0:2],relu_n)

N.add_node(p5,'output',2)
N.add_node(p4,'hidden',2)
N.add_node(p3,'hidden',2)
N.add_node(p2,'input',3)
N.add_node(p1,'input',3)

N.connect_nodes(p1,p3,0)
N.connect_nodes(p2,p3,1)
N.connect_nodes(p1,p4,0)
N.connect_nodes(p2,p4,1)
N.connect_nodes(p3,p5,0)
N.connect_nodes(p4,p5,1)

# learning
for i in range(epoch):
print('--forward start--')
N.forward_prop(X)