taga
taga

Reputation: 3895

Tuning a Neural Network made in Python with NumPy

I have written code for a neural network that uses the sigmoid function. I made it with NumPy and Python. The code works well, but now I want to tune it to improve accuracy. How can I tune my NN — do I need to add some parameters, or add hidden layers to it? Is that even possible?

This is the code that I have:

import numpy as np
import pandas as pd

# Toy training set: three numeric input features and one binary label.
training_data = {
    'input 1': [0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
    'input 2': [0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
    'input 3': [0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
    'result':  [1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0],
}
df = pd.DataFrame(training_data)

print(df)

def sigmoid(x):
    """Logistic activation: squash any real value (or array) into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)

def sigmoid_derivate(x):
    """Sigmoid derivative, where x is the sigmoid *output*: s'(z) = x * (1 - x)."""
    complement = 1 - x
    return x * complement


# Split the frame: every column but the last is an input feature,
# the last column ('result') is the binary target.
features = df.iloc[:,:-1].to_numpy()
results =  df.iloc[:,-1:].to_numpy()

# Fixed seed so the random weight initialisation is reproducible run to run.
np.random.seed(1)

# One weight per input feature (3 features -> single output node),
# drawn uniformly from [-1, 1).
weights = 2 * np.random.random((3,1)) - 1

print('These are my random weights:\n')
print(weights)

# Full-batch gradient descent on the single weight vector.  Each pass runs
# the whole feature matrix through the sigmoid, measures the error against
# the labels, and nudges the weights by error * sigmoid'(output).
for _ in range(100000):

    layer_in = features

    outputs = sigmoid(layer_in @ weights)

    error = results - outputs

    delta = error * sigmoid_derivate(outputs)
    weights += layer_in.T @ delta

# --- Evaluation ----------------------------------------------------------
# `outputs` is the (20, 1) column of final sigmoid activations from the
# training loop; round each one to a hard 0/1 prediction and flatten it.
# NOTE(review): the original flattened with itertools.chain but never
# imported itertools (NameError), and then inserted a leading 'None'
# placeholder, making the list 21 elements long versus the 20-row frame
# (ValueError on column assignment).  A nested comprehension needs no
# import, and dropping the placeholder keeps predictions aligned with rows.
outputs = outputs.round(0).tolist()
outputs = [value for row in outputs for value in row]

df['output prediction'] = outputs
print(df)

# Count rows where the rounded prediction matches the ground-truth label.
acc = 0
for actual, predicted in zip(df['result'], df['output prediction']):

    if actual == predicted:

        acc += 1

# Accuracy as a percentage of all rows, rounded to two decimals.
accuracy = round(acc * 100 / len(df), 2)
print(accuracy)

I think it should be added below the part where I define the weights, but I'm not sure.

Thanks for your help!

Upvotes: 1

Views: 263

Answers (1)

Denzel
Denzel

Reputation: 449

import numpy as np
import pandas as pd

# Same toy data set as the question: three inputs and one binary label.
data = {
    'input 1': [0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
    'input 2': [0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
    'input 3': [0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
    'result':  [1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0],
}
df = pd.DataFrame(data)

print(df)

def sigmoid(x):
    """Logistic activation 1 / (1 + e^(-x)); maps any real value into (0, 1)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator

def sigmoid_derivate(x):
    """Sigmoid derivative in terms of the sigmoid output x: x * (1 - x)."""
    slope = 1 - x
    return x * slope

alpha = 0.1  # learning rate scaling every weight update

# Feature matrix (all columns but the last) and label column, as arrays.
features = np.array(df.iloc[:, :-1])
results = np.array(df.iloc[:, -1:])

np.random.seed(1)  # reproducible weight initialisation


# Layer widths: 3 inputs -> 10 hidden -> 4 hidden -> 1 output.  All drawn
# uniformly from [-1, 1).  You can change the layer sizes, but adjacent
# matrices must stay dot-product compatible, e.g. (320,160) and (160,40).
weight0 = 2 * np.random.random((3, 10)) - 1
weight1 = 2 * np.random.random((10, 4)) - 1
weight2 = 2 * np.random.random((4, 1)) - 1
# you can change layer's nodes, but they must be able to make dot product. For example (320,160) and (160,40)
# Full-batch training: forward pass through the three weight matrices,
# then backpropagate the output error layer by layer (chain rule) and
# apply gradient steps scaled by alpha.
for iteration in range(1000):

    # Forward pass.
    layer0 = features
    layer1 = sigmoid(np.dot(layer0, weight0))
    layer2 = sigmoid(np.dot(layer1, weight1))
    layer3 = sigmoid(np.dot(layer2, weight2))

    # Error at the output, then deltas propagated back toward the input.
    layer3_error = results - layer3
    print ("Error after "+str(iteration)+" iterations:" + str(np.mean(np.abs(layer3_error))))
    layer3_delta = layer3_error * sigmoid_derivate(layer3)
    layer2_error = layer3_delta.dot(weight2.T)
    layer2_delta = layer2_error * sigmoid_derivate(layer2)
    layer1_error = layer2_delta.dot(weight1.T)
    layer1_delta = layer1_error * sigmoid_derivate(layer1)

    # Weight updates, scaled by the learning rate.
    weight2 += alpha * layer2.T.dot(layer3_delta)
    weight1 += alpha * layer1.T.dot(layer2_delta)
    weight0 += alpha * layer0.T.dot(layer1_delta)

Here is your code with one input layer, one hidden layer, and one output layer.

Upvotes: 1

Related Questions