Hello All,
I am simply generating data points in the first and third quadrants with the code below:

    import random
    import numpy as np
    import matplotlib.pyplot as plt

    plt.rcParams['axes.unicode_minus'] = False

    # generate a data set of 100.
    # for simplicity, 50 in the first quadrant, another 50 in the third quadrant
    train_X1 = []
    train_Y1 = []
    train_X2 = []
    train_Y2 = []
    for i in range(50):
        train_X1.append(random.uniform(0, 1))
        train_Y1.append(random.uniform(0, 1))
        train_X2.append(random.uniform(-1, 0))
        train_Y2.append(random.uniform(-1, 0))

    # label the data
    train_data1 = [np.array([1, train_X1[i], train_Y1[i], 1]) for i in range(50)]
    train_data2 = [np.array([1, train_X2[i], train_Y2[i], -1]) for i in range(50)]
    train_data = train_data1 + train_data2

My perceptron algorithm is as follows:

    # Problem 1.5
    class Perceptron(object):
        def __init__(self, data):
            self.W = np.zeros(len(data[0:3]))
            self.update = 0
            self.learning_rate = 0.01

        def predict(self, x):
            activation = np.dot(self.W.T, x)
            return np.sign(activation)

        def fit(self, data):
            count = 0
            X = np.array(data)[:, 0:3]
            d = np.array(data)[:, 3:4]
            while self.update < 1000:
                # self.update = 0
                for i in range(len(data)):
                    predicted_value_y = self.predict(X[i])
                    expected_value = d[i]
                    if expected_value * predicted_value_y <= 1:
                        self.W = self.W + self.learning_rate * (expected_value - predicted_value_y) * X[i]
                        # print(self.W)
                    self.update += 1
            print("Number of iterations for converging:", count)

To me this seems correct according to the update rule mentioned in Problem 1.5 of the book, but somehow, even when I change the learning rate, my final hypothesis 'g' does not change and still classifies some data points incorrectly. Is this expected?
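For reference, here is a minimal sketch of the variant in which both the condition and the update use the raw signal s = w·x rather than sign(w·x), which is one common reading of the Problem 1.5 rule. It is only an illustration under that assumption; the class name AdalineVariant, the max_updates cap, and the stopping test are placeholders, not from the book:

    import numpy as np

    class AdalineVariant(object):
        """Sketch: update driven by the raw signal s = w.x, not its sign."""
        def __init__(self, dim=3, learning_rate=0.01, max_updates=1000):
            self.W = np.zeros(dim)              # first component multiplies the bias input 1
            self.learning_rate = learning_rate
            self.max_updates = max_updates

        def fit(self, data):
            X = np.array(data)[:, 0:3]          # columns [1, x, y]
            d = np.array(data)[:, 3]            # labels +1 / -1
            updates = 0
            while updates < self.max_updates:
                changed = False
                for i in range(len(data)):
                    s = np.dot(self.W, X[i])    # raw signal, no sign()
                    y = d[i]
                    if y * s <= 1:              # margin-style condition on the raw signal
                        self.W = self.W + self.learning_rate * (y - s) * X[i]
                        updates += 1
                        changed = True
                        if updates >= self.max_updates:
                            break
                if not changed:                 # every point satisfies y * s > 1: stop
                    break
            return self.W

With the data built above, usage would be `w = AdalineVariant().fit(train_data)`. The only substantive difference from the code in the post is that s is not passed through np.sign before the test and the update; with sign outputs the product expected_value * predicted_value_y is always +1 or -1, so the <= 1 condition holds on every point.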
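Since matplotlib is already imported in the post, a small plotting sketch can help to see what 'g' looks like. The helper below is hypothetical (not part of the post's code) and just assumes a fitted 3-component weight vector W:

    import numpy as np
    import matplotlib.pyplot as plt

    def plot_result(train_X1, train_Y1, train_X2, train_Y2, W):
        # scatter the two labeled clouds
        plt.scatter(train_X1, train_Y1, c='b', label='+1 (first quadrant)')
        plt.scatter(train_X2, train_Y2, c='r', label='-1 (third quadrant)')
        # decision boundary: W[0] + W[1]*x + W[2]*y = 0, solved for y
        xs = np.linspace(-1, 1, 100)
        if W[2] != 0:
            plt.plot(xs, -(W[0] + W[1] * xs) / W[2], 'k--', label='learned boundary')
        plt.legend()
        plt.show()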
Tags: problem 1.5, variation of adaline