David's perceptron.pl: Difference between revisions
Jump to navigation
Jump to search
(New page: <PRE> #!/usr/bin/env python __author__ = "David Stainton" __license__ = "Apache License" import sys class perceptron(object): def __init__(self, values): self.weight = []...) |
No edit summary |
||
| Line 1: | Line 1: | ||
<PRE> | <PRE> | ||
#!/usr/bin/env python | #!/usr/bin/env python | ||
| Line 13: | Line 12: | ||
def __init__(self, values): | def __init__(self, values): | ||
self.weight = [] | self.weight = [] | ||
# is threshold equivalent to bias? | |||
# http://en.wikipedia.org/wiki/Perceptron | |||
self.threshold = .5 | self.threshold = .5 | ||
self.learning_rate = .1 | self.learning_rate = .1 | ||
| Line 18: | Line 19: | ||
self.weight.append(value) | self.weight.append(value) | ||
def | def eval(self, input): | ||
sum = dot_product( | sum = dot_product(input, self.weight) | ||
if sum == self.threshold: | if sum == self.threshold: | ||
output = 0 | output = 0 | ||
| Line 26: | Line 27: | ||
if sum > self.threshold: | if sum > self.threshold: | ||
output = 1 | output = 1 | ||
return output | |||
# returns a boolean indicating if the | |||
def train(self, input, expected): | |||
output = self.eval(input) | |||
if output != expected: | if output != expected: | ||
for elk in range(0,len( | for elk in range(0,len(input)): | ||
if | if input[elk] == 1: | ||
change = (expected - output) * self.learning_rate | change = (expected - output) * self.learning_rate | ||
#print "%s %s" % (elk, change) | #print "%s %s" % (elk, change) | ||
| Line 37: | Line 43: | ||
def dot_product(a, b): | def dot_product(a, b): | ||
sum = 0 | sum = 0 | ||
| Line 48: | Line 50: | ||
i += 1 | i += 1 | ||
return sum | return sum | ||
def main(): | def main(): | ||
# | # training set : | ||
# a list of expected values for each input list | |||
# in this case the training set will teach boolean or | |||
# | |||
expect = [1,0,1,1] | expect = [1,0,1,1] | ||
input = [[1,1], [0,0], [0,1], [1,0]] | input = [[1,1], [0,0], [0,1], [1,0]] | ||
| Line 63: | Line 65: | ||
repeat = 10 | repeat = 10 | ||
# create a perceptron object and initialize weight values... | |||
p = perceptron([0,0]) | |||
# repeatedly train with the training data set | |||
for c in range(0,repeat): | for c in range(0,repeat): | ||
results = [] | results = [] | ||
for elk in range(len(expect)): | for elk in range(len(expect)): | ||
results.append(p. | results.append(p.train(input[elk], expect[elk])) | ||
p. | print(p.weight) | ||
# if the training doesn't return an error for the entire set | |||
# then stop training | |||
if False not in results: | |||
break | |||
print(p.weight) | |||
print "training complete." | |||
# and now we use the trained perceptron | |||
# to evaluate the sets of data.. | |||
for i in input: | |||
output = p.eval(i) | |||
print "%s eval = %s" % (i,output) | |||
print "---" | |||
# of course this below training set doesn't make | |||
# sense from a boolean logic view point; | |||
# nevertheless i had to try it... | |||
input2 = [[1.2,1.3], [0.2,0.7], [0,.5], [1,0]] | |||
for i in input2: | |||
output = p.eval(i) | |||
print "%s eval = %s" % (i,output) | |||
Revision as of 00:40, 12 March 2009
#!/usr/bin/env python
__author__ = "David Stainton"
__license__ = "Apache License"
import sys
class perceptron(object):
    """A single-layer perceptron with a hard threshold activation.

    Weights are seeded from *values* and adjusted by train() using the
    classic perceptron learning rule.  The threshold acts as a fixed
    (non-learned) bias; see http://en.wikipedia.org/wiki/Perceptron
    """

    def __init__(self, values):
        # Copy the seed weights so later training updates cannot
        # mutate the caller's list.
        self.weight = list(values)
        self.threshold = .5
        self.learning_rate = .1

    def eval(self, input):
        """Return 1 if the weighted sum of *input* exceeds the
        threshold, else 0 (a sum exactly at the threshold yields 0)."""
        total = dot_product(input, self.weight)
        return 1 if total > self.threshold else 0

    def train(self, input, expected):
        """Run one training step on (*input*, *expected*).

        Returns True when the perceptron already produces *expected*
        (no weights were changed), False otherwise.

        Uses the general perceptron rule
            w[i] += learning_rate * (expected - output) * input[i]
        which is identical to the original "only update where
        input == 1" behaviour for 0/1 inputs, but also handles
        non-binary inputs correctly.
        """
        output = self.eval(input)
        if output == expected:
            return True
        error = expected - output
        for i, x in enumerate(input):
            # Zero components contribute nothing, so 0/1 training data
            # behaves exactly as before.
            self.weight[i] += self.learning_rate * error * x
        return False
def dot_product(a, b):
    """Return the dot product (sum of pairwise products) of a and b.

    Iterates over the length of *a*, so *b* must be at least as long;
    a shorter *b* raises IndexError, exactly as the original
    index-based loop did.  Extra trailing elements of *b* are ignored.
    """
    return sum(a[i] * b[i] for i in range(len(a)))
def main():
    """Train a perceptron on the boolean-OR truth table, then use it."""
    # Training set: one expected output per input pair.
    # This particular set teaches boolean OR.
    expect = [1, 0, 1, 1]
    samples = [[1, 1], [0, 0], [0, 1], [1, 0]]

    # Try to expose our perceptron to the training set 10 times.
    repeat = 10

    # Create a perceptron object and initialize weight values...
    p = perceptron([0, 0])

    # Repeatedly train with the training data set.
    for c in range(repeat):
        results = []
        for sample, expected in zip(samples, expect):
            results.append(p.train(sample, expected))
        print(p.weight)
        # If the training doesn't return an error for the entire set
        # then stop training.
        if False not in results:
            break
    print(p.weight)
    print("training complete.")

    # And now we use the trained perceptron
    # to evaluate the sets of data..
    for i in samples:
        output = p.eval(i)
        print("%s eval = %s" % (i, output))
    print("---")

    # Of course this below training set doesn't make
    # sense from a boolean-logic viewpoint;
    # nevertheless i had to try it...
    samples2 = [[1.2, 1.3], [0.2, 0.7], [0, .5], [1, 0]]
    for i in samples2:
        output = p.eval(i)
        print("%s eval = %s" % (i, output))
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()