2017-10-24

Oct 25 In-Class Exercise Thread.

Post your solutions to the Oct 25 In-Class Exercise Here.
Best, Chris
Post your solutions to the Oct 25 In-Class Exercise Here. Best, Chris
2017-10-25

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf
def perceptron(weights, inputs, biases, activation):
    """Apply one perceptron layer: activation(weights * inputs + biases)."""
    pre_activation = (weights * inputs) + biases
    return activation(pre_activation)
def step(nodes):
    # Heaviside-style step: clip to [0, 1] then ceil, so any value > 0
    # maps to 1.0 and any value <= 0 maps to 0.0.
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))
# Input placeholder: one 3-bit pattern as a length-3 float vector.
x = tf.placeholder(tf.float32, shape=(3))
# One row of scaled weights per unit.  NOTE(review): perceptron() uses
# elementwise `weights * inputs` (broadcasts 3x3 * 3 -> 3x3), not a
# matmul — confirm that is the intended layer computation.
W = tf.Variable([[2, 2, 2], [-2/3, -2/3, -2/3], [2/5, 2/5, 2/5]], dtype=tf.float32)
bias = tf.zeros((3), dtype=tf.float32)
# The original merged both layer assignments onto one (invalid) line;
# split them so the script parses.
layer1 = perceptron(W, x, bias, step)
layer2 = perceptron(W, layer1, bias, step)
session = tf.Session()
# TODO: session.run(tf.global_variables_initializer()) and then
# evaluate the layers, e.g. session.run(layer2, {x: [1.0, 0.0, 1.0]}).
  1. TODO
import tensorflow as tf def perceptron(weights, inputs, biases, activation): nodes = weights * inputs + biases return activation(nodes) def step(nodes): return tf.ceil(tf.clip_by_value(nodes, 0, 1)) x = tf.placeholder(tf.float32, shape=(3)) W = tf.Variable([[2, 2, 2], [-2/3, -2/3, -2/3], [2/5, 2/5, 2/5]], dtype=tf.float32) bias = tf.zeros((3), dtype=tf.float32) layer1 = perceptron(W, x, bias, step) layer2 = perceptron(W, layer1, bias, step) session = tf.Session() # TODO

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def perceptron(weights, inputs, biases, activation):
    """Dense layer: activation(matmul(weights, inputs) + biases)."""
    pre_activation = tf.add(tf.matmul(weights, inputs), biases)
    return activation(pre_activation)
def step(nodes):
    # Binary threshold: values > 0 become 1.0, values <= 0 become 0.0
    # (clip into [0, 1], then round up).
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))
# One input column vector (3 features x 1 sample).
x = tf.placeholder(tf.float32, shape=(3, 1))
# Hidden layer: three threshold units over x+y+z.
W1 = tf.Variable([[2, 2, 2],
                  [-2 / 3, -2 / 3, -2 / 3],
                  [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32)
b1 = tf.Variable([[-1], [1], [-1]], dtype=tf.float32)
# Output layer: 1x3 row so matmul(W2, layer1) yields a 1x1 result.
# (Original wrote tf.Variable(1/2, 1/2, 1, ...), which passes 1/2 and 1
# into tf.Variable's trainable/shape parameters — wrap in a nested list.)
W2 = tf.Variable([[1 / 2, 1 / 2, 1]], dtype=tf.float32)
b2 = tf.Variable(-1, dtype=tf.float32)
my_layer1 = perceptron(W1, x, b1, step)
my_layer2 = perceptron(W2, my_layer1, b2, step)

session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
l1 = session.run(my_layer1, {x: [[1.0], [1.0], [1.0]]})
print("L1 layer")
print(l1)
print("L2 layer")
# my_layer2 already chains through my_layer1, so feed the raw input;
# feeding l1 back into x would apply layer 1 twice.
print(session.run(my_layer2, {x: [[1.0], [1.0], [1.0]]}))
  1. outputs
  2. array([[ 1., 1.],
  3. [ 1., 1.]], dtype=float32)
(Edited: 2017-10-25)
import tensorflow as tf import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' def perceptron(weights, inputs, biases, activation): #print(tf.shape(weights)) #print(tf.shape(inputs)) nodes = tf.matmul(weights, inputs) + biases return activation(nodes) def step(nodes): return tf.ceil(tf.clip_by_value(nodes, 0, 1)) x = tf.placeholder(tf.float32, shape=(3, 1)) W1 = tf.Variable([[2, 2, 2], [-2 / 3, -2 / 3, -2 / 3], [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32) b1 = tf.Variable([[-1], [1], [-1]], dtype=tf.float32) W2 = tf.Variable([[1/2, 1/2, 1]], dtype=tf.float32) b2 = tf.Variable(-1, dtype=tf.float32) [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] my_layer1 = perceptron(W1, x, b1, step) my_layer2 = perceptron(W2, my_layer1, b2, step) session = tf.Session() init = tf.global_variables_initializer() session.run(init) l1 = session.run(my_layer1, {x: [[1.0], [1.0], [1.0]]}) print(tf.shape(l1)) print("L1 layer") print(l1) print("L2 layer") print(session.run(my_layer2, {x: l1})) # outputs # array([[ 1., 1.], # [ 1., 1.]], dtype=float32)

-- Oct 25 In-Class Exercise Thread
Name : Kunal Deshmukh
import tensorflow as tf
# Layer parameters.  (The original one-liner passed scalars positionally,
# e.g. tf.Variable(-1, -1, -1, ...), which misuses tf.Variable's
# trainable/shape arguments — wrap the values in lists instead.)
W1 = tf.Variable([[2, 2, 2], [-1.5, -1.5, -1.5], [0.5, 0.5, 1]], dtype=tf.float32)
b1 = tf.Variable([-1.0, -1.0, -1.0], dtype=tf.float32)
W2 = tf.Variable([2.0, 2.0, 1.0], dtype=tf.float32)
b2 = tf.Variable(1.0, dtype=tf.float32)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
session.run(W1)  # original ran undefined names W and b
session.run(b1)
def perceptron(weights, inputs, biases, activation):
    """Return the activation of the weighted input plus bias."""
    return activation(weights * inputs + biases)
def step(nodes):
    # Step activation: 1.0 for positive pre-activations, else 0.0.
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))
# The placeholder must match the 3-element feeds below (the original used
# shape=(2), which rejects the [1, 1, 1] feed dict values).
x = tf.placeholder(tf.float32, shape=(3))
my_layer1 = perceptron(W1, x, b1, step)
my_layer2 = perceptron(W2, my_layer1, b2, step)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
session.run(my_layer1, {x: [1, 1, 1]})
session.run(my_layer2, {x: [1, 1, 1]})
(Edited: 2017-10-25)
Name : Kunal Deshmukh import tensorflow as tf W1 = tf.Variable([[2,2,2],[-1.5,-1.5,-1.5],[0.5,0.5,1]], dtype=tf.float32) b1 = tf.Variable([[-1,-1,-1]], dtype=tf.float32) W2 = tf.Variable([[2,2,1]], dtype=tf.float32) b2 = tf.Variable([[1]], dtype=tf.float32) session = tf.Session() init = tf.global_variables_initializer() session.run(init) session.run(W) session.run(b) def perceptron(weights, inputs, biases, activation): nodes = weights * inputs + biases return activation(nodes) def step(nodes): return tf.ceil(tf.clip_by_value(nodes, 0, 1)) x = tf.placeholder(tf.float32, shape=(2)) my_layer1 = perceptron(W1, x, b1, step) my_layer2 = perceptron(W2,my_layer1,b2,step) session = tf.Session() init = tf.global_variables_initializer() session.run(init) session.run(my_layer1, {x:[1,1,1]}) session.run(my_layer2, {x:[1,1,1]})

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf import os
def perceptron(weights, inputs, biases, activation):
    """Feed `inputs` through one layer: activation(weights*inputs + biases)."""
    weighted = weights * inputs
    return activation(weighted + biases)
def step(nodes):
    # Threshold activation: clip into [0, 1] then ceil, giving 1.0 for
    # any positive value and 0.0 otherwise.
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))
# Input placeholder: one length-3 pattern.
x = tf.placeholder(tf.float32, shape=(3))
W1 = tf.Variable([[2, 2, 2], [-2 / 3, -2 / 3, -2 / 3], [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32)
b1 = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32)
W2 = tf.Variable([1 / 2, 1 / 2, 1], dtype=tf.float32)
b2 = tf.Variable(0.0, dtype=tf.float32)
layer1 = perceptron(W1, x, b1, step)
# Original referenced undefined `my_layer1` here — the first layer is
# named `layer1` in this script.
layer2 = perceptron(W2, layer1, b2, step)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
l1 = session.run(layer1, {x: [0.0, 1.0, 0.0]})  # was my_layer1: NameError
print(l1)
# layer2 already chains through layer1, so evaluate it on the original
# input rather than feeding layer-1 output back into x.
print(session.run(layer2, {x: [0.0, 1.0, 0.0]}))
import tensorflow as tf import os def perceptron(weights, inputs, biases, activation): nodes = weights * inputs + biases return activation(nodes) def step(nodes): return tf.ceil(tf.clip_by_value(nodes, 0, 1)) x = tf.placeholder(tf.float32, shape=(3)) W1 = tf.Variable([[2, 2, 2], [-2 / 3, -2 / 3, -2 / 3], [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32) b1 = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32) W2 = tf.Variable([1 / 2, 1 / 2, 1], dtype=tf.float32) b2 = tf.Variable(0.0, dtype=tf.float32) layer1 = perceptron(W1, x, b1, step) layer2 = perceptron(W2, my_layer1, b2, step) session = tf.Session() init = tf.global_variables_initializer() session.run(init) l1= session.run(my_layer1, {x: [0.0, 1.0, 0.0]}) print(l1) print(session.run(layer2, {x:l1[0]}))

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf
def perceptron(weights, inputs, biases, activation):
    """Dense layer: activation(matmul(weights, inputs) + biases)."""
    pre_activation = tf.matmul(weights, inputs) + biases
    return activation(pre_activation)
def step(nodes):
    # Binary step: maps positive pre-activations to 1.0, the rest to 0.0.
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))
uniform_init = tf.random_uniform_initializer(0, 1)
# x = tf.Variable([1,0,0], dtype=tf.float32)
x = tf.placeholder(tf.float32, shape=(3, 1))
# Hidden thresholds on s = x+y+z:
#   G1 checks s > 1/2, G2 checks s < 3/2, G3 checks s > 2.5
# (the original comment labelled the third row "G2" — typo for G3).
W1 = tf.Variable([[2, 2, 2],
                  [-2/3, -2/3, -2/3],
                  [1/2.5, 1/2.5, 1/2.5]], dtype=tf.float32)
b1 = tf.Variable([[-1], [1], [-1]], dtype=tf.float32)
# Output unit fires when OutG1 + OutG2 + 2*OutG3 >= 2.  W2 must be a 1x3
# matrix for tf.matmul; the original tf.Variable((1/2, 1/2, 1), ...) made
# a rank-1 variable, which matmul rejects against the 3x1 layer output.
W2 = tf.Variable([[1/2, 1/2, 1]], dtype=tf.float32)
b2 = tf.Variable([-1], dtype=tf.float32)
my_layer1 = perceptron(W1, x, b1, step)
my_layer2 = perceptron(W2, my_layer1, b2, step)

session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
print(session.run(my_layer1, {x: [[0], [1], [1]]}))
# Original printed this line twice; once is enough.
print(session.run(my_layer2, {x: [[0], [1], [1]]}))
# print(session.run(my_layer2, {x: [[1], [1], [1]]}))
# print(session.run(my_layer2, {x: [[1], [0], [1]]}))
# print(session.run(my_layer2, {x: [[0], [0], [1]]}))
# print(session.run(my_layer2, {x: [[0], [0], [0]]}))
import tensorflow as tf def perceptron(weights, inputs, biases, activation): nodes = tf.add(tf.matmul(weights,inputs), biases) return activation(nodes) def step(nodes): return tf.ceil(tf.clip_by_value(nodes, 0, 1)) uniform_init = tf.random_uniform_initializer(0, 1) # x = tf.Variable([1,0,0], dtype=tf.float32) x = tf.placeholder(tf.float32, shape=(3,1)) W1 = tf.Variable( ([[2,2,2], [-2/3, -2/3, -2/3], [1/2.5, 1/2.5, 1/2.5]]), dtype=tf.float32) b1 = tf.Variable([[-1],[1],[-1]], dtype=tf.float32) W2 = tf.Variable( ([[1/2, 1/2, 1]]), dtype=tf.float32) b2 = tf.Variable([-1], dtype=tf.float32) # G1 checks x+y+z > 1/2 # W1 = tf.Variable([2,2,2], dtype=tf.float32) # G2 checks x+y+z < 3/2 # W2 = tf.Variable([-2/3,-2/3,-2/3], dtype=tf.float32) # G2 checks x+y+z > 2.5 # W3 = tf.Variable([1/2.5,1/2.5,1/2.5], dtype=tf.float32) # b1 = tf.placeholder(tf.float32, shape=(3,3)) # W1 = tf.get_variable("W1", shape=[2, 2], initializer=uniform_init) # b1 = tf.get_variable("b1", initializer=tf.zeros_initializer) my_layer1 = perceptron(W1, x, b1, step) session = tf.Session() init = tf.global_variables_initializer() # l1_out = tf.placeholder(tf.float32) #OutG1+OutG2+2OutG3≥2 my_layer2 = perceptron(W2, my_layer1,b2, step) print(session.run(init)) print(session.run(my_layer1, {x: [[0], [1], [1]]})) print(session.run(my_layer2, {x: [[0], [1], [1]]})) print(session.run(my_layer2, {x: [[0], [1], [1]]})) # print(session.run(my_layer2, {x: [[1], [1], [1]]})) # print(session.run(my_layer2, {x: [[1], [0], [1]]})) # print(session.run(my_layer2, {x: [[0], [0], [1]]})) # print(session.run(my_layer2, {x: [[0], [0], [0]]})) #

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf
import os
def perceptron(weights, inputs, biases, activation):
    """One layer: elementwise weight/input product plus bias, then activation."""
    return activation((weights * inputs) + biases)
def step(nodes):
    # Step function: 1.0 for nodes > 0, else 0.0 (clip then ceil).
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))
# Input placeholder: one length-3 pattern.
x = tf.placeholder(tf.float32, shape=(3))
W1 = tf.Variable([[2, 2, 2], [-2 / 3, -2 / 3, -2 / 3], [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32)
b1 = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32)
W2 = tf.Variable([1 / 2, 1 / 2, 1], dtype=tf.float32)
b2 = tf.Variable(0.0, dtype=tf.float32)
layer1 = perceptron(W1, x, b1, step)
# Original passed the undefined name `my_layer1`; the first layer in
# this script is `layer1`.
layer2 = perceptron(W2, layer1, b2, step)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
l1 = session.run(layer1, {x: [0.0, 1.0, 0.0]})  # was my_layer1: NameError
print(l1)
# layer2 already chains through layer1; evaluate on the original input.
print(session.run(layer2, {x: [0.0, 1.0, 0.0]}))
(Edited: 2017-10-25)
import tensorflow as tf import os def perceptron(weights, inputs, biases, activation): nodes = weights * inputs + biases return activation(nodes) def step(nodes): return tf.ceil(tf.clip_by_value(nodes, 0, 1)) x = tf.placeholder(tf.float32, shape=(3)) W1 = tf.Variable([[2, 2, 2], [-2 / 3, -2 / 3, -2 / 3], [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32) b1 = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32) W2 = tf.Variable([1 / 2, 1 / 2, 1], dtype=tf.float32) b2 = tf.Variable(0.0, dtype=tf.float32) layer1 = perceptron(W1, x, b1, step) layer2 = perceptron(W2, my_layer1, b2, step) session = tf.Session() init = tf.global_variables_initializer() session.run(init) l1= session.run(my_layer1, {x: [0.0, 1.0, 0.0]}) print(l1) print(session.run(layer2, {x:l1[0]}))

-- Oct 25 In-Class Exercise Thread
  • import tensorflow as tf
  • def perceptron(weights, inputs, activation):
     *   nodes = weights * inputs
      *  return activation(nodes)
    
  • def step(nodes):
     *   return tf.ceil(tf.clip_by_value(nodes, 0, 1))
    
  • x = tf.placeholder(tf.float32, shape=(3))
  • W1 = tf.constant([[2,2,2],[-2/3,-2/3,-2/3],[1/2.5,1/2.5,1/25]],tf.float32)
  • my_layer1 = perceptron(W1, x, step)
  • W2 = tf.constant([1/2,1/2,1],tf.float32)
  • my_layer2 = perceptron(W2, my_layer1, step)
  • session = tf.Session()
  • init = tf.global_variables_initializer()
  • session.run(init)
  • session.run(my_layer1, {x:[1,0,1]})
  • print session.run(my_layer2, {x:[1,0,1]})
(Edited: 2017-10-25)
*import tensorflow as tf *def perceptron(weights, inputs, activation): * nodes = weights * inputs * return activation(nodes) *def step(nodes): * return tf.ceil(tf.clip_by_value(nodes, 0, 1)) *x = tf.placeholder(tf.float32, shape=(3)) *W1 = tf.constant([[2,2,2],[-2/3,-2/3,-2/3],[1/2.5,1/2.5,1/25]],tf.float32) *my_layer1 = perceptron(W1, x, step) *W2 = tf.constant([1/2,1/2,1],tf.float32) *my_layer2 = perceptron(W2, my_layer1, step) *session = tf.Session() *init = tf.global_variables_initializer() *session.run(init) *session.run(my_layer1, {x:[1,0,1]}) *print session.run(my_layer2, {x:[1,0,1]})

-- Oct 25 In-Class Exercise Thread
I did not install TensorFlow before class, so I discussed the exercise with a classmate and wrote this:
 import tensorflow as tf import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' def perceptron(weights, inputs, biases, activation):
    #print(tf.shape(weights))
    #print(tf.shape(inputs))
    nodes = tf.matmul(weights, inputs) + biases
    return activation(nodes)
def step(nodes):
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))
x = tf.placeholder(tf.float32, shape=(3, 1)) W1 = tf.Variable([[2, 2, 2], [-2 / 3, -2 / 3, -2 / 3], [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32) b1 = tf.Variable([[-1], [1], [-1]], dtype=tf.float32) W2 = tf.Variable(1/2, 1/2, 1 , dtype=tf.float32) b2 = tf.Variable(-1, dtype=tf.float32) [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] my_layer1 = perceptron(W1, x, b1, step) my_layer2 = perceptron(W2, my_layer1, b2, step) session = tf.Session() init = tf.global_variables_initializer() session.run(init) l1 = session.run(my_layer1, {x: [[1.0], [1.0], [1.0]]}) print(tf.shape(l1)) print("L1 layer") print(l1) print("L2 layer") print(session.run(my_layer2, {x: l1})) outputs array([[ 1., 1.], [ 1., 1.]], dtype=float32)
Did not install tensorflow before class. Hence, discussed with my classmate and wrote this: import tensorflow as tf import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' def perceptron(weights, inputs, biases, activation): #print(tf.shape(weights)) #print(tf.shape(inputs)) nodes = tf.matmul(weights, inputs) + biases return activation(nodes) def step(nodes): return tf.ceil(tf.clip_by_value(nodes, 0, 1)) x = tf.placeholder(tf.float32, shape=(3, 1)) W1 = tf.Variable([[2, 2, 2], [-2 / 3, -2 / 3, -2 / 3], [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32) b1 = tf.Variable([[-1], [1], [-1]], dtype=tf.float32) W2 = tf.Variable(1/2, 1/2, 1 , dtype=tf.float32) b2 = tf.Variable(-1, dtype=tf.float32) [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] my_layer1 = perceptron(W1, x, b1, step) my_layer2 = perceptron(W2, my_layer1, b2, step) session = tf.Session() init = tf.global_variables_initializer() session.run(init) l1 = session.run(my_layer1, {x: [[1.0], [1.0], [1.0]]}) print(tf.shape(l1)) print("L1 layer") print(l1) print("L2 layer") print(session.run(my_layer2, {x: l1})) outputs array([[ 1., 1.], [ 1., 1.]], dtype=float32)

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf
def perceptron(p, activation):
    """Sum every component of `p`, then apply the threshold activation."""
    total = tf.reduce_sum(p)
    return activation(total)
# Batch of 3-D points; each row is one (x, y, z) input.
point = tf.placeholder(tf.float32, shape=(None, 3))
def g1(node):
    # Threshold unit: returns Python int 1 if node >= 0.5, else 0,
    # wrapped in the graph via tf.cond.
    return tf.cond(
        tf.greater_equal(tf.cast(node, tf.float32), tf.constant(0.5)),
        lambda: 1, lambda: 0)
def g2(node):
    # Threshold unit: 1 if node >= 1.5, else 0 (via tf.cond).
    return tf.cond(
        tf.greater_equal(tf.cast(node, tf.float32), tf.constant(1.5)),
        lambda: 1, lambda: 0)
def g3(node):
    # Threshold unit: 1 if node >= 2.5, else 0 (via tf.cond).
    return tf.cond(
        tf.greater_equal(tf.cast(node, tf.float32), tf.constant(2.5)),
        lambda: 1, lambda: 0)
def g4(node):
    # Output threshold: 1 if the summed hidden outputs reach 2.0, else 0.
    return tf.cond(
        tf.greater_equal(tf.cast(node, tf.float32), tf.constant(2.0)),
        lambda: 1, lambda: 0)
p1 = perceptron(point, g1) p2 = perceptron(point, g2) p3 = perceptron(point, g3) p4 = perceptron([p1, p2, p3], g4)
input_data = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0],
              [0.0, 1.0, 1.0],
              [1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0],
              [1.0, 1.0, 1.0]]
init = tf.global_variables_initializer() session = tf.Session() session.run(init) result = session.run(p4, feed_dict={point: input_data}) print 'Result', result session.close()
(Edited: 2017-10-26)
import tensorflow as tf def perceptron(p, activation): node = tf.reduce_sum(p) return activation(node) point = tf.placeholder(tf.float32, shape=(None, 3)) def g1(node): return tf.cond( tf.greater_equal(tf.cast(node, tf.float32), tf.constant(0.5)), lambda: 1, lambda: 0) def g2(node): return tf.cond( tf.greater_equal(tf.cast(node, tf.float32), tf.constant(1.5)), lambda: 1, lambda: 0) def g3(node): return tf.cond( tf.greater_equal(tf.cast(node, tf.float32), tf.constant(2.5)), lambda: 1, lambda: 0) def g4(node): return tf.cond( tf.greater_equal(tf.cast(node, tf.float32), tf.constant(2.0)), lambda: 1, lambda: 0) p1 = perceptron(point, g1) p2 = perceptron(point, g2) p3 = perceptron(point, g3) p4 = perceptron([p1, p2, p3], g4) input_data = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]] init = tf.global_variables_initializer() session = tf.Session() session.run(init) result = session.run(p4, feed_dict={point: input_data}) print 'Result', result session.close()
[ Next ]
X