[ Prev ]
2017-11-01

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf
import numpy as np
def perceptron(weights, inputs, biases, activation):
    """Single dense layer: activation(inputs @ weights + biases)."""
    pre_activation = tf.add(tf.matmul(inputs, weights), biases)
    return activation(pre_activation)
def step(nodes):
    """Step activation: 1.0 where nodes > 0, else 0.0 (clip then ceil)."""
    bounded = tf.clip_by_value(nodes, 0, 1)
    return tf.ceil(bounded)
# Layer-1 gates:
#   G1: x+y+z >= 1/2;  G2: x+y+z <= 3/2;  G3: x+y+z >= 2.5
# Rewritten as "<= 0" constraints:
#   G1: -x-y-z+0.5 <= 0;  G2: x+y+z-1.5 <= 0;  G3: -x-y-z+2.5 <= 0
# BUG FIX: tf.matmul requires rank-2 operands, so the input is one
# (1, 3) row rather than the original rank-1 shape (3).
x = tf.placeholder(tf.float32, shape=(1, 3))
uniform_init = tf.random_uniform_initializer(0, 1)

# Each column of W1 drives one gate; the biases carry the constants above.
weights_1 = [[-1, 1, -1], [-1, 1, -1], [-1, 1, -1]]
w_1_init = tf.constant_initializer(weights_1)
bias_1 = [0.5, -1.5, 2.5]
b_1_init = tf.constant_initializer(bias_1)
W1 = tf.get_variable("W1", shape=[3, 3], initializer=w_1_init)
b1 = tf.get_variable("b1", shape=[3], initializer=b_1_init)
my_layer1 = perceptron(W1, x, b1, step)

# Layer-2 gate: G4: out_G1 + out_G2 + 2*out_G3 >= 2
#   i.e. -out_G1 - out_G2 - 2*out_G3 + 2 <= 0
weights_2 = [[-1], [-1], [-2]]
w_2_init = tf.constant_initializer(weights_2)
bias_2 = [2]
b_2_init = tf.constant_initializer(bias_2)
W2 = tf.get_variable("W2", shape=[3, 1], initializer=w_2_init)
b2 = tf.get_variable("b2", shape=[1, 1], initializer=b_2_init)
my_layer2 = perceptron(W2, my_layer1, b2, step)

session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
# BUG FIX: the original feed `{x:1 ,[1],[1]}` was a syntax error; feed a
# single (1, 3) row matching the placeholder.
out = session.run(my_layer1, {x: [[1, 1, 1]]})
print(out)
# Re-post of the solution above, reformatted from the one-line paste.
import tensorflow as tf
import numpy as np


def perceptron(weights, inputs, biases, activation):
    """Single dense layer: activation(inputs @ weights + biases)."""
    nodes = tf.matmul(inputs, weights) + biases
    return activation(nodes)


def step(nodes):
    """Step activation: 1.0 where nodes > 0, else 0.0."""
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))


# G1: x+y+z >= 1/2; G2: x+y+z <= 3/2; G3: x+y+z >= 2.5
# G1: -x-y-z+0.5 <= 0; G2: x+y+z-1.5 <= 0; G3: -x-y-z+2.5 <= 0
# BUG FIX: tf.matmul needs rank-2 operands, so x is one (1, 3) row.
x = tf.placeholder(tf.float32, shape=(1, 3))
uniform_init = tf.random_uniform_initializer(0, 1)
weights_1 = [[-1, 1, -1], [-1, 1, -1], [-1, 1, -1]]
w_1_init = tf.constant_initializer(weights_1)
bias_1 = [0.5, -1.5, 2.5]
b_1_init = tf.constant_initializer(bias_1)
W1 = tf.get_variable("W1", shape=[3, 3], initializer=w_1_init)
b1 = tf.get_variable("b1", shape=[3], initializer=b_1_init)
my_layer1 = perceptron(W1, x, b1, step)
# G4: out_G1 + out_G2 + 2*out_G3 >= 2
# G4: -out_G1 - out_G2 - 2*out_G3 + 2 <= 0
weights_2 = [[-1], [-1], [-2]]
w_2_init = tf.constant_initializer(weights_2)
bias_2 = [2]
b_2_init = tf.constant_initializer(bias_2)
W2 = tf.get_variable("W2", shape=[3, 1], initializer=w_2_init)
b2 = tf.get_variable("b2", shape=[1, 1], initializer=b_2_init)
my_layer2 = perceptron(W2, my_layer1, b2, step)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
# BUG FIX: the original feed `{x:[[1]],[1],[1]}` was a syntax error; feed
# one (1, 3) row matching the placeholder.
out = session.run(my_layer1, {x: [[1, 1, 1]]})
print(out)

-- Oct 25 In-Class Exercise Thread
```python
import tensorflow as tf
def perceptron(weights, inputs, biases, activation):
    """One layer: broadcast-multiply each weight row by the input,
    sum per row, add the bias, and apply the activation."""
    weighted_sums = tf.reduce_sum(weights * inputs, axis=1)
    return activation(weighted_sums + biases)
def stepfunction(nodes):
    """Map each node to 1.0 if positive, otherwise 0.0."""
    clipped = tf.clip_by_value(nodes, 0, 1)
    return tf.ceil(clipped)
# Input: one 3-vector (x, y, z).
x = tf.placeholder(tf.float32, shape=3)
# One row of W / one entry of B per layer-1 gate.
W = tf.constant([[1.0, 1.0, 1.0],
                 [-1.0, -1.0, -1.0],
                 [1.0, 1.0, 1.0]])
B = tf.constant([-0.5, 1.5, -2.5])
layer_1 = perceptron(W, x, B, stepfunction) W_2 = tf.constant([
    [1.0, 1.0, 2.0]
])
B_2 = tf.constant([
    -1.99
])
layer_2 = perceptron(W_2, layer_1, B_2, stepfunction) session = tf.Session() init = tf.global_variables_initializer() session.run(init)
print session.run(layer_2, {x: [0, 1, 1]})```
(Edited: 2017-11-01)
# Re-post of the solution above, reformatted from the one-line paste
# (the @BT@ markers were escaped backtick fences from the forum export).
import tensorflow as tf


def perceptron(weights, inputs, biases, activation):
    """One layer: row-wise weighted sum of the input plus bias, activated."""
    nodes = tf.reduce_sum(weights * inputs, 1) + biases
    return activation(nodes)


def stepfunction(nodes):
    """1.0 where nodes > 0, else 0.0."""
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))


x = tf.placeholder(tf.float32, shape=3)
W = tf.constant([
    [1.0, 1.0, 1.0],
    [-1.0, -1.0, -1.0],
    [1.0, 1.0, 1.0],
])
B = tf.constant([-0.5, 1.5, -2.5])
layer_1 = perceptron(W, x, B, stepfunction)
W_2 = tf.constant([[1.0, 1.0, 2.0]])
B_2 = tf.constant([-1.99])
layer_2 = perceptron(W_2, layer_1, B_2, stepfunction)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
# BUG FIX: Python-2 `print expr` replaced with the portable call form.
print(session.run(layer_2, {x: [0, 1, 1]}))
2017-12-14

-- Oct 25 In-Class Exercise Thread
import tensorflow as tf
import os


def perceptron(weights, inputs, biases, activation):
    """One layer: elementwise weights * inputs plus biases, then activation.

    NOTE(review): this uses elementwise multiply (with broadcasting), not a
    matrix product — each output keeps per-column products rather than a
    weighted sum.  Confirm this is what the exercise intends.
    """
    nodes = weights * inputs + biases
    return activation(nodes)
def step(nodes):
    """Return 1.0 for positive nodes and 0.0 otherwise."""
    bounded = tf.clip_by_value(nodes, 0, 1)
    return tf.ceil(bounded)
x = tf.placeholder(tf.float32, shape=(3))
# NOTE(review): `-2 / 3` etc. are integer division on Python 2; under
# Python 3 (which the print() calls below suggest) they are true division.
W1 = tf.Variable([[2, 2, 2],
                  [-2 / 3, -2 / 3, -2 / 3],
                  [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32)
b1 = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32)
W2 = tf.Variable([1 / 2, 1 / 2, 1], dtype=tf.float32)
b2 = tf.Variable(0.0, dtype=tf.float32)
layer1 = perceptron(W1, x, b1, step)
# BUG FIX: the original referenced the undefined name `my_layer1`; the
# variable defined above is `layer1`.
layer2 = perceptron(W2, layer1, b2, step)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
l1 = session.run(layer1, {x: [0.0, 1.0, 0.0]})
print(l1)
# NOTE(review): this feeds layer1's first output row back in as x —
# presumably to evaluate the second layer on layer-1 outputs; confirm.
print(session.run(layer2, {x: l1[0]}))
# Re-post of the script above, reformatted from the one-line paste.
import tensorflow as tf
import os


def perceptron(weights, inputs, biases, activation):
    """Elementwise weights * inputs plus biases, then activation."""
    nodes = weights * inputs + biases
    return activation(nodes)


def step(nodes):
    """1.0 where nodes > 0, else 0.0."""
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))


x = tf.placeholder(tf.float32, shape=(3))
W1 = tf.Variable([[2, 2, 2],
                  [-2 / 3, -2 / 3, -2 / 3],
                  [2 / 5, 2 / 5, 2 / 5]], dtype=tf.float32)
b1 = tf.Variable([0.0, 0.0, 0.0], dtype=tf.float32)
W2 = tf.Variable([1 / 2, 1 / 2, 1], dtype=tf.float32)
b2 = tf.Variable(0.0, dtype=tf.float32)
layer1 = perceptron(W1, x, b1, step)
# BUG FIX: `my_layer1` was undefined; the variable is `layer1`.
layer2 = perceptron(W2, layer1, b2, step)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
l1 = session.run(layer1, {x: [0.0, 1.0, 0.0]})
print(l1)
print(session.run(layer2, {x: l1[0]}))
X