
Commit 07da067

Author: Lazy Programmer
Commit message: Merge branch 'master' of github.com:lazyprogrammer/machine_learning_examples
2 parents: 98a798a + 99bbada

24 files changed: +334 additions, -294 deletions

ann_class/tf_example.py

Lines changed: 1 addition & 1 deletion

@@ -58,7 +58,7 @@ def forward(X, W1, b1, W2, b2):
 logits = forward(tfX, W1, b1, W2, b2)
 
 cost = tf.reduce_mean(
-    tf.nn.softmax_cross_entropy_with_logits(
+    tf.nn.softmax_cross_entropy_with_logits_v2(
         labels=tfY,
         logits=logits
     )
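Background on this one-line change: in TensorFlow 1.x, tf.nn.softmax_cross_entropy_with_logits was deprecated in favor of the _v2 variant. The only behavioral difference is that _v2 also backpropagates into labels; since tfY is a placeholder rather than a trainable variable, the swap is a drop-in here. A minimal sketch, assuming TensorFlow 1.x:

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
labels = tf.constant([[1.0, 0.0, 0.0]])

# _v2 lets gradients flow into `labels` as well as `logits`
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)

# to reproduce the old v1 behavior exactly, freeze the labels
loss_v1_style = tf.nn.softmax_cross_entropy_with_logits_v2(
    labels=tf.stop_gradient(labels), logits=logits)

with tf.Session() as sess:
    # forward values are identical; only the gradient w.r.t. labels differs
    print(sess.run([loss, loss_v1_style]))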

ann_class2/adam.py

Lines changed: 1 addition & 5 deletions

@@ -19,13 +19,9 @@ def main():
     max_iter = 10
     print_period = 10
 
-    X, Y = get_normalized_data()
+    Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
     reg = 0.01
 
-    Xtrain = X[:-1000,]
-    Ytrain = Y[:-1000]
-    Xtest = X[-1000:,]
-    Ytest = Y[-1000:]
     Ytrain_ind = y2indicator(Ytrain)
     Ytest_ind = y2indicator(Ytest)
 
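This file shows the pattern repeated across the rest of the commit: get_normalized_data() now owns the shuffle and the train/test split, so callers no longer slice off the last 1000 rows themselves. A hypothetical sketch of the new contract (the real helper lives in the course's shared util module; the MNIST-CSV path and parameter names here are illustrative only):

import numpy as np

def get_normalized_data(csv_path='train.csv', test_size=1000):
    # assumption: MNIST-style CSV with the label in column 0
    data = np.loadtxt(csv_path, delimiter=',', skiprows=1)
    np.random.shuffle(data)
    X = data[:, 1:].astype(np.float32)
    Y = data[:, 0].astype(np.int32)

    # normalize each pixel to zero mean / unit variance
    mu = X.mean(axis=0)
    std = X.std(axis=0)
    np.place(std, std == 0, 1)  # constant pixels would divide by zero
    X = (X - mu) / std

    # keep the old convention: the last `test_size` rows become the test set
    Xtrain, Ytrain = X[:-test_size], Y[:-test_size]
    Xtest, Ytest = X[-test_size:], Y[-test_size:]
    return Xtrain, Xtest, Ytrain, Ytest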

ann_class2/batch_norm_tf.py

Lines changed: 1 addition & 5 deletions

@@ -183,11 +183,7 @@ def predict(self, X):
 
 def main():
     # step 1: get the data and define all the usual variables
-    X, Y = get_normalized_data()
-    # Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.03)
-    X, Y = shuffle(X, Y)
-    Xtrain, Ytrain = X[:-1000], Y[:-1000]
-    Xtest, Ytest = X[-1000:], Y[-1000:]
+    Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
 
     ann = ANN([500, 300])

ann_class2/batch_norm_theano.py

Lines changed: 1 addition & 5 deletions

@@ -202,11 +202,7 @@ def score(self, X, Y):
 
 def main():
     # step 1: get the data and define all the usual variables
-    X, Y = get_normalized_data()
-    # Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.3)
-    X, Y = shuffle(X, Y)
-    Xtrain, Ytrain = X[:-1000], Y[:-1000]
-    Xtest, Ytest = X[-1000:], Y[-1000:]
+    Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
 
     ann = ANN([500, 300])
     ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True)

ann_class2/cntk_example.py

Lines changed: 7 additions & 8 deletions

@@ -37,22 +37,21 @@
 
 
 # get the data, same as Theano + Tensorflow examples
-X, Y = get_normalized_data()
+Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
 
 # get shapes
-N, D = X.shape
-K = len(set(Y))
+N, D = Xtrain.shape
+K = len(set(Ytrain))
 
 # we want one-hot encoded labels
-Y = y2indicator(Y)
+Ytrain = y2indicator(Ytrain)
+Ytest = y2indicator(Ytest)
 
 # split the data
 X = X.astype(np.float32)
 Y = Y.astype(np.float32)
-Xtrain = X[:-1000,]
-Ytrain = Y[:-1000]
-Xtest = X[-1000:,]
-Ytest = Y[-1000:]
+Xtest = Xtest.astype(np.float32)
+Ytest = Ytest.astype(np.float32)
 
 
 # the model will be a sequence of layers
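Both the CNTK and Keras examples one-hot encode the labels with y2indicator. A minimal sketch of what such a helper does (hypothetical; the repo supplies its own implementation in the shared util module):

import numpy as np

def y2indicator(y):
    # map integer labels to one-hot rows, e.g. [2, 0] -> [[0, 0, 1], [1, 0, 0]]
    y = y.astype(np.int32)
    ind = np.zeros((len(y), y.max() + 1), dtype=np.float32)
    ind[np.arange(len(y)), y] = 1
    return ind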

ann_class2/dropout_tensorflow.py

Lines changed: 5 additions & 10 deletions

@@ -33,16 +33,11 @@ def __init__(self, hidden_layer_sizes, p_keep):
         self.hidden_layer_sizes = hidden_layer_sizes
         self.dropout_rates = p_keep
 
-    def fit(self, X, Y, lr=1e-4, mu=0.9, decay=0.9, epochs=15, batch_sz=100, split=True, print_every=20):
-        # make a validation set
-        X, Y = shuffle(X, Y)
+    def fit(self, X, Y, Xvalid, Yvalid, lr=1e-4, mu=0.9, decay=0.9, epochs=15, batch_sz=100, print_every=50):
         X = X.astype(np.float32)
         Y = Y.astype(np.int64)
-        if split:
-            Xvalid, Yvalid = X[-1000:], Y[-1000:]
-            X, Y = X[:-1000], Y[:-1000]
-        else:
-            Xvalid, Yvalid = X, Y
+        Xvalid = Xvalid.astype(np.float32)
+        Yvalid = Yvalid.astype(np.int64)
 
         # initialize hidden layers
         N, D = X.shape

@@ -143,10 +138,10 @@ def relu(a):
 
 def main():
     # step 1: get the data and define all the usual variables
-    X, Y = get_normalized_data()
+    Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
 
     ann = ANN([500, 300], [0.8, 0.5, 0.5])
-    ann.fit(X, Y)
+    ann.fit(Xtrain, Ytrain, Xtest, Ytest)
 
 
 if __name__ == '__main__':
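With the split flag gone, fit() no longer carves a validation set out of the training data; the caller supplies one explicitly, as main() now does with the test set. A caller who wants the old internal-split behavior can reproduce it at the call site; a sketch, assuming shuffle from sklearn.utils (which these scripts already import):

from sklearn.utils import shuffle

X, Y = shuffle(Xtrain, Ytrain)
Xvalid, Yvalid = X[-1000:], Y[-1000:]
X, Y = X[:-1000], Y[:-1000]
ann.fit(X, Y, Xvalid, Yvalid)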

ann_class2/dropout_theano.py

Lines changed: 6 additions & 8 deletions

@@ -39,13 +39,11 @@ def __init__(self, hidden_layer_sizes, p_keep):
         self.hidden_layer_sizes = hidden_layer_sizes
         self.dropout_rates = p_keep
 
-    def fit(self, X, Y, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=100, show_fig=False):
-        # make a validation set
-        X, Y = shuffle(X, Y)
+    def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=100, show_fig=False):
         X = X.astype(np.float32)
         Y = Y.astype(np.int32)
-        Xvalid, Yvalid = X[-1000:], Y[-1000:]
-        X, Y = X[:-1000], Y[:-1000]
+        Xvalid = Xvalid.astype(np.float32)
+        Yvalid = Yvalid.astype(np.int32)
 
         self.rng = RandomStreams()
 

@@ -125,7 +123,7 @@ def fit(self, X, Y, learning_rate=1e-4, mu=0.9, decay=0.9, epochs=8, batch_sz=10
 
                 train_op(Xbatch, Ybatch)
 
-                if j % 20 == 0:
+                if j % 50 == 0:
                     c, p = cost_predict_op(Xvalid, Yvalid)
                     costs.append(c)
                     e = error_rate(Yvalid, p)

@@ -166,10 +164,10 @@ def relu(a):
 
 def main():
     # step 1: get the data and define all the usual variables
-    X, Y = get_normalized_data()
+    Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
 
     ann = ANN([500, 300], [0.8, 0.5, 0.5])
-    ann.fit(X, Y, show_fig=True)
+    ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True)
 
 
 if __name__ == '__main__':

ann_class2/keras_example.py

Lines changed: 6 additions & 5 deletions

@@ -19,17 +19,18 @@
 
 # get the data, same as Theano + Tensorflow examples
 # no need to split now, the fit() function will do it
-X, Y = get_normalized_data()
+Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
 
 # get shapes
-N, D = X.shape
-K = len(set(Y))
+N, D = Xtrain.shape
+K = len(set(Ytrain))
 
 # by default Keras wants one-hot encoded labels
 # there's another cost function we can use
 # where we can just pass in the integer labels directly
 # just like Tensorflow / Theano
-Y = y2indicator(Y)
+Ytrain = y2indicator(Ytrain)
+Ytest = y2indicator(Ytest)
 
 
 # the model will be a sequence of layers

@@ -60,7 +61,7 @@
 
 
 # gives us back a <keras.callbacks.History object at 0x112e61a90>
-r = model.fit(X, Y, validation_split=0.33, epochs=15, batch_size=32)
+r = model.fit(Xtrain, Ytrain, validation_data=(Xtest, Ytest), epochs=15, batch_size=32)
 print("Returned:", r)
 
 # print the available keys
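The fit() change is more than a rename: validation_split=0.33 makes Keras hold out the last third of the training arrays (it slices before shuffling), while validation_data evaluates on an explicitly supplied set, here the same held-out test set the Theano and TensorFlow examples use, so the reported validation metrics become comparable across examples. A sketch of inspecting the returned History object (metric key names vary slightly across Keras versions):

import matplotlib.pyplot as plt

print(r.history.keys())  # e.g. dict_keys(['val_loss', 'val_acc', 'loss', 'acc'])

plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='test loss')
plt.legend()
plt.show()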

ann_class2/momentum.py

Lines changed: 2 additions & 6 deletions

@@ -26,16 +26,12 @@ def main():
     # 3. batch SGD with Nesterov momentum
 
     max_iter = 20 # make it 30 for sigmoid
-    print_period = 10
+    print_period = 50
 
-    X, Y = get_normalized_data()
+    Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
     lr = 0.00004
     reg = 0.01
 
-    Xtrain = X[:-1000,]
-    Ytrain = Y[:-1000]
-    Xtest = X[-1000:,]
-    Ytest = Y[-1000:]
     Ytrain_ind = y2indicator(Ytrain)
     Ytest_ind = y2indicator(Ytest)
 
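For context on the file itself: the comment above refers to the third configuration the script compares, batch SGD with Nesterov momentum. A sketch of the usual update rules, not necessarily the exact form this script uses (mu is the momentum coefficient, lr the learning rate, g the gradient at the current weights):

# standard momentum
v = mu * v - lr * g
W = W + v

# Nesterov momentum, in the common reformulation that evaluates the
# gradient at the current weights instead of the lookahead point
v = mu * v - lr * g
W = W + mu * v - lr * g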

ann_class2/mxnet_example.py

Lines changed: 3 additions & 10 deletions

@@ -27,18 +27,11 @@
 
 # get the data, same as Theano + Tensorflow examples
 # no need to split now, the fit() function will do it
-X, Y = get_normalized_data()
+Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
 
 # get shapes
-N, D = X.shape
-K = len(set(Y))
-
-# split the data
-Xtrain = X[:-1000,]
-Ytrain = Y[:-1000]
-Xtest = X[-1000:,]
-Ytest = Y[-1000:]
-
+N, D = Xtrain.shape
+K = len(set(Ytrain))
 
 # training config
 batch_size = 32
