Skip to content

Commit 2067d12

Browse files
Cleaned up imports
1 parent 5f7d7f4 commit 2067d12

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

57 files changed

+132
-395
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -252,7 +252,7 @@ but rather to present the inner workings of them in a transparent way.
252252
- [Deep Q-Network](mlfromscratch/reinforcement_learning/deep_q_network.py)
253253

254254
### Deep Learning
255-
+ [Base Class](mlfromscratch/deep_learning/neural_network.py)
255+
+ [Neural Network](mlfromscratch/deep_learning/neural_network.py)
256256
+ [Layers](mlfromscratch/deep_learning/layers.py)
257257
* Activation Layer
258258
* Average Pooling Layer

mlfromscratch/deep_learning/activation_functions.py

Lines changed: 3 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,4 @@
11
import numpy as np
2-
import sys
32

43
# Collection of activation functions
54
# Reference: https://en.wikipedia.org/wiki/Activation_function
@@ -31,7 +30,6 @@ def function(self, x):
3130
return 2 / (1 + np.exp(-2*x)) - 1
3231

3332
def gradient(self, x):
34-
# Avoid overflow for large inputs
3533
return 1 - np.power(self.function(x), 2)
3634

3735
class ReLU():
@@ -64,8 +62,8 @@ def gradient(self, x):
6462
return np.where(x >= 0.0, 1, self.function(x) + self.alpha)
6563

6664
class SELU():
67-
# Reference : https://arxiv.org/abs/1706.02515,
68-
# https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb
65+
# Reference : https://arxiv.org/abs/1706.02515,
66+
# https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb
6967
def __init__(self):
7068
self.alpha = 1.6732632423543772848170429916717
7169
self.scale = 1.0507009873554804934193349852946
@@ -83,5 +81,5 @@ def function(self, x):
8381
return np.log(1 + np.exp(x))
8482

8583
def gradient(self, x):
86-
return Sigmoid().function(x)
84+
return 1 / (1 + np.exp(-x))
8785

mlfromscratch/deep_learning/layers.py

Lines changed: 2 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1,11 +1,10 @@
11

22
from __future__ import print_function, division
3-
import sys
4-
import os
53
import math
64
import numpy as np
75
import copy
8-
from mlfromscratch.deep_learning.activation_functions import Sigmoid, ReLU, SoftPlus, LeakyReLU, TanH, ELU, SELU, Softmax
6+
from mlfromscratch.deep_learning.activation_functions import Sigmoid, ReLU, SoftPlus, LeakyReLU
7+
from mlfromscratch.deep_learning.activation_functions import TanH, ELU, SELU, Softmax
98

109

1110
class Layer(object):

mlfromscratch/deep_learning/loss_functions.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
from __future__ import division
22
import numpy as np
3-
from mlfromscratch.utils.data_operation import accuracy_score
3+
from mlfromscratch.utils import accuracy_score
44
from mlfromscratch.deep_learning.activation_functions import Sigmoid
55

66
class Loss(object):

mlfromscratch/deep_learning/neural_network.py

Lines changed: 12 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -1,13 +1,8 @@
11
from __future__ import print_function
22
from terminaltables import AsciiTable
3-
import copy
43
import numpy as np
54
import progressbar
6-
7-
# Import helper functions
8-
from mlfromscratch.utils.data_manipulation import train_test_split, to_categorical, normalize
9-
from mlfromscratch.utils.data_manipulation import get_random_subsets, shuffle_data, batch_iterator
10-
from mlfromscratch.utils.data_operation import accuracy_score
5+
from mlfromscratch.utils import batch_iterator
116
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
127
from mlfromscratch.utils.misc import bar_widgets
138

@@ -30,7 +25,7 @@ def __init__(self, optimizer, loss=CrossEntropy, validation_data=None):
3025
self.layers = []
3126
self.errors = {"training": [], "validation": []}
3227
self.loss_function = loss()
33-
28+
3429
self.validation_set = False
3530
if validation_data:
3631
self.validation_set = True
@@ -47,50 +42,41 @@ def add(self, layer):
4742
# to the output shape of the last added layer
4843
if self.layers:
4944
layer.set_input_shape(shape=self.layers[-1].output_shape())
50-
5145
# If the layer has weights that needs to be initialized
5246
if hasattr(layer, 'initialize'):
5347
layer.initialize(optimizer=self.optimizer)
54-
5548
# Add layer to the network
5649
self.layers.append(layer)
5750

5851
def train_on_batch(self, X, y):
59-
# Calculate output
6052
y_pred = self._forward_pass(X)
61-
# Calculate the training loss
53+
# Calculate the loss and accuracy of the prediction
6254
loss = np.mean(self.loss_function.loss(y, y_pred))
55+
acc = self.loss_function.acc(y, y_pred)
6356
# Calculate the gradient of the loss function wrt y_pred
6457
loss_grad = self.loss_function.gradient(y, y_pred)
65-
# Calculate the accuracy of the prediction
66-
acc = self.loss_function.acc(y, y_pred)
67-
# Backprop. Update weights
58+
# Backpropagate. Update weights
6859
self._backward_pass(loss_grad=loss_grad)
6960

7061
return loss, acc
7162

72-
7363
def fit(self, X, y, n_epochs, batch_size):
74-
7564
n_samples = np.shape(X)[0]
7665
n_batches = int(n_samples / batch_size)
7766

7867
bar = progressbar.ProgressBar(widgets=bar_widgets)
7968
for _ in bar(range(n_epochs)):
80-
idx = range(n_samples)
81-
np.random.shuffle(idx)
82-
83-
batch_t_error = 0 # Mean batch training error
69+
batch_error = 0
8470
for X_batch, y_batch in batch_iterator(X, y, batch_size=batch_size):
8571
loss, _ = self.train_on_batch(X_batch, y_batch)
86-
batch_t_error += loss
72+
batch_error += loss
73+
74+
self.errors["training"].append(batch_error / n_batches)
8775

88-
# Save the epoch mean error
89-
self.errors["training"].append(batch_t_error / n_batches)
9076
if self.validation_set:
9177
# Determine validation error
92-
y_val_p = self._forward_pass(self.X_val)
93-
validation_loss = np.mean(self.loss_function.loss(self.y_val, y_val_p))
78+
y_val_pred = self._forward_pass(self.X_val)
79+
validation_loss = np.mean(self.loss_function.loss(self.y_val, y_val_pred))
9480
self.errors["validation"].append(validation_loss)
9581

9682
return self.errors["training"], self.errors["validation"]
@@ -116,7 +102,7 @@ def summary(self, name="Model Summary"):
116102
print (AsciiTable([[name]]).table)
117103
# Network input shape (first layer's input shape)
118104
print ("Input Shape: %s" % str(self.layers[0].input_shape))
119-
# Get each layer's configuration
105+
# Iterate through network and get each layer's configuration
120106
table_data = [["Layer Type", "Parameters", "Output Shape"]]
121107
tot_params = 0
122108
for layer in self.layers:
@@ -125,10 +111,8 @@ def summary(self, name="Model Summary"):
125111
out_shape = layer.output_shape()
126112
table_data.append([layer_name, str(params), str(out_shape)])
127113
tot_params += params
128-
129114
# Print network configuration table
130115
print (AsciiTable(table_data).table)
131-
132116
print ("Total Parameters: %d\n" % tot_params)
133117

134118
def predict(self, X):

mlfromscratch/deep_learning/optimizers.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
import numpy as np
2-
from mlfromscratch.utils.data_manipulation import make_diagonal, normalize
2+
from mlfromscratch.utils import make_diagonal, normalize
33

44
# Optimizers for models that use gradient based methods for finding the
55
# weights that minimizes the loss.

mlfromscratch/examples/convolutional_neural_network.py

Lines changed: 3 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -7,13 +7,12 @@
77

88
# Import helper functions
99
from mlfromscratch.deep_learning import NeuralNetwork
10-
from mlfromscratch.utils.data_manipulation import train_test_split, to_categorical, normalize
11-
from mlfromscratch.utils.data_manipulation import get_random_subsets, shuffle_data
10+
from mlfromscratch.utils import train_test_split, to_categorical, normalize
11+
from mlfromscratch.utils import get_random_subsets, shuffle_data, Plot
1212
from mlfromscratch.utils.data_operation import accuracy_score
1313
from mlfromscratch.deep_learning.optimizers import StochasticGradientDescent, Adam, RMSprop, Adagrad, Adadelta
1414
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
1515
from mlfromscratch.utils.misc import bar_widgets
16-
from mlfromscratch.utils import Plot
1716
from mlfromscratch.deep_learning.layers import Dense, Dropout, Conv2D, Flatten, Activation, MaxPooling2D
1817
from mlfromscratch.deep_learning.layers import AveragePooling2D, ZeroPadding2D, BatchNormalization, RNN
1918

@@ -25,7 +24,7 @@ def main():
2524
# Conv Net
2625
#----------
2726

28-
optimizer = Adadelta()
27+
optimizer = Adam()
2928

3029
data = datasets.load_digits()
3130
X = data.data
@@ -62,7 +61,6 @@ def main():
6261
clf.add(BatchNormalization())
6362
clf.add(Dense(10))
6463
clf.add(Activation('softmax'))
65-
6664
print ()
6765
clf.summary(name="ConvNet")
6866

mlfromscratch/examples/decision_tree_classifier.py

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -6,10 +6,8 @@
66
import os
77

88
# Import helper functions
9-
from mlfromscratch.utils.data_manipulation import train_test_split, standardize
10-
from mlfromscratch.utils.data_operation import accuracy_score
11-
from mlfromscratch.utils.data_operation import mean_squared_error, calculate_variance
12-
from mlfromscratch.utils import Plot
9+
from mlfromscratch.utils import train_test_split, standardize, accuracy_score
10+
from mlfromscratch.utils import mean_squared_error, calculate_variance, Plot
1311
from mlfromscratch.supervised_learning import ClassificationTree
1412

1513
def main():

mlfromscratch/examples/decision_tree_regressor.py

Lines changed: 3 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -1,16 +1,10 @@
11
from __future__ import division, print_function
22
import numpy as np
3-
from sklearn import datasets
43
import matplotlib.pyplot as plt
54
import pandas as pd
6-
import sys
7-
import os
8-
9-
# Import helper functions
10-
from mlfromscratch.utils.data_manipulation import train_test_split, standardize
11-
from mlfromscratch.utils.data_operation import accuracy_score
12-
from mlfromscratch.utils.data_operation import mean_squared_error, calculate_variance
13-
from mlfromscratch.utils import Plot
5+
6+
from mlfromscratch.utils import train_test_split, standardize, accuracy_score
7+
from mlfromscratch.utils import mean_squared_error, calculate_variance, Plot
148
from mlfromscratch.supervised_learning import RegressionTree
159

1610
def main():

mlfromscratch/examples/deep_q_network.py

Lines changed: 1 addition & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -1,14 +1,6 @@
11
from __future__ import print_function
2-
import sys
3-
import os
4-
import math
5-
import random
62
import numpy as np
7-
import progressbar
8-
import gym
9-
from collections import deque
10-
11-
from mlfromscratch.utils.data_manipulation import to_categorical
3+
from mlfromscratch.utils import to_categorical
124
from mlfromscratch.deep_learning.optimizers import Adam
135
from mlfromscratch.deep_learning.loss_functions import SquareLoss
146
from mlfromscratch.deep_learning.layers import Dense, Dropout, Flatten, Activation, Reshape, BatchNormalization

mlfromscratch/examples/demo.py

Lines changed: 3 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1,13 +1,11 @@
11
from __future__ import print_function
2-
import sys, os
32
from sklearn import datasets
43
import numpy as np
5-
import pandas as pd
4+
import math
65
import matplotlib.pyplot as plt
76

8-
from mlfromscratch.utils.data_manipulation import train_test_split, normalize, to_categorical
9-
from mlfromscratch.utils.data_operation import accuracy_score
10-
from mlfromscratch.deep_learning.optimizers import GradientDescent, Adam
7+
from mlfromscratch.utils import train_test_split, normalize, to_categorical, accuracy_score
8+
from mlfromscratch.deep_learning.optimizers import Adam
119
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
1210
from mlfromscratch.deep_learning.activation_functions import Softmax
1311
from mlfromscratch.utils.kernels import *

mlfromscratch/examples/gradient_boosting_classifier.py

Lines changed: 2 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1,14 +1,11 @@
11
from __future__ import division, print_function
22
import numpy as np
33
from sklearn import datasets
4-
import sys
5-
import os
64
import matplotlib.pyplot as plt
75

86
# Import helper functions
9-
from mlfromscratch.utils.data_manipulation import train_test_split
10-
from mlfromscratch.utils.data_operation import accuracy_score
11-
from mlfromscratch.utils.loss_functions import CrossEntropy
7+
from mlfromscratch.utils import train_test_split, accuracy_score
8+
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
129
from mlfromscratch.utils import Plot
1310
from mlfromscratch.supervised_learning import GradientBoostingClassifier
1411

mlfromscratch/examples/gradient_boosting_regressor.py

Lines changed: 2 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1,18 +1,14 @@
11
from __future__ import division, print_function
22
import numpy as np
3-
from sklearn import datasets
43
import pandas as pd
54
import matplotlib.pyplot as plt
6-
from scipy.optimize import line_search
75
import progressbar
86

9-
# Import helper functions
10-
from mlfromscratch.utils.data_manipulation import train_test_split, standardize, to_categorical
11-
from mlfromscratch.utils.data_operation import mean_squared_error, accuracy_score
7+
from mlfromscratch.utils import train_test_split, standardize, to_categorical
8+
from mlfromscratch.utils import mean_squared_error, accuracy_score, Plot
129
from mlfromscratch.utils.loss_functions import SquareLoss
1310
from mlfromscratch.utils.misc import bar_widgets
1411
from mlfromscratch.supervised_learning import GradientBoostingRegressor
15-
from mlfromscratch.utils import Plot
1612

1713

1814
def main():

mlfromscratch/examples/k_means.py

Lines changed: 0 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1,8 +1,4 @@
11
from __future__ import division, print_function
2-
import sys
3-
import os
4-
import math
5-
import random
62
from sklearn import datasets
73
import numpy as np
84

mlfromscratch/examples/k_nearest_neighbors.py

Lines changed: 2 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1,16 +1,10 @@
11
from __future__ import print_function
2-
import sys
3-
import os
4-
import math
52
import numpy as np
63
import matplotlib.pyplot as plt
74
from sklearn import datasets
85

9-
# Import helper functions
10-
from mlfromscratch.utils.data_manipulation import train_test_split, normalize
11-
from mlfromscratch.utils.data_operation import euclidean_distance, accuracy_score
12-
from mlfromscratch.unsupervised_learning import PCA
13-
from mlfromscratch.utils import Plot
6+
from mlfromscratch.utils import train_test_split, normalize, accuracy_score
7+
from mlfromscratch.utils import euclidean_distance, Plot
148
from mlfromscratch.supervised_learning import KNN
159

1610
def main():

mlfromscratch/examples/linear_discriminant_analysis.py

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,12 @@
11
from __future__ import print_function
2-
import sys
3-
import os
42
from sklearn import datasets
53
import matplotlib.pyplot as plt
64
import numpy as np
7-
import pandas as pd
85

9-
# Import helper functions
106
from mlfromscratch.supervised_learning import LDA
11-
from mlfromscratch.utils.data_operation import calculate_covariance_matrix, accuracy_score
12-
from mlfromscratch.utils.data_manipulation import normalize, standardize, train_test_split
7+
from mlfromscratch.utils import calculate_covariance_matrix, accuracy_score
8+
from mlfromscratch.utils import normalize, standardize, train_test_split, Plot
139
from mlfromscratch.unsupervised_learning import PCA
14-
from mlfromscratch.utils import Plot
1510

1611
def main():
1712
# Load the dataset

mlfromscratch/examples/linear_regression.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,8 @@
22
import pandas as pd
33
import matplotlib.pyplot as plt
44

5-
# Import helper functions
6-
from mlfromscratch.utils.data_manipulation import train_test_split, polynomial_features
7-
from mlfromscratch.utils.data_operation import mean_squared_error
8-
from mlfromscratch.utils import Plot
5+
from mlfromscratch.utils import train_test_split, polynomial_features
6+
from mlfromscratch.utils import mean_squared_error, Plot
97
from mlfromscratch.supervised_learning import LinearRegression
108

119
def main():

mlfromscratch/examples/logistic_regression.py

Lines changed: 2 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -1,17 +1,11 @@
11
from __future__ import print_function
2-
import sys
3-
import os
4-
import math
52
from sklearn import datasets
63
import numpy as np
7-
import pandas as pd
84
import matplotlib.pyplot as plt
95

106
# Import helper functions
11-
from mlfromscratch.utils.data_manipulation import make_diagonal, normalize, train_test_split
12-
from mlfromscratch.utils.data_operation import accuracy_score
13-
from mlfromscratch.utils.activation_functions import Sigmoid
14-
from mlfromscratch.utils.optimizers import GradientDescent
7+
from mlfromscratch.utils import make_diagonal, normalize, train_test_split, accuracy_score
8+
from mlfromscratch.deep_learning.activation_functions import Sigmoid
159
from mlfromscratch.utils import Plot
1610
from mlfromscratch.supervised_learning import LogisticRegression
1711

@@ -30,7 +24,6 @@ def main():
3024
y_pred = clf.predict(X_test)
3125

3226
accuracy = accuracy_score(y_test, y_pred)
33-
3427
print ("Accuracy:", accuracy)
3528

3629
# Reduce dimension to two using PCA and plot the results

0 commit comments

Comments (0)