Commit 4175aca
Parent(s): 6e6a688
we should try to do this with regression on a simple dataset from the internet somewhere because I think it is hard to see how well it is doing with completely random data
Files changed:
- main.py +1 -1
- neural_network/activation.py +6 -13
- neural_network/main.py +8 -3
- neural_network/model.py +1 -1
- neural_network/opts.py +5 -0
- neural_network/plot.py +17 -4
main.py
CHANGED
@@ -19,7 +19,7 @@ def main():
     except KeyError:
         raise f"Invalid method \"{method}\". Try one of these\n{list(options.keys())}"
 
-    X, y = random_dataset(rows=
+    X, y = random_dataset(rows=1000, features=10)
     func(X, y)
 
 
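A note on the unchanged context above: `raise f"..."` raises a bare string, which Python 3 rejects with `TypeError: exceptions must derive from BaseException`, so this handler would crash before its message is ever shown. A minimal sketch of the fix, with `ValueError` as an assumed (not repo-confirmed) choice of exception type:

    # Strings cannot be raised; wrap the message in an exception class.
    raise ValueError(
        f"Invalid method \"{method}\". Try one of these\n{list(options.keys())}"
    )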
neural_network/activation.py
CHANGED
@@ -1,17 +1,10 @@
 import numpy as np
 
+relu = lambda x: np.maximum(x, 0)
+relu_prime = lambda x: np.where(x > 0, 1, 0)
 
-def sigmoid(x):
-    return 1.0 / (1.0 + np.exp(-x))
+tanh = lambda x: np.tanh(x)
+tanh_prime = lambda x: 1 - tanh(x) ** 2
 
-
-def sigmoid_prime(x):
-    return sigmoid(x) / (1.0 - sigmoid(x))
-
-
-def relu(x):
-    return np.maximum(x, 0)
-
-
-def relu_prime(x):
-    return np.where(x > 0, 1, 0)
+sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
+sigmoid_prime = lambda x: sigmoid(x) / 1.0 - sigmoid(x)
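A caveat on the new `sigmoid_prime`: without the parentheses the old version had, `sigmoid(x) / 1.0 - sigmoid(x)` parses as `(sigmoid(x) / 1.0) - sigmoid(x)`, which is identically zero; and even the old `sigmoid(x) / (1.0 - sigmoid(x))` divides where the logistic derivative multiplies. The standard form, as a sketch:

    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    sigmoid_prime = lambda x: sigmoid(x) * (1.0 - sigmoid(x))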
neural_network/main.py
CHANGED
@@ -1,9 +1,10 @@
 from sklearn.model_selection import train_test_split
+import matplotlib.pyplot as plt
 import numpy as np
 
 from neural_network.opts import activation
 from neural_network.backprop import bp
-from neural_network.model import
+from neural_network.model import Network
 from neural_network.plot import loss_history_plt
 
 
@@ -52,7 +53,9 @@ def main(
     results, loss_history = bp(X_train, y_train, wb, args)
     final = results[args["epochs"] - 1]
     func = activation[args["activation_func"]]["main"]
-
+
+    # initialize our final network
+    fm = Network(final_wb=final, activation_func=func)
 
     # predict the x test data and compare it to y test data
     pred = fm.predict(X_test)
@@ -61,4 +64,6 @@ def main(
 
     # plot predicted versus actual
     # also plot the training loss over epochs
-    loss_history_plt(loss_history)
+    animated_loss_plt = loss_history_plt(loss_history)
+    # eventually we will save this plot
+    plt.show()
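A subtle point in the last hunk: matplotlib's `FuncAnimation` only keeps running while a reference to the animation object is alive, so assigning the return value to `animated_loss_plt` (rather than discarding it) prevents the animation from being garbage-collected before `plt.show()` draws it. In sketch form:

    # Keep a name bound to the animation, or it may be garbage-collected.
    animated_loss_plt = loss_history_plt(loss_history)
    plt.show()  # blocks until the figure window is closed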
neural_network/model.py
CHANGED
@@ -2,7 +2,7 @@ import numpy as np
 from typing import Callable
 
 
-class
+class Network:
     def __init__(self, final_wb: dict[str, np.array], activation_func: Callable):
         self.func = activation_func
         self.final_wb = final_wb
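The diff shows only the constructor, not the `predict` method that neural_network/main.py calls. Given that `final_wb` holds the final weights and biases and `func` is a single activation, a forward pass could plausibly look like the sketch below; the key names "W1"/"b1"/"W2"/"b2" and the two-layer depth are assumptions, not taken from the repo. (Minor typing note: `np.ndarray` is the usual annotation, since `np.array` is a factory function, not a type.)

    import numpy as np
    from typing import Callable

    class Network:
        def __init__(self, final_wb: dict[str, np.ndarray], activation_func: Callable):
            self.func = activation_func
            self.final_wb = final_wb

        def predict(self, X: np.ndarray) -> np.ndarray:
            # Hypothetical two-layer forward pass; the real repo's keys may differ.
            w = self.final_wb
            hidden = self.func(X @ w["W1"] + w["b1"])
            return self.func(hidden @ w["W2"] + w["b2"])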
neural_network/opts.py
CHANGED
@@ -10,4 +10,9 @@ activation = {
         "main": sigmoid,
         "prime": sigmoid_prime,
     },
+
+    "tanh": {
+        "main": tanh,
+        "prime": tanh_prime,
+    },
 }
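With the tanh pair registered, callers can pick an activation by name, which is exactly what neural_network/main.py does via `args["activation_func"]`. A small usage sketch (the sample `x` values are illustrative):

    import numpy as np
    from neural_network.opts import activation

    x = np.linspace(-2.0, 2.0, 5)
    pair = activation["tanh"]
    y = pair["main"](x)    # np.tanh(x)
    dy = pair["prime"](x)  # 1 - tanh(x) ** 2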
neural_network/plot.py
CHANGED
@@ -1,11 +1,17 @@
 import seaborn as sns
 import matplotlib.pyplot as plt
-from matplotlib.animation import FuncAnimation
+from matplotlib.animation import FuncAnimation, FFMpegWriter
 
 sns.set()
 
+"""
+Save plots to the plots folder for when
+we would like to show results on our little
+flask application
+"""
 
-def loss_history_plt(loss_history: list) -> None:
+
+def loss_history_plt(loss_history: list) -> FuncAnimation:
     fig, ax = plt.subplots()
 
     def animate(i):
@@ -18,5 +24,12 @@ def loss_history_plt(loss_history: list) -> None:
     ax.set_xlabel("Epoch")
     ax.set_ylabel("Training Loss")
 
-
-
+    return FuncAnimation(fig, animate, frames=len(loss_history), interval=100)
+
+
+def save_plt(plot, filename: str, animated: bool, fps=10):
+    if not animated:
+        plot.savefig(filename)
+        return
+    writer = FFMpegWriter(fps=fps)
+    plot.save(filename, writer=writer)
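`save_plt` branches across the two matplotlib save APIs: `Figure.savefig` for static plots and `Animation.save` for animations, where `FFMpegWriter` requires an ffmpeg binary on the PATH. A hedged usage sketch; the plots/ directory reflects the module docstring's stated intent rather than anything in the diff:

    ani = loss_history_plt(loss_history)
    save_plt(ani, "plots/loss_history.mp4", animated=True, fps=10)  # Animation.save via FFMpegWriter

    fig, ax = plt.subplots()
    ax.scatter(y_test, pred)  # y_test/pred as in neural_network/main.py
    save_plt(fig, "plots/pred_vs_actual.png", animated=False)  # Figure.savefig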