Jensen-holm committed
Commit bec1ee5 · 1 parent: 204251b

plot functionality

neural_network/backprop.py CHANGED
@@ -13,14 +13,14 @@ def bp(X_train: np.array, y_train: np.array, wb: dict, args: dict) -> (dict, np.
     lr = args["learning_rate"]
 
     r = {}
-    loss_history = np.array([])
+    loss_history = []
     for e in tqdm(range(epochs)):
        # forward prop
        node1 = compute_node(arr=X_train, w=w1, b=b1, func=func)
        y_hat = compute_node(arr=node1, w=w2, b=b2, func=func)
        error = y_hat - y_train
        mean_squared_error = mse(y_train, y_hat)
-        loss_history = np.append(loss_history, mean_squared_error)
+        loss_history.append(mean_squared_error)
 
        # backprop
        dw1 = np.dot(
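
Note (not stated in the commit message, but a likely motivation for this change): np.append copies the whole array on every call, so appending to a plain Python list per epoch and converting once at the end is the cheaper pattern. A minimal sketch with placeholder loss values:

import numpy as np

loss_history = []                              # amortized O(1) per append
for epoch in range(1000):
    mean_squared_error = 1.0 / (epoch + 1)     # placeholder loss for illustration
    loss_history.append(mean_squared_error)

loss_history = np.asarray(loss_history)        # single conversion if an array is needed
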
neural_network/main.py CHANGED
@@ -4,6 +4,7 @@ import numpy as np
 from neural_network.opts import activation
 from neural_network.backprop import bp
 from neural_network.model import Model
+from neural_network.plot import loss_history_plt
 
 
 def get_args() -> dict:
@@ -60,3 +61,4 @@ def main(
 
     # plot predicted versus actual
     # also plot the training loss over epochs
+    loss_history_plt(loss_history)
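
How loss_history reaches main() is not shown in these hunks; the annotated return type of bp, (dict, np.array), suggests the loss history is its second return value. A hedged sketch of the wiring, with the function name train_and_plot and the unpacking assumed rather than taken from the commit:

from neural_network.backprop import bp
from neural_network.plot import loss_history_plt


def train_and_plot(X_train, y_train, wb, args):
    # Assumed unpacking: bp appears to return the results dict plus the
    # per-epoch loss history collected during training.
    r, loss_history = bp(X_train, y_train, wb, args)
    loss_history_plt(loss_history)
    return r
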
neural_network/plot.py ADDED
@@ -0,0 +1,15 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import seaborn as sns
+
+sns.set()
+
+
+def loss_history_plt(loss_history: np.array) -> None:
+    sns.lineplot(
+        x=np.arange(len(loss_history)),
+        y=loss_history,
+    )
+    plt.xlabel("Epoch")
+    plt.ylabel("Training Loss")
+    plt.show()
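
A quick way to exercise the new module in isolation (synthetic data, purely illustrative):

import numpy as np

from neural_network.plot import loss_history_plt

# A fake, exponentially decaying loss curve stands in for real training output.
fake_loss_history = np.exp(-0.05 * np.arange(100))
loss_history_plt(fake_loss_history)  # renders the line plot via plt.show()
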
requirements.txt CHANGED
@@ -1,3 +1,5 @@
+matplotlib==3.7.1
 numpy==1.24.2
 scikit_learn==1.2.2
+seaborn==0.12.2
 tqdm==4.65.0
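
With matplotlib and seaborn pinned here, reinstalling from the file picks up the new plotting dependencies (standard pip usage, not part of the commit):

pip install -r requirements.txt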