import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split

from neural_network.backprop import bp
from neural_network.model import Network
from neural_network.opts import activation
from neural_network.plot import loss_history_plt


def get_args() -> dict:
    """
    Returns a dictionary containing the arguments
    to be passed to the main function.
    """
    return {
        "epochs": int(input("Enter the number of epochs: ")),
        "hidden_size": int(input("Enter the number of hidden nodes: ")),
        "learning_rate": float(input("Enter the learning rate: ")),
        "activation_func": input("Enter the activation function: "),
    }

def init(X: np.ndarray, hidden_size: int) -> dict:
    """
    Returns a dictionary containing randomly initialized
    weights and biases to start off the neural network.
    """
    return {
        "W1": np.random.randn(X.shape[1], hidden_size),  # input -> hidden weights
        "b1": np.zeros((1, hidden_size)),                # hidden-layer biases
        "W2": np.random.randn(hidden_size, 1),           # hidden -> output weights
        "b2": np.zeros((1, 1)),                          # output bias
    }
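

# Illustrative only: a minimal forward-pass sketch consistent with the shapes
# initialized above (one hidden layer, a single output unit). The real forward
# pass lives in neural_network.backprop / neural_network.model; this helper and
# its name `_forward_sketch` are assumptions for clarity, not part of that API.
def _forward_sketch(X: np.ndarray, wb: dict, func) -> np.ndarray:
    hidden = func(X @ wb["W1"] + wb["b1"])  # shape: (n_samples, hidden_size)
    return hidden @ wb["W2"] + wb["b2"]     # shape: (n_samples, 1)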


def main(
    X: np.ndarray,
    y: np.ndarray,
) -> None:
    """
    Trains the network on a train split of (X, y), then
    evaluates the final weights on the held-out test split.
    """
    args = get_args()
    wb = init(X, args["hidden_size"])
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.3,
        random_state=8675309,
    )

    # train with backpropagation, then test the resulting
    # weights and biases against the y_test data
    results, loss_history = bp(X_train, y_train, wb, args)
    final = results[args["epochs"] - 1]
    func = activation[args["activation_func"]]["main"]

    # initialize our final network from the trained weights
    fm = Network(final_wb=final, activation_func=func)

    # predict on the X test data and compare it to the y test data
    pred = fm.predict(X_test)
    mse = np.mean((pred - y_test) ** 2)
    print(f"mean squared error: {mse}")

    # plot the training loss over epochs
    # (eventually we will save this plot)
    animated_loss_plt = loss_history_plt(loss_history)
    plt.show()
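

# Illustrative only: one possible way to run this module as a script. The
# original file does not specify how X and y are produced, so the dataset
# choice below (sklearn's diabetes regression set) is an assumption.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes

    X, y = load_diabetes(return_X_y=True)
    y = y.reshape(-1, 1)  # match the network's single-output shape
    main(X, y)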