Jensen-holm committed on
Commit
6377159
·
1 Parent(s): 137390c

trying to move forwards with the iris dataset

Browse files
Files changed (3) hide show
  1. app.py +16 -10
  2. dataset/iris.py +29 -0
  3. neural_network/main.py +5 -10
app.py CHANGED
@@ -1,6 +1,7 @@
1
  from flask import Flask, request, jsonify, make_response, render_template
2
 
3
  from dataset.random import random_dataset
 
4
  from opts import options
5
 
6
  app = Flask(
@@ -29,23 +30,28 @@ def index():
29
  if request.method == "GET":
30
  return render_template("index.html")
31
 
32
- params = request.json
33
- error_message = not_valid(params=params)
34
  if error_message:
35
  return make_response(error_message, 400)
36
 
37
  # parse arguments
38
- algorithm = options[params["algorithm"]]
39
- args = params["arguments"]
40
 
41
  # in the future instead of a random data set
42
  # we should do a more real one like palmer penguins
43
- X, y = random_dataset(100, 10)
44
- model = algorithm(X, y, args)
45
- return jsonify(model)
 
 
 
 
 
 
 
 
46
 
47
 
48
  if __name__ == "__main__":
49
- app.run(
50
- debug=False,
51
- )
 
1
  from flask import Flask, request, jsonify, make_response, render_template
2
 
3
  from dataset.random import random_dataset
4
+ from dataset.iris import iris
5
  from opts import options
6
 
7
  app = Flask(
 
30
  if request.method == "GET":
31
  return render_template("index.html")
32
 
33
+ error_message = not_valid(params=request.json)
 
34
  if error_message:
35
  return make_response(error_message, 400)
36
 
37
  # parse arguments
38
+ algorithm = options[request.json["algorithm"]]
39
+ args = request.json["arguments"]
40
 
41
  # in the future instead of a random data set
42
  # we should do a more real one like palmer penguins
43
+
44
+ X_train, X_test, y_train, y_test = iris()
45
+ return jsonify(
46
+ algorithm(
47
+ X_train=X_train,
48
+ y_train=y_train,
49
+ X_test=X_test,
50
+ y_test=y_test,
51
+ args=args,
52
+ )
53
+ )
54
 
55
 
56
  if __name__ == "__main__":
57
+ app.run(debug=False)
 
 
dataset/iris.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sklearn.datasets import load_iris
2
+ from sklearn.preprocessing import OneHotEncoder, StandardScaler
3
+ from sklearn.model_selection import train_test_split
4
+ import numpy as np
5
+
6
+
7
def iris() -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Load, split, and preprocess the iris dataset.

    Returns a ``(X_train, X_test, y_train, y_test)`` tuple where the
    features are standardized and the targets are one-hot encoded.
    The split is 70/30 with a fixed ``random_state`` so results are
    reproducible across calls.
    """
    # Local renamed from `iris` to avoid shadowing this function's own name.
    data = load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        data.data,
        data.target,
        test_size=0.3,
        random_state=8675309,
    )

    # Fit the scaler on the training split only, then apply that same
    # fitted transform to the test split. Calling fit_transform on the
    # test data (as before) leaks test-set statistics into preprocessing.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # Use a single encoder fitted on the training labels so the train and
    # test matrices share one column ordering; independently fitted
    # encoders could disagree if a class were absent from one split.
    encoder = OneHotEncoder()
    y_train = encoder.fit_transform(y_train.reshape(-1, 1)).toarray()
    y_test = encoder.transform(y_test.reshape(-1, 1)).toarray()
    return X_train, X_test, y_train, y_test
neural_network/main.py CHANGED
@@ -22,21 +22,16 @@ def init(
22
 
23
 
24
  def main(
25
- X: np.array,
26
- y: np.array,
 
 
27
  args,
28
  ) -> None:
29
- wb = init(X, args["hidden_size"])
30
  act = activation[args["activation_func"]]
31
  args["activation_func"] = act["main"]
32
  args["func_prime"] = act["prime"]
33
-
34
- X_train, X_test, y_train, y_test = train_test_split(
35
- X,
36
- y,
37
- test_size=0.3,
38
- random_state=8675309
39
- )
40
  model = bp(X_train, y_train, wb, args)
41
 
42
  # evaluate the model and return final results
 
22
 
23
 
24
  def main(
25
+ X_train: np.array,
26
+ y_train: np.array,
27
+ X_test: np.array,
28
+ y_test: np.array,
29
  args,
30
  ) -> None:
31
+ wb = init(X_train, args["hidden_size"])
32
  act = activation[args["activation_func"]]
33
  args["activation_func"] = act["main"]
34
  args["func_prime"] = act["prime"]
 
 
 
 
 
 
 
35
  model = bp(X_train, y_train, wb, args)
36
 
37
  # evaluate the model and return final results