durrani committed on
Commit 6514fb4 · 1 Parent(s): 193f105
Files changed (1)
  1. app.py +62 -14
app.py CHANGED
@@ -1,14 +1,62 @@
- import numpy as np
- import gradio as gr
- import torch
- #function to predict the input hours
- def predict_score(hours):
-     #hours = np.array(hours)
-     pred_score = -0.5738734424645411 + 2.1659122905141825*hours
-     return pred_score #np.round(pred_score[0], 2)
- input = gr.inputs.x1(label='Number New Students')
- input = gr.inputs.x2(label='Number New Temperature')
- output = gr.outputs.Textbox(label='Predicted Score')
- gr.Interface( fn=predict_score,
-     inputs=input,
-     outputs=output).launch();
+ import torch
+
+ # Input data
+ x1 = torch.tensor([50, 60, 70, 80, 90])
+ x2 = torch.tensor([20, 21, 22, 23, 24])
+ y_actual = torch.tensor([30, 35, 40, 45, 50])
+
+ # Learning rate and maximum number of iterations
+ alpha = 0.01
+ max_iters = 1000
+
+ # Initial values for Theta0, Theta1, and Theta2
+ Theta0 = torch.tensor(0.0, requires_grad=True)
+ Theta1 = torch.tensor(0.0, requires_grad=True)
+ Theta2 = torch.tensor(0.0, requires_grad=True)
+
+ # Start the iteration counter
+ iter_count = 0
+
+ # Loop until convergence or maximum number of iterations
+ while iter_count < max_iters:
+     # Compute the predicted output
+     y_pred = Theta0 + Theta1 * x1 + Theta2 * x2
+
+     # Compute the errors
+     errors = y_pred - y_actual
+
+     # Compute the cost function
+     cost = torch.sum(errors ** 2) / (2 * len(x1))
+
+     # Print the cost function every 100 iterations
+     if iter_count % 100 == 0:
+         print("Iteration {}: Cost = {}, Theta0 = {}, Theta1 = {}, Theta2 = {}".format(
+             iter_count, cost.item(), Theta0.item(), Theta1.item(), Theta2.item()))
+
+     # Check for convergence (if the cost is decreasing by less than 0.0001)
+     if iter_count > 0 and torch.abs(cost - prev_cost) < 0.0001:
+         print("Converged after {} iterations".format(iter_count))
+         break
+
+     # Perform automatic differentiation to compute gradients
+     cost.backward()
+
+     # Update Theta0, Theta1, and Theta2 using gradient descent
+     with torch.no_grad():
+         Theta0 -= alpha * Theta0.grad
+         Theta1 -= alpha * Theta1.grad
+         Theta2 -= alpha * Theta2.grad
+
+     # Reset gradients for the next iteration
+     Theta0.grad.zero_()
+     Theta1.grad.zero_()
+     Theta2.grad.zero_()
+
+     # Update the iteration counter and previous cost (detached from the autograd graph)
+     iter_count += 1
+     prev_cost = cost.detach()
+
+ # Print the final values of Theta0, Theta1, and Theta2
+ print("Final values: Theta0 = {}, Theta1 = {}, Theta2 = {}".format(Theta0.item(), Theta1.item(), Theta2.item()))
+ print("Final Cost: Cost = {}".format(cost.item()))
+ print("Final values: y_pred = {}, y_actual = {}".format(y_pred, y_actual))