Spaces:
Runtime error
Runtime error
app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,4 @@
|
|
| 1 |
-
import numpy as np
|
| 2 |
import torch
|
| 3 |
-
import gradio as gr
|
| 4 |
|
| 5 |
def predict_score(x1, x2):
|
| 6 |
Theta0 = torch.tensor(-0.5738734424645411)
|
|
@@ -9,72 +7,77 @@ def predict_score(x1, x2):
|
|
| 9 |
y_actual = Theta0 + Theta1 * x1 + Theta2 * 23 # Adjust the constant value here if needed
|
| 10 |
return y_actual.item()
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
break
|
| 56 |
|
| 57 |
-
|
| 58 |
-
cost.backward()
|
| 59 |
-
|
| 60 |
-
# Update Theta0, Theta1, and Theta2 using gradient descent
|
| 61 |
-
with torch.no_grad():
|
| 62 |
-
Theta0 -= alpha * Theta0.grad
|
| 63 |
-
Theta1 -= alpha * Theta1.grad
|
| 64 |
-
Theta2 -= alpha * Theta2.grad
|
| 65 |
-
|
| 66 |
-
# Reset gradients for the next iteration
|
| 67 |
-
Theta0.grad.zero_()
|
| 68 |
-
Theta1.grad.zero_()
|
| 69 |
-
Theta2.grad.zero_()
|
| 70 |
-
|
| 71 |
-
# Update the iteration counter and previous cost
|
| 72 |
-
iter_count += 1
|
| 73 |
-
prev_cost = cost
|
| 74 |
-
|
| 75 |
-
gr.Interface(fn=predict_score, inputs=[input1, input2], outputs=output).launch()
|
| 76 |
-
|
| 77 |
-
# Print the final values of Theta0, Theta1, and Theta2
|
| 78 |
-
print("Final values: Theta0 = {}, Theta1 = {}, Theta2 = {}".format(Theta0.item(), Theta1.item(), Theta2.item()))
|
| 79 |
-
print("Final Cost: Cost = {}".format(cost.item()))
|
| 80 |
-
print("Final values: y_pred = {}, y_actual = {}".format(y_pred, y_actual))
|
|
|
|
|
|
|
| 1 |
import torch
|
|
|
|
| 2 |
|
| 3 |
def predict_score(x1, x2):
|
| 4 |
Theta0 = torch.tensor(-0.5738734424645411)
|
|
|
|
| 7 |
y_actual = Theta0 + Theta1 * x1 + Theta2 * 23 # Adjust the constant value here if needed
|
| 8 |
return y_actual.item()
|
| 9 |
|
| 10 |
def gradient_descent():
    """Fit y = Theta0 + Theta1*x1 + Theta2*x2 to the sample data with batch
    gradient descent, then run an interactive prediction prompt.

    Returns:
        tuple[float, float, float]: the learned (Theta0, Theta1, Theta2).
        (The original returned None implicitly; callers that ignore the
        return value are unaffected.)
    """
    # Input data (floats so the model/gradients are float throughout).
    x1 = torch.tensor([50, 60, 70, 80, 90], dtype=torch.float32)
    x2 = torch.tensor([20, 21, 22, 23, 24], dtype=torch.float32)
    y_actual = torch.tensor([30, 35, 40, 45, 50], dtype=torch.float32)

    # Learning rate and maximum number of iterations.
    # NOTE: the previous alpha = 0.01 diverged — with raw features of
    # magnitude ~50–90 the cost's curvature is ~5e3, so stability requires
    # alpha < ~3.5e-4; the thetas were blowing up to inf/nan.
    alpha = 0.0001
    max_iters = 1000

    # Initial values for Theta0, Theta1, and Theta2 (autograd-tracked).
    Theta0 = torch.tensor(0.0, requires_grad=True)
    Theta1 = torch.tensor(0.0, requires_grad=True)
    Theta2 = torch.tensor(0.0, requires_grad=True)

    # Start the iteration counter; prev_cost is a plain float so we never
    # retain a previous iteration's autograd graph (the old code kept the
    # whole graph alive via `prev_cost = cost`).
    iter_count = 0
    prev_cost = None

    # Loop until convergence or maximum number of iterations.
    while iter_count < max_iters:
        # Compute the predicted output.
        y_pred = Theta0 + Theta1 * x1 + Theta2 * x2

        # Compute the errors and the (half) mean-squared-error cost.
        errors = y_pred - y_actual
        cost = torch.sum(errors ** 2) / (2 * len(x1))

        # Print the cost function every 100 iterations.
        if iter_count % 100 == 0:
            print("Iteration {}: Cost = {}, Theta0 = {}, Theta1 = {}, Theta2 = {}".format(iter_count, cost, Theta0.item(), Theta1.item(), Theta2.item()))

        # Check for convergence (cost decreasing by less than 0.0001).
        if prev_cost is not None and abs(cost.item() - prev_cost) < 0.0001:
            print("Converged after {} iterations".format(iter_count))
            break

        # Perform automatic differentiation to compute gradients.
        cost.backward()

        # Update Theta0, Theta1, and Theta2 using gradient descent.
        with torch.no_grad():
            Theta0 -= alpha * Theta0.grad
            Theta1 -= alpha * Theta1.grad
            Theta2 -= alpha * Theta2.grad

            # Reset gradients for the next iteration.
            Theta0.grad.zero_()
            Theta1.grad.zero_()
            Theta2.grad.zero_()

        # Update the iteration counter and previous cost (as a float).
        iter_count += 1
        prev_cost = cost.item()

    # Print the final values of Theta0, Theta1, and Theta2.
    print("Final values: Theta0 = {}, Theta1 = {}, Theta2 = {}".format(Theta0.item(), Theta1.item(), Theta2.item()))
    print("Final Cost: Cost = {}".format(cost.item()))
    print("Final values: y_pred = {}, y_actual = {}".format(y_pred, y_actual))

    # Launch the prediction interface using the *trained* parameters.
    # (The old code called predict_score(), which used stale hard-coded
    # thetas and ignored x2 entirely, discarding the training result.)
    while True:
        try:
            new_x1 = float(input("Enter the number of new students: "))
            new_x2 = float(input("Enter the number of temperature: "))
        except (EOFError, OSError):
            # No interactive stdin (e.g. a hosted, non-interactive run):
            # skip the prompt loop instead of crashing the app.
            break

        predicted_rooms = (Theta0 + Theta1 * new_x1 + Theta2 * new_x2).item()
        print("Predicted rooms:", predicted_rooms)
        print()

        try:
            choice = input("Do you want to predict again? (y/n): ")
        except (EOFError, OSError):
            break
        if choice.lower() != 'y':
            break

    return Theta0.item(), Theta1.item(), Theta2.item()
# Script entry point: guard so importing this module (e.g. from tests or
# another app) does not immediately run training and the blocking
# interactive input loop.
if __name__ == "__main__":
    gradient_descent()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|