# Linear regression demo: a hard-coded single-feature score predictor
# served through a Gradio UI, plus a two-feature linear model fitted
# with PyTorch autograd and manual gradient descent.
import numpy as np
import gradio as gr
import torch
def predict_score(hours, intercept=-0.5738734424645411, slope=2.1659122905141825):
    """Predict a score from the number of hours studied.

    Uses a previously fitted simple linear regression,
    ``score = intercept + slope * hours``.  The default coefficients are
    the original hard-coded fit; they are exposed as keyword arguments so
    the same function can serve any refitted single-feature model.

    Args:
        hours: Number of hours studied (scalar; a NumPy array also works
            since the expression broadcasts).
        intercept: Regression intercept (default: original fitted value).
        slope: Regression slope (default: original fitted value).

    Returns:
        The predicted score, same shape as ``hours``.
    """
    return intercept + slope * hours
# Wire the predictor into a Gradio web UI.
# BUG FIX: the original called the nonexistent attributes `gr.inputs.x1`
# and `gr.inputs.x2` (AttributeError at import time), bound both results
# to the same variable `input` (the first assignment was dead, and the
# name shadowed the builtin), and declared two inputs for a function that
# takes a single `hours` argument.  One numeric input matches
# predict_score's signature.
# NOTE(review): uses the modern `gr.Number`/`gr.Textbox` component API;
# confirm the installed Gradio version (the old `gr.outputs.Textbox`
# style suggests a pre-4.x install may be in use).
hours_input = gr.Number(label='Hours Studied')
score_output = gr.Textbox(label='Predicted Score')
gr.Interface(fn=predict_score,
             inputs=hours_input,
             outputs=score_output).launch()
# ----------------------------------------------------------------------
# Fit y = Theta0 + Theta1*x1 + Theta2*x2 by batch gradient descent,
# using autograd for the gradients.
# ----------------------------------------------------------------------

# Input data (explicit float dtype so all arithmetic stays in float32).
x1 = torch.tensor([50, 60, 70, 80, 90], dtype=torch.float32)
x2 = torch.tensor([20, 21, 22, 23, 24], dtype=torch.float32)
y_actual = torch.tensor([30, 35, 40, 45, 50], dtype=torch.float32)

# Learning rate and maximum number of iterations.
# BUG FIX: the original alpha = 0.01 diverges — with raw features of
# magnitude ~50-90 the Hessian's largest eigenvalue is ~5.6e3, so any
# step size above ~2/5.6e3 ≈ 3.6e-4 makes the thetas explode to NaN
# (and NaN never satisfies the convergence test, so all 1000 iterations
# ran).  1e-4 is safely inside the stable range.
alpha = 0.0001
max_iters = 1000

# Initial values for Theta0, Theta1, and Theta2 (leaf tensors tracked
# by autograd).
Theta0 = torch.tensor(0.0, requires_grad=True)
Theta1 = torch.tensor(0.0, requires_grad=True)
Theta2 = torch.tensor(0.0, requires_grad=True)

# Start the iteration counter.
iter_count = 0

# Loop until convergence or maximum number of iterations.
while iter_count < max_iters:
    # Compute the predicted output.
    y_pred = Theta0 + Theta1 * x1 + Theta2 * x2
    # Compute the errors.
    errors = y_pred - y_actual
    # Mean-squared-error cost (the conventional 1/2 factor simplifies
    # the gradient).
    cost = torch.sum(errors ** 2) / (2 * len(x1))
    # Print the cost function every 100 iterations.
    if iter_count % 100 == 0:
        print("Iteration {}: Cost = {}, Theta0 = {}, Theta1 = {}, Theta2 = {}".format(iter_count, cost, Theta0.item(), Theta1.item(),
                                                                                      Theta2.item()))
    # Check for convergence (cost decreasing by less than 0.0001).
    # prev_cost is a plain Python float (see below), so use abs().
    if iter_count > 0 and abs(cost.item() - prev_cost) < 0.0001:
        print("Converged after {} iterations".format(iter_count))
        break
    # Perform automatic differentiation to compute gradients.
    cost.backward()
    # Update the parameters by gradient descent, outside autograd.
    with torch.no_grad():
        Theta0 -= alpha * Theta0.grad
        Theta1 -= alpha * Theta1.grad
        Theta2 -= alpha * Theta2.grad
        # Reset gradients for the next iteration (backward() accumulates).
        Theta0.grad.zero_()
        Theta1.grad.zero_()
        Theta2.grad.zero_()
    # Update the iteration counter and previous cost.
    # BUG FIX: the original stored the cost *tensor*, which kept every
    # iteration's autograd graph alive; store the detached float value.
    iter_count += 1
    prev_cost = cost.item()

# Print the final values of Theta0, Theta1, and Theta2.
print("Final values: Theta0 = {}, Theta1 = {}, Theta2 = {}".format(Theta0.item(), Theta1.item(), Theta2.item()))
print("Final Cost: Cost = {}".format(cost.item()))
print("Final values: y_pred = {}, y_actual = {}".format(y_pred, y_actual))