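# app.py: Hugging Face Space entry point for Iris, a BlenderBot-based chatbot
# that logs every question/answer pair to question_answer.json.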
import gradio as gr
from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration
import tensorflow as tf
import json
from datetime import datetime
# Start each run with a fresh interaction log
data = {"Interactions": []}
with open("question_answer.json", "w") as file:
    json.dump(data, file, indent=4)
print("Loading the model......")
model_name = "WICKED4950/Irisonego5"
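# Distribute the model across all available GPUs; MirroredStrategy falls back
# to a single device when no GPU is present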
strategy = tf.distribute.MirroredStrategy()
tf.config.optimizer.set_jit(True) # Enable XLA
tokenizer = AutoTokenizer.from_pretrained(model_name)
with strategy.scope():
    model = TFBlenderbotForConditionalGeneration.from_pretrained(model_name)
def save_question(question, answer, path="question_answer.json"):
    # Append the new Q/A pair, with a timestamp, to the JSON log
    with open(path, "r") as file:
        data = json.load(file)
    data["Interactions"].append({
        "Question": question,
        "Answer": answer,
        "Time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    })
    with open(path, "w") as file:
        json.dump(data, file, indent=4)
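# Example of the resulting log structure (illustrative values):
# {"Interactions": [{"Question": "Hi", "Answer": "Hello!", "Time": "2025-01-01 12:00:00"}]}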
print("Interface getting done....")
# Define the chatbot function
def predict(user_input):
    # Hidden debug command: dump the saved interaction log to the console
    if user_input == "Print_data_hmm":
        with open("question_answer.json", "r") as file:
            print(json.load(file))
        print()
        return "Done"
    else:
        inputs = tokenizer(user_input, return_tensors="tf", padding=True, truncation=True)
        # Generate the response using the model
        response_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],  # pass the mask so padding is ignored
            max_length=128,    # Maximum length of the response
            do_sample=True,    # Sample for variability
            top_k=15,          # Consider only the top 15 tokens
            top_p=0.95,        # Nucleus sampling
            temperature=0.8,   # Adjusts creativity of the response
        )
        # Decode the response
        response = tokenizer.decode(response_ids[0], skip_special_tokens=True)
        save_question(question=user_input, answer=response)
        print("Q:", user_input)
        print("A:", response)
        print("T:", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        return response
# Gradio interface
gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Ask Iris anything!"),
    outputs=gr.Textbox(label="Iris's Response"),
    examples=[
        ["What should I do if I'm feeling down?"],
        ["How do I deal with stress?"],
        ["Tell me something positive!"],
    ],
    description="A chatbot trained to provide friendly and comforting responses. Type your question below and let Iris help!",
    title="Iris - Your Friendly Mental Health Assistant",
).launch()
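# launch() blocks and serves the web UI; on Hugging Face Spaces this file is
# executed automatically as the app entry point.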