import gradio as gr
from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration
import tensorflow as tf
import json
import os
print("Loading the model......")
model_name = "WICKED4950/Irisonego5"
strategy = tf.distribute.MirroredStrategy()
tf.config.optimizer.set_jit(True) # Enable XLA
tokenizer = AutoTokenizer.from_pretrained(model_name)
with strategy.scope():
    model = TFBlenderbotForConditionalGeneration.from_pretrained(model_name)
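
# Optional smoke test (a minimal sketch, not part of the original app): run one
# generation to confirm the model and tokenizer load before the UI starts.
# IRIS_SMOKE_TEST is a hypothetical environment variable used only as a guard here.
if os.environ.get("IRIS_SMOKE_TEST"):
    _test = tokenizer("Hello!", return_tensors="tf")
    _ids = model.generate(_test["input_ids"], max_length=32)
    print("Smoke test output:", tokenizer.decode(_ids[0], skip_special_tokens=True))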
def save_question(question, answer, path="question_answer.json"):
    """Append a question/answer pair to the JSON interaction log."""
    print(f"Saving data to: {os.path.abspath(path)}")
    # Initialize the log file on first use so json.load doesn't fail
    if not os.path.exists(path):
        with open(path, "w") as file:
            json.dump({"Interactions": []}, file, indent=4)
    with open(path, "r") as file:
        data = json.load(file)
    data["Interactions"].append({"Question": question, "Answer": answer})
    with open(path, "w") as file:
        json.dump(data, file, indent=4)
    print("Saved interaction.")
print("Interface getting done....")
# Define the chatbot function
def predict(user_input):
    # Tokenize the input text
    inputs = tokenizer(user_input, return_tensors="tf", padding=True, truncation=True)
    # Generate the response using the model
    response_id = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=128,   # Maximum length of the generated response
        do_sample=True,   # Sample instead of greedy decoding, for variability
        top_k=15,         # Consider only the 15 most likely tokens at each step
        top_p=0.95,       # Nucleus sampling: keep tokens within 95% cumulative probability
        temperature=0.8,  # Lower values are more focused, higher more creative
    )
    # Decode the generated token IDs back into text
    response = tokenizer.decode(response_id[0], skip_special_tokens=True)
    save_question(question=user_input, answer=response)
    return response
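
# Example usage (illustrative, outside the Gradio UI):
#   response = predict("How do I deal with stress?")
#   print(response)  # decoded reply; the pair is also logged to question_answer.json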
# Gradio interface
gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Ask Iris anything!"),
    outputs=gr.Textbox(label="Iris's Response"),
    examples=[
        ["What should I do if I'm feeling down?"],
        ["How do I deal with stress?"],
        ["Tell me something positive!"],
    ],
    description="A chatbot trained to provide friendly and comforting responses. Type your question below and let Iris help!",
    title="Iris - Your Friendly Mental Health Assistant",
).launch()