SivaResearch committed on
Commit 69fcc4b · verified · 1 Parent(s): db3ef92

update app.py

Files changed (1)
  1. app.py +10 -10
app.py CHANGED
@@ -5,30 +5,30 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("ai4bharat/Airavata")
 model = AutoModelForCausalLM.from_pretrained("ai4bharat/Airavata")
 
-def chat_interface(prompt):
-    # Tokenize input prompt and generate response
-    inputs = tokenizer(prompt, return_tensors="pt", max_length=256, truncation=True)
+def chat_interface(user_input, assistant_input):
+    # Concatenate the user and assistant inputs to simulate a chat conversation
+    chat_history = f"{assistant_input} User: {user_input}"
+
+    # Tokenize the chat history and generate the response
+    inputs = tokenizer(chat_history, return_tensors="pt", max_length=256, truncation=True)
     outputs = model.generate(**inputs)
     response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
 
-    return response
+    return response, chat_history
 
 # Define Gradio Chat Interface
 iface = gr.ChatInterface(
     chat_model=chat_interface,
     title="GPT-2 Chat Interface",
-    inputs=["text"],
-    outputs=["text"],
-    examples = [
-        ["मैं अपने समय प्रबंधन कौशल को कैसे सुधार सकता हूँ? मुझे पांच बिंदु बताएं।"],
-        ["मैं अपने समय प्रबंधन कौशल को कैसे सुधार सकता हूँ? मुझे पांच बिंदु बताएं और उनका वर्णन करें।"],
-    ],
+    inputs=["text", "text"],
+    outputs=["text", "text"],
 )
 
 # Launch Gradio Chat Interface
 iface.launch()
 
 
+
 # import torch
 # from transformers import AutoTokenizer, AutoModelForCausalLM
 # import gradio as gr
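
A note on the updated wiring: in current Gradio releases gr.ChatInterface has no chat_model, inputs, or outputs parameters; it takes the chat callable as its first argument (fn), invokes it with (message, history), and expects a single reply string back, so the two-value return added here would not plug in as written. The sketch below is one plausible way to hook the same Airavata generation code into gr.ChatInterface; it is not the committed implementation, and chat_fn, the plain-text prompt concatenation, the max_length/max_new_tokens settings, and the tuple-style history handling are assumptions rather than anything taken from this repo. The Hindi example prompt kept from the removed examples list translates to "How can I improve my time management skills? Give me five points."

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("ai4bharat/Airavata")
model = AutoModelForCausalLM.from_pretrained("ai4bharat/Airavata")

def chat_fn(message, history):
    # history arrives as [user, assistant] pairs; fold the prior turns and the
    # new message into one prompt string. Airavata's own chat template is not
    # reproduced here, so plain concatenation stands in for it (assumption).
    prompt = ""
    for user_turn, bot_turn in history:
        prompt += f"User: {user_turn}\nAssistant: {bot_turn}\n"
    prompt += f"User: {message}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt", max_length=1024, truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=256)

    # generate() echoes the prompt tokens, so decode only the newly generated part.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

iface = gr.ChatInterface(
    fn=chat_fn,
    title="Airavata Chat Interface",
    examples=["मैं अपने समय प्रबंधन कौशल को कैसे सुधार सकता हूँ? मुझे पांच बिंदु बताएं।"],
)

iface.launch()

In this sketch the title names the Airavata checkpoint actually being loaded, since the committed string still says GPT-2, and there is no replacement for inputs or outputs because ChatInterface supplies its own textbox and chatbot widgets.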