tstone87 committed on
Commit
ff195b8
·
verified ·
1 Parent(s): 8dae174

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -17
app.py CHANGED
@@ -1,23 +1,24 @@
1
  from transformers import pipeline
 
2
 
3
  # Load the pipeline for text generation
4
- generator = pipeline("text-generation", model="microsoft/DialoGPT-small")
 
 
 
 
5
 
6
  # Function to generate a response
7
  def dialoGPT_response(user_input, history):
8
- # Since the pipeline handles everything, we just need to format our input
9
- conversation = [{"role": "user", "content": user_input}] if history is None else history + [{"role": "user", "content": user_input}]
10
-
11
- # Generate response using the pipeline, which manages all pre/post-processing
12
- response = generator(conversation, return_full_text=False, max_length=1000)
13
-
14
- # Extract the last assistant response
15
- assistant_response = response[0]['generated_text']
16
-
17
- # Append this response to history
18
- new_history = conversation + [{"role": "assistant", "content": assistant_response}]
19
-
20
- return assistant_response, new_history
21
 
22
  # Gradio interface
23
  iface = gr.Interface(
@@ -27,12 +28,13 @@ iface = gr.Interface(
27
  "state"
28
  ],
29
  outputs=[
30
- "text", # The response
31
- "state" # Updated history
32
  ],
33
  title="DialoGPT Chat",
34
  description="Chat with DialoGPT-small model. Your conversation history is maintained.",
35
  allow_flagging="never"
36
  )
37
 
38
- iface.launch()
 
 
import gradio as gr
from transformers import pipeline

# Build the text-generation pipeline once at import time so every chat
# request reuses the same loaded model instance.
try:
    generator = pipeline("text-generation", model="microsoft/DialoGPT-small")
except Exception as e:
    # Surface the load failure in the logs, then abort startup — the app
    # cannot serve requests without a model.
    print(f"Error loading the model: {e}")
    raise
 
def dialoGPT_response(user_input, history):
    """Generate a chat reply with DialoGPT and return it with updated history.

    Parameters
    ----------
    user_input : str
        The user's latest message.
    history : list[dict] | None
        Prior conversation as ``{"role": ..., "content": ...}`` messages;
        ``None`` on the first turn (Gradio initializes "state" to ``None``).

    Returns
    -------
    tuple
        ``(assistant_reply, new_history)`` on success; on failure, a fixed
        error string and the history unchanged.
    """
    try:
        # Append the new user turn to whatever history exists so far
        # (`history or []` covers both the None first turn and an empty list).
        conversation = (history or []) + [{"role": "user", "content": user_input}]

        # Use max_new_tokens, which bounds only the generated reply.
        # The previous max_length=1000 counted the prompt as well, so a
        # growing history steadily shrank — and could exhaust — the budget
        # left for the model to actually generate anything.
        response = generator(conversation, return_full_text=False, max_new_tokens=256)

        # With return_full_text=False the pipeline returns only the newly
        # generated assistant text.
        assistant_response = response[0]['generated_text']

        # Record the assistant turn so the caller's "state" stays in sync.
        new_history = conversation + [{"role": "assistant", "content": assistant_response}]
        return assistant_response, new_history
    except Exception as e:
        # Top-level UI boundary: log and degrade gracefully instead of
        # crashing the Gradio app on a single bad request.
        print(f"Error generating response: {e}")
        return "An error occurred while generating a response.", history
 
 
 
 
22
 
23
  # Gradio interface
24
  iface = gr.Interface(
 
28
  "state"
29
  ],
30
  outputs=[
31
+ "text",
32
+ "state"
33
  ],
34
  title="DialoGPT Chat",
35
  description="Chat with DialoGPT-small model. Your conversation history is maintained.",
36
  allow_flagging="never"
37
  )
38
 
39
+ if __name__ == "__main__":
40
+ iface.launch(debug=True)