AI-DHD committed on
Commit
9a3ed94
·
1 Parent(s): 4d4c970

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -14
app.py CHANGED
@@ -1,15 +1,10 @@
1
  import os
2
  import openai
3
  import gradio as gr
4
- from transformers import AutoModelForCausalLM, AutoTokenizer
5
- import torch
6
 
7
  openai.api_key = os.getenv("OPENAI_API_KEY")
8
 
9
- def predict(input, history=[]):
10
-
11
- new_user_input_ids = input
12
- response = openai.Completion.create(
13
  model="davinci:ft-placeholder-2022-12-10-04-13-26",
14
  prompt=input
15
  temperature=0.13,
@@ -17,16 +12,10 @@ def predict(input, history=[]):
17
  top_p=1,
18
  frequency_penalty=0.36,
19
  presence_penalty=1.25
20
- )
21
 
22
- # generate a response
23
- history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
24
 
25
- # convert the tokens to text, and then split the responses into lines
26
- response = tokenizer.decode(history[0]).split("<|endoftext|>")
27
- response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
28
- return response, history
29
 
30
  gr.Interface(fn=predict,
31
  inputs=["text", "state"],
32
- outputs=["chatbot", "state"]).launch()
 
import os

import openai
import gradio as gr

# Credentials come from the environment; never hard-code API keys.
openai.api_key = os.getenv("OPENAI_API_KEY")


def predict(input, history=None):
    """Send the user's text to the fine-tuned completion model and return the reply.

    NOTE(review): the committed file dropped this ``def`` line while keeping
    its body, leaving ``gr.Interface(fn=predict, ...)`` referencing an
    undefined name — restored here.

    Parameters
    ----------
    input : str
        The user's message (name kept for Gradio positional call; it does
        shadow the builtin ``input``).
    history : list | None
        Running chat history held in the Gradio "state" component.
        Defaults to ``None`` instead of ``[]`` to avoid the shared
        mutable-default pitfall.

    Returns
    -------
    tuple[list, list]
        (chat pairs for the chatbot display, updated history state).
    """
    history = [] if history is None else history

    completion = openai.Completion.create(
        model="davinci:ft-placeholder-2022-12-10-04-13-26",
        prompt=input,  # original was missing this comma — a SyntaxError
        temperature=0.13,
        top_p=1,
        frequency_penalty=0.36,
        presence_penalty=1.25,
    )
    # Original discarded the API response entirely; extract the text so the
    # UI has something to show.
    reply = completion.choices[0].text
    history.append((input, reply))
    return history, history


if __name__ == "__main__":
    # "response" is not a Gradio component name; the pre-diff version used
    # "chatbot", which matches the list-of-pairs returned by predict().
    gr.Interface(
        fn=predict,
        inputs=["text", "state"],
        outputs=["chatbot", "state"],
    ).launch()