AI-DHD committed on
Commit 4d4c970 · 1 Parent(s): c45ecef

Update app.py

Files changed (1)
  1. app.py +17 -36
app.py CHANGED
@@ -6,45 +6,26 @@ import torch
 
 openai.api_key = os.getenv("OPENAI_API_KEY")
 
-def predict(input, history=[]):
-
-    new_user_input_ids = input
-    response = openai.Completion.create(
-        model="davinci:ft-placeholder-2022-12-10-04-13-26",
-        prompt=input
-        temperature=0.13,
-        max_tokens=310,
-        top_p=1,
-        frequency_penalty=0.36,
-        presence_penalty=1.25
-    )
-
-    # generate a response
-    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
+def predict(input, history=[]):
 
-    # convert the tokens to text, and then split the responses into lines
-    response = tokenizer.decode(history[0]).split("<|endoftext|>")
-    response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
-    return response, history
-
-
-
-
-    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
-
-    # append the new user input tokens to the chat history
-    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
-
-    # generate a response
+    new_user_input_ids = input
     response = openai.Completion.create(
-        model="text-davinci-003",
-        #model="davinci:ft-placeholder:ai-dhd-2022-12-07-10-09-37",
-        prompt= input,
-        temperature=0.09,
-        max_tokens=608,
+        model="davinci:ft-placeholder-2022-12-10-04-13-26",
+        prompt=input
+        temperature=0.13,
+        max_tokens=310,
         top_p=1,
-        frequency_penalty=0,
-        presence_penalty=0).tolist()
+        frequency_penalty=0.36,
+        presence_penalty=1.25
+    )
+
+    # generate a response
+    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
+
+    # convert the tokens to text, and then split the responses into lines
+    response = tokenizer.decode(history[0]).split("<|endoftext|>")
+    response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list
+    return response, history
 
 gr.Interface(fn=predict,
              inputs=["text", "state"],