AI-Edify committed
Commit 4a93d1e · 1 Parent(s): 9dba9ec

Update app.py

Files changed (1)
  1. app.py +20 -24
app.py CHANGED
@@ -4,11 +4,9 @@ import requests
 import csv
 import os

-
-
-API = os.environ.get("api_key")
-openai.api_key = API
-print(API)
+API= os.environ.get("api_key")
+user_token = os.environ.get("api_key")
+print(user_token)

 prompt_templates = {"Default ChatGPT": ""}

@@ -35,14 +33,14 @@ def download_prompt_templates():
     choices = choices[:1] + sorted(choices[1:])
     return gr.update(value=choices[0], choices=choices)

-def on_token_change(user_token):
-    openai.api_key = API
+#def on_token_change(user_token):
+    #openai.api_key = user_token

 def on_prompt_template_change(prompt_template):
     if not isinstance(prompt_template, str): return
     return prompt_templates[prompt_template]

-def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, context_length, state):
+def submit_message( prompt, prompt_template, temperature, max_tokens, context_length, state):

     history = state['messages']

@@ -57,7 +55,7 @@ def submit_message(user_token, prompt, prompt_template, temperature, max_tokens,

     prompt_msg = { "role": "user", "content": prompt }

-    if not user_token:
+    if not API:
         history.append(prompt_msg)
         history.append({
             "role": "system",
@@ -84,7 +82,7 @@ def submit_message(user_token, prompt, prompt_template, temperature, max_tokens,
     chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]

     return '', chat_messages, total_tokens_used_msg, state
-    #print(user_token)
+    print(user_token)

 def clear_conversation():
     return gr.update(value=None, visible=True), None, "", get_empty_state()
@@ -107,7 +105,7 @@ css = """

 }
 """
-#print(user_token)
+print(user_token)
 with gr.Blocks(css=css) as demo:

     state = gr.State(get_empty_state())
@@ -126,28 +124,26 @@ with gr.Blocks(css=css) as demo:
                 total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
                 btn_clear_conversation = gr.Button("🔃 Start New Conversation")
             with gr.Column():
-                gr.Markdown("Enter your OpenAI API Key.", elem_id="label",visible=False)
-                user_token=gr.Textbox(value="",type='password',visible=False)
-                user_token.update(API)
+                #gr.Markdown("Enter your OpenAI API Key.", elem_id="label")
+                #user_token=gr.Textbox(value=os.getenv("api_key"),type='password',visible=False)
                 prompt_template = gr.Dropdown(label="Set a custom insruction for the chatbot:", choices=list(prompt_templates.keys()))
                 prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview", visible=False)
                 with gr.Accordion("Advanced parameters", open=False, visible=False):
                     temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, label="Temperature", info="Higher = more creative/chaotic")
                     max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, label="Max tokens per response")
                     context_length = gr.Slider(minimum=1, maximum=10, value=2, step=1, label="Context length", info="Number of previous messages to send to the chatbot. Be careful with high values, it can blow up the token budget quickly.")
-
+
     print(user_token)
-
-
-    btn_submit.click(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
-    input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
+
+    btn_submit.click(submit_message, [ input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
+    input_message.submit(submit_message, [ input_message, prompt_template, temperature, max_tokens, context_length, state], [input_message, chatbot, total_tokens_str, state])
     btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
     prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
-    user_token.change(on_token_change, inputs=[user_token], outputs=[])
-
-
+    #user_token.change(on_token_change, inputs=[user_token], outputs=[])
+    #user_token.submit()
+    openai.api_key = API
     demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template], queur=False)
-    openai.api_key = API
-    print(openai.api_key)
+
+    print(user_token)
     demo.queue(concurrency_count=10)
     demo.launch(height='1000px',width='800px')
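
In short, the commit removes the user-supplied token textbox and its on_token_change handler, reads the OpenAI key from the Space's environment instead, and drops user_token from the submit_message signature and the event-wiring input lists. The sketch below is a minimal, stripped-down illustration of that pattern, not the full app: it assumes the secret is stored under the name "api_key" as in the diff, the handler body is reduced to a placeholder, and the component parameters are trimmed.

# Minimal sketch of the pattern this commit adopts (assumptions noted above).
import os

import gradio as gr
import openai

API = os.environ.get("api_key")   # Space secret; None if it is not configured
openai.api_key = API              # set once at startup, no per-user token

def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):
    # user_token is no longer a parameter; the module-level key is used instead.
    if not API:
        # No key configured: surface an error instead of calling the API.
        return "", [(prompt, "Error: the api_key secret is not set.")], "", state
    # ... build the message history and call openai.ChatCompletion.create(...) here ...
    return "", [], "", state

with gr.Blocks() as demo:
    state = gr.State({"messages": []})
    chatbot = gr.Chatbot()
    input_message = gr.Textbox()
    total_tokens_str = gr.Markdown()
    prompt_template = gr.Dropdown(choices=["Default ChatGPT"])
    temperature = gr.Slider(0, 2, 0.7)
    max_tokens = gr.Slider(100, 4096, 1000)
    context_length = gr.Slider(1, 10, 2)
    # Event wiring mirrors the new version: no user_token in the inputs list.
    input_message.submit(
        submit_message,
        [input_message, prompt_template, temperature, max_tokens, context_length, state],
        [input_message, chatbot, total_tokens_str, state],
    )

demo.launch()

The design trade-off is that a single server-side key is configured once and never passes through the browser, at the cost of every visitor sharing that key's quota; the "if not API" branch in the diff is what keeps the app responding with an error message rather than failing silently when the secret is absent.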