juewang committed
Commit d7570a5 · Parent: a531b86

Update app.py

Files changed (1): app.py (+21, -10)
app.py CHANGED
@@ -2,7 +2,13 @@ import streamlit as st
 import requests
 import time
 
-def infer(prompt, model_name, max_new_tokens=10, temperature=0.0, top_p=1.0):
+def infer(prompt,
+          model_name,
+          max_new_tokens=10,
+          temperature=0.0,
+          top_p=1.0,
+          num_completions=1,
+          seed=42,):
 
     model_name_map = {
         "GPT-JT-6B-v1": "Together-gpt-JT-6B-v1",
@@ -12,7 +18,7 @@ def infer(prompt, model_name, max_new_tokens=10, temperature=0.0, top_p=1.0):
         "type": "general",
         "payload": {
             "max_tokens": int(max_new_tokens),
-            "n": 1,
+            "n": int(num_completions),
             "temperature": float(temperature),
             "top_p": float(top_p),
             "model": model_name_map[model_name],
@@ -21,7 +27,7 @@
             "stop": None,
             "best_of": 1,
             "echo": False,
-            "seed": 42,
+            "seed": int(seed),
             "prompt_embedding": False,
         },
         "returned_payload": {},
@@ -47,6 +53,14 @@ st.title("TOMA Application")
 
 col1, col2 = st.columns([1, 3])
 
+with col1:
+    model_name = st.selectbox("Model", ["GPT-JT-6B-v1"])
+    max_new_tokens = st.text_input('Max new tokens', "10")
+    temperature = st.text_input('temperature', "1.0")
+    top_p = st.text_input('top_p', "1.0")
+    num_completions = st.text_input('num_completions', "1")
+    seed = st.text_input('seed', "42")
+
 with col2:
     s_example = "Please answer the following question:\n\nQuestion: Where is Zurich?\nAnswer:"
     prompt = st.text_area(
@@ -63,11 +77,8 @@ with col2:
 
 if button_submit:
     with st.spinner(text="In progress.."):
-        report_text = infer(prompt, model_name=model_name, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p)
+        report_text = infer(
+            prompt, model_name=model_name, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p,
+            num_completions=num_completions, seed=seed,
+        )
         generated_area.markdown(report_text)
-
-with col1:
-    model_name = st.selectbox("Model", ["GPT-JT-6B-v1"])
-    max_new_tokens = st.text_input('Max new tokens', "10")
-    temperature = st.text_input('temperature', "0.0")
-    top_p = st.text_input('top_p', "1.0")
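
For reference, a hypothetical stand-alone call that mirrors the submit handler after this commit. The prompt and parameter values below are hard-coded stand-ins for the widget inputs, and it assumes infer from app.py is in scope; everything else follows the diff. Since st.text_input returns strings, every numeric argument arrives as a str, and infer() casts them itself (int(max_new_tokens), int(num_completions), float(temperature), float(top_p), int(seed)) while building the payload, which is why the UI can pass them through unchanged:

# Hypothetical example; values are hard-coded stand-ins for the widget inputs.
report_text = infer(
    "Please answer the following question:\n\nQuestion: Where is Zurich?\nAnswer:",
    model_name="GPT-JT-6B-v1",   # mapped to "Together-gpt-JT-6B-v1" inside infer
    max_new_tokens="10",         # str is fine: infer applies int()
    temperature="1.0",           # str is fine: infer applies float()
    top_p="1.0",
    num_completions="1",         # new in this commit; becomes payload field "n"
    seed="42",                   # new in this commit; becomes payload field "seed"
)

Keeping the casts inside infer leaves the widget code free of conversions, at the cost that a non-numeric entry (e.g. a seed of "abc") raises ValueError only when the request payload is built.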