davidkim205 committed on
Commit
a65f008
·
verified ·
1 Parent(s): 1a2430b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -28
app.py CHANGED
@@ -1,53 +1,84 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- import re
4
- import time
5
 
6
  # Function to create an Inference Client based on selected model
7
  def create_inference_client(model_name):
8
  return InferenceClient(model_name)
9
 
 
10
  # Function to generate a response
11
  def respond(
12
- message,
13
- system_message,
14
- model,
15
- max_tokens,
16
- temperature,
17
- top_p,
18
  ):
19
  # Create InferenceClient based on model selection
20
  client = create_inference_client(model)
21
 
22
  messages = [{"role": "system", "content": system_message}]
23
  messages.append({"role": "user", "content": message})
24
-
25
  response = ""
26
  for message in client.chat_completion(
27
- messages,
28
- max_tokens=max_tokens,
29
- stream=True,
30
- temperature=temperature,
31
- top_p=top_p,
32
  ):
33
  token = message.choices[0].delta.content
34
  response += token
35
  print(response)
36
  yield response
37
 
38
- # Gradio interface setup
39
- demo = gr.Interface(
40
- fn=respond,
41
- inputs=[
42
- gr.Textbox(label="User Message"),
43
- gr.Textbox(value="한국어 문맥상 부자연스러운 부분을 찾으시오. 오류 문장과 개수는 <incorrect grammar> </incorrect grammar> tag, <incorrect grammar> - 오류 문장과 설명 </incorrect grammar> 안에 담겨 있으며, <wrong count> </wrong count> tag, 즉 <wrong count> 오류 개수 </wrong count> 이다.", label="System message"),
44
- gr.Dropdown(choices=["davidkim205/kgrammar-2-1b", "davidkim205/kgrammar-2-3b"], value="davidkim205/kgrammar-2-1b", label="Model Selection"),
45
- gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"),
46
- gr.Slider(minimum=0.1, maximum=4.0, value=1.0, step=0.1, label="Temperature"),
47
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
48
- ],
49
- outputs="textbox"
50
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  if __name__ == "__main__":
53
- demo.launch()
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+
 
4
 
5
# Factory: builds one InferenceClient per selected model repo id.
def create_inference_client(model_name):
    """Return a Hugging Face InferenceClient bound to *model_name*."""
    return InferenceClient(model_name)
8
 
9
+
10
# Function to generate a response
def respond(
    message,
    system_message,
    model,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message* from the selected model.

    Yields the cumulative response text after each streamed chunk, so the
    Gradio output box updates incrementally.

    Args:
        message: The user message (LLM output to be grammar-checked).
        system_message: System prompt steering the grammar evaluation.
        model: Model repo id selected in the dropdown.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
    """
    # Create InferenceClient based on model selection
    client = create_inference_client(model)

    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": message},
    ]

    response = ""
    # Name the streamed item `chunk` — the original reused `message`,
    # shadowing the function parameter inside the loop.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas can carry content=None (e.g. the final chunk);
        # the original `response += token` would raise TypeError there.
        if token:
            response += token
            yield response
37
 
38
+
39
def main():
    """Build the Gradio UI for the KGrammar demo and launch the app."""
    # Static HTML/Markdown shown at the top of the page describing the demo.
    # NOTE(review): gradio's Markdown component cleans common leading
    # whitespace, so the uniform indent inside this literal is not rendered —
    # confirm against the gradio version pinned for this Space.
    description_text = """
    </br><span style="font-size: 18px;">Use <strong>KGrammar</strong>, an offline-ready evaluation framework for Korean AI models, to detect grammatical errors in an LLM-generated response. KGrammar focuses solely on identifying errors, including foreign language usage, without assessing response accuracy or usefulness. Submit the inputs to obtain the grammar evaluation results.</span></br></br>
    <span style="font-size: 18px;">1️⃣ <strong>User Message</strong>: Input the LLM-generated response to be evaluated.</span> </br>
    <span style="font-size: 18px;">2️⃣ <strong>System Message</strong>: Provide the prompt used for grammar evaluation.</span> </br>
    <span style="font-size: 18px;">3️⃣ <strong>Model Selection</strong>: Choose a KGrammar model for assessment.</span> </br>
    <span style="font-size: 18px;">4️⃣ <strong>Max New Tokens</strong>: Set the maximum number of tokens for the evaluation output.</span> </br>
    <span style="font-size: 18px;">5️⃣ <strong>Temperature</strong>: Adjust the temperature to balance predictability and creativity in KGrammar’s evaluation.</span> </br>
    <span style="font-size: 18px;">6️⃣ <strong>Top-p (Nucleus Sampling)</strong>: Modify the top-p value to control fluency and diversity in the evaluation output.</span> </br></br>
    <span style="font-size: 18px;"><a href="https://huggingface.co/davidkim205/kgrammar-2-9b" target="_blank" style="text-decoration: none; color: #000080;">Kgrammar model link << Click here!</a></span> </br></br>
    """
    with gr.Blocks() as app:
        gr.Markdown("# Checking Korean Grammar Accuracy with KGrammar")
        gr.Markdown(description_text)
        # Side-by-side example: an LLM answer mixing Korean with Chinese/Thai
        # (left) and the expected tagged grammar-error report (right).
        gr.Markdown("## Example")
        with gr.Row():
            with gr.Column(scale=1):
                gr.Textbox(label="Input Example",
                           value="우선, 성인 티켓의 가격은 $4입니다. \n총 판매된 티켓 수는 59장이며, 학생 티켓은 9장입니다. 我们需要计算学生票价。\n성인 티켓은 50장이 판매되었으므로, 성인 티켓으로 얻은 수익은 50 * 4 = $200입니다. \nTotal revenue是 $222.50, 所以学生票的收入为 $222.50 - $200 = $22.50。俄语로 说, 每张学生票的价格 = $22.50 ÷ 9 = $2.50。\n학생 티켓의 가격은 2.5ดอลล่าครับ.",
                           max_lines=7)
            with gr.Column(scale=1):
                gr.Textbox(label="Output Example",
                           value="<incorrect grammar>```\n- \"我们需要计算学生票价。\" 문장에서 중국어 사용이 문맥상 부자연스럽습니다.\n- \"Total revenue是 $222.50, 所以学生票的收入为 $222.50 - $200 = $22.50。\" 문장에서 중국어 사용이 문맥상 부자연스럽습니다.\n- \"俄语로 说, 每张学生票的价格 = $22.50 ÷ 9 = $2.50。\" 문장에서 중국어 사용이 문맥상 부자연스럽습니다.\n- \"학생 티켓의 가격은 2.5ดอลล่าครับ.\" 문장에서 태국어 사용이 문맥상 부자연스럽습니다.\n</incorrect grammar> <wrong count>4</wrong count>\n```",
                           max_lines=7)
        gr.Markdown("## Try it out!")
        # Interactive form: streams `respond` output into a textbox.
        # The system-message default is the Korean evaluation prompt the
        # KGrammar models were tuned for (tag format: <incorrect grammar>
        # ... </incorrect grammar> plus <wrong count> N </wrong count>).
        gr.Interface(
            fn=respond,
            inputs=[
                gr.Textbox(label="User Message"),
                gr.Textbox(
                    value="한국어 문맥상 부자연스러운 부분을 찾으시오. 오류 문장과 개수는 <incorrect grammar> </incorrect grammar> tag, 즉 <incorrect grammar> - 오류 문장과 설명 </incorrect grammar> 안에 담겨 있으며, <wrong count> </wrong count> tag, 즉 <wrong count> 오류 개수 </wrong count> 이다.",
                    label="System message"),
                gr.Dropdown(choices=["davidkim205/kgrammar-2-1b", "davidkim205/kgrammar-2-3b"],
                            value="davidkim205/kgrammar-2-1b", label="Model Selection"),
                gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"),
                gr.Slider(minimum=0.1, maximum=4.0, value=1.0, step=0.1, label="Temperature"),
                gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
            ],
            outputs="textbox"
        )
    app.launch()
80
+
81
 
82
if __name__ == "__main__":
    # Build and launch the demo only when run as a script (not on import).
    main()