Wei-Hsu-AI committed
Commit 7bf67d3
1 Parent(s): f14a2c8

feat: test version interface

Files changed (5)
  1. .gitattributes +1 -0
  2. .gitignore +2 -0
  3. app.py +189 -47
  4. puzzles.xlsx +3 -0
  5. requirements.txt +6 -1
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ puzzles.xlsx filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ venv
+ watch.py
app.py CHANGED
@@ -1,63 +1,205 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
+ from transformers import BertTokenizerFast, BertForSequenceClassification, AutoModelForMaskedLM, T5Tokenizer, T5ForConditionalGeneration
+ import torch
+ import pandas as pd
+ from sentence_transformers import SentenceTransformer
+ from sklearn.metrics.pairwise import cosine_similarity
+
+ # Load the BERT tokenizer and pretrained model
+ # tokenizer = BertTokenizerFast.from_pretrained('bert-base-chinese')
+ # model = AutoModelForMaskedLM.from_pretrained('bert-base-chinese')
+
+ tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
+ question_judge = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
+
+ answer_judge = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+
+
+ zh_sys_msg="""
+ 你是遊戲的裁判,根據提供的<湯麵>和<湯底>理解故事。玩家會根據<湯麵>進行猜測,並要求你判斷猜測是否正確。你只能回答三種答案:對、錯、不知道。
+
+ ## 判定規則:
+ 1. 玩家猜測正確:回答"對"。
+ 2. 玩家猜測錯誤:回答"錯"。
+ 3. 無法從<湯麵>和<湯底>得出結論或無法推理的問題:回答"不知道"。
+
+ ## 注意:
+ 1. 玩家只能看到<湯麵>,所以他是基於<湯麵>進行猜測,即使<湯底>中有其他信息,也不會影響對<湯麵>的判定。
+ 2. 如果猜測無法從故事中推理出結論,回答"不知道"。
+ 3. 嚴格遵守只回答"對"、"錯"、"不知道"。
+
+ ## 題目內容:
+ ### 湯麵:
+ 一個男人走進一家酒吧,並向酒保要了一杯水。酒保突然拿出一把手槍瞄準他,而男子竟只是笑著說:「謝謝你!」然後從容離開,請問發生了什麼事?
+ ### 湯底:
+ 男子打嗝,他希望喝一杯水來改善狀況。酒保意識到這一點,選擇拿槍嚇他,男子一緊張之下,打嗝自然消失,因而衷心感謝酒保後就離開了。
+
+ 現在,請判斷以下玩家猜測:
+ """
+
+
+
+ intro="""
+ ### 玩法介紹
+
+ 遊戲一開始,我會給你一個不完整的故事,這個故事通常有很多未知的細節,你需要透過提出問題來探索更多線索。你可以問我各種問題,不過請記住,我只能回答三種答案:「是」、「不是」或「與此無關」。你的目標是根據這些有限的答案,逐步推理出故事的完整脈絡,從而揭開事件的真相。
+
+ 這個遊戲的名稱來自於其中一個最經典的題目,海龜湯的故事。由於這類型的遊戲強調水平思考,也就是用非傳統的方式解決問題,這些遊戲就被大家統稱為「海龜湯」,有點像是可樂成為所有碳酸飲料的代名詞。
+
+ 在遊戲中,你的提問會讓你逐漸接近真相。準備好發揮你的推理能力,讓我們開始吧!
+ """
+
+
+ class PuzzleGame:
+     def __init__(self):
+         """
+         Initialize the game class.
+         """
+         self.df = pd.read_excel('puzzles.xlsx')
+         self.title = None
+         self.story = None
+         self.answer = None
+         self.prompt = None
+
+     def get_random_puzzle(self):
+         """
+         Randomly select a puzzle and set the current puzzle's title, story, and answer.
+         """
+         puzzle = self.df.sample(n=1).iloc[0]
+         self.title = puzzle['title']
+         # self.story = puzzle['story']
+         # self.answer = puzzle['answer']
+         self.story = """A man walks into a bar and asks the bartender for a glass of water. The bartender suddenly pulls out a gun and points it at him. The man smiles and says, "Thank you!" then calmly leaves. What happened?"""
+         self.answer = """The man had hiccups and wanted a glass of water to cure them. The bartender realized this and chose to scare him with a gun. The man's hiccups disappeared due to the sudden shock, so he sincerely thanked the bartender before leaving."""
+
+     def get_sample_puzzle(self):
+         """
+         Select the sample puzzle and set the current puzzle's title, story, and answer.
+         """
+         self.title = "The Hiccuping Man"
+         self.story = """A man walks into a bar and asks the bartender for a glass of water. The bartender suddenly pulls out a gun and points it at him. The man smiles and says, "Thank you!" then calmly leaves. What happened?"""
+         self.answer = """The man had hiccups and wanted a glass of water to cure them. The bartender realized this and chose to scare him with a gun. The man's hiccups disappeared due to the sudden shock, so he sincerely thanked the bartender before leaving."""
+
+     def get_prompt(self):
+         """
+         Return the prompt filled in with the puzzle's story and answer.
+         """
+         prompt = f"""
+ You are the game referee, and based on the provided story context and background information, you need to understand the story. The player will make guesses based on the story context, and you are required to judge whether the guess is correct. You can only respond with three possible answers: Correct, Incorrect, or I don’t know.
+
+ Judgment Rules:
+ If the player guesses correctly: Answer "Correct".
+ If the player guesses incorrectly: Answer "Incorrect".
+ If the guess cannot be concluded from the story context and background information, or if it cannot be logically inferred: Answer "I don’t know".
+
+ Notes:
+ The player can only see the story context, so they will make guesses based on the story context. Even if there is other information in the background, it should not influence the judgment of the story context.
+ If the guess cannot be inferred from the story, answer "I don’t know".
+ Strictly follow the answers "Correct", "Incorrect", or "I don’t know".
+
+ Story Context:
+ {self.story}
+
+ Background Information:
+ {self.answer}
+
+ Now, please judge the following player's guess:
+ """
+
+         return prompt
+
+
+ game = PuzzleGame()
+
+ def restart():
+     game.get_random_puzzle()
+     story = [{"role": "assistant", "content": game.story}]
+     return story
+
+ def user(message, history):
+     history.append({"role": "user", "content": message})
+     return message, history
+
+ def bot(message, history):
+     inputs = game.get_prompt() + message
+     input_ids = tokenizer(inputs, return_tensors="pt").input_ids
+
+     with torch.no_grad():
+         outputs = question_judge.generate(input_ids)
+
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     history.append({"role": "assistant", "content": response})
+
+     return "", history
+
+ def check_question(question, history):
+     inputs = game.get_prompt() + question
+     input_ids = tokenizer(inputs, return_tensors="pt").input_ids
+
+     with torch.no_grad():
+         outputs = question_judge.generate(input_ids)
+
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     history.append({"role": "assistant", "content": response})
+
+     return "", history
+
+
+ def check_answer(answer, history):
+     sentences = [answer, game.answer]
+     embeddings = answer_judge.encode(sentences)
+
+     cos_sim = cosine_similarity([embeddings[0]], [embeddings[1]])
+
+     print("相似度: ", cos_sim[0][0])
+
+     if cos_sim[0][0] > 0.8:
+         response = "正確!你猜對了! 完整故事:\n" + game.answer
+     elif cos_sim[0][0] > 0.5:
+         response = "接近了!再試一次!"
+     else:
+         response = "錯誤!再試一次!"
+
+     history.append({"role": "assistant", "content": response})
+
+     return "", history
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(intro)
+     gr.Markdown("---")
+     story = restart()
+     chatbot = gr.Chatbot(type='messages', value=story, height=600)
+
+
+     with gr.Tab("提出問題"):
+         question_input_box = gr.Textbox(
+             show_label=False,
+             placeholder="提問各種可能性的問題...",
+             submit_btn=True,
+         )
+
+         question_input_box.submit(user, [question_input_box, chatbot], [question_input_box, chatbot]).then(
+             check_question, [question_input_box, chatbot], [question_input_box, chatbot]
+         )
+
+     with gr.Tab("輸入答案"):
+         answer_input_box = gr.Textbox(
+             show_label=False,
+             placeholder="請輸入你的答案...",
+             submit_btn=True,
+         )
+
+         answer_input_box.submit(user, [answer_input_box, chatbot], [answer_input_box, chatbot]).then(
+             check_answer, [answer_input_box, chatbot], [answer_input_box, chatbot]
+         )
+
+     restart_btn = gr.ClearButton(value='重新開始新遊戲', inputs=[question_input_box, chatbot])
+     restart_btn.click(restart, outputs=[chatbot])
+


  if __name__ == "__main__":
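
For reference, a minimal standalone sketch (not part of the commit) of the answer-judging path that check_answer() implements: encode the player's guess and the stored solution with the same sentence-transformers model, compare them with cosine similarity, and apply the 0.8 / 0.5 thresholds. The example sentences below are illustrative only.

# Standalone sketch of the check_answer() similarity judgment; example sentences are made up.
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

answer_judge = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

guess = "The man had hiccups and the bartender scared them away with the gun."
solution = "The man had hiccups and wanted a glass of water to cure them."

embeddings = answer_judge.encode([guess, solution])
score = cosine_similarity([embeddings[0]], [embeddings[1]])[0][0]

# Same thresholds as check_answer(): > 0.8 counts as correct, > 0.5 as close, otherwise wrong.
if score > 0.8:
    print("correct")
elif score > 0.5:
    print("close")
else:
    print("wrong")
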
puzzles.xlsx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2334e69f0ac1382849365d13a6d50cd17a64df4bcf7ab1ff57a79f8fde70ebb9
+ size 289440
requirements.txt CHANGED
@@ -1 +1,6 @@
- huggingface_hub==0.25.2
+ gradio==5.7.1
+ pandas==2.2.3
+ scikit_learn==1.5.2
+ sentence_transformers==3.3.1
+ torch==2.5.1+cu118
+ transformers==4.46.3
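
Note on the torch pin: `torch==2.5.1+cu118` is a CUDA-specific local version that is generally not published on PyPI alone, so a plain `pip install -r requirements.txt` may fail to resolve it. If that happens, the requirements file would likely also need the PyTorch CUDA wheel index, roughly as sketched below (an assumption, not part of the commit):

--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.5.1+cu118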