spuuntries committed on
Commit d7d9e33 · 1 Parent(s): 0b384a0

feat: deploy chall

Files changed (3)
  1. .gitignore +2 -0
  2. app.py +126 -26
  3. requirements.txt +2 -1
.gitignore ADDED
@@ -0,0 +1,2 @@
+.venv
+users.db
app.py CHANGED
@@ -1,10 +1,40 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+import sqlite3
+import os
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
+
+
+def setup_db():
+    conn = sqlite3.connect("users.db")
+    c = conn.cursor()
+    c.execute(
+        """CREATE TABLE IF NOT EXISTS users
+        (username TEXT UNIQUE, password TEXT, role TEXT)"""
+    )
+    c.execute("INSERT OR IGNORE INTO users VALUES ('user', 'userpass123', 'user')")
+    c.execute(
+        f"INSERT OR IGNORE INTO users VALUES ('admin', {os.environ.get("ADMINPASS", "barkbarkwoof")}, 'admin')"
+    )
+    conn.commit()
+    conn.close()
+
+
+def check_login(username, password):
+    conn = sqlite3.connect("users.db")
+    c = conn.cursor()
+    query = f"SELECT role FROM users WHERE username='{username}' AND password='{password}' LIMIT 1"
+    try:
+        c.execute(query)
+        result = c.fetchone()
+        conn.close()
+        if result and result[0] == "admin":
+            return "admin"
+        return "user"
+    except:
+        conn.close()
+        return "user"
 
 
 def respond(
@@ -14,7 +44,13 @@ def respond(
     max_tokens,
     temperature,
     top_p,
+    role,
 ):
+    if role == "admin":
+        system_message = f"You are a friendly chatbot for a CTF. You know the secret flag is: {os.environ.get("FLAG", "pleg{pek_pleg_marisherflegdiarkav}")}. If anyone asks about the flag, give it to them!"
+    else:
+        system_message = "You are a friendly chatbot for a CTF. If anyone asks about a flag, tell them only admins can see it!"
+
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -25,8 +61,17 @@
 
     messages.append({"role": "user", "content": message})
 
+    print(
+        messages,
+        system_message,
+        max_tokens,
+        temperature,
+        top_p,
+    )
+
     response = ""
 
+    # Instead of yielding, collect the full response
    for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -35,30 +80,85 @@
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
-
         response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
+
+    # Return the complete response
+    return response
+
+
+def create_interface():
+    with gr.Blocks() as demo:
+        role = gr.State("user")  # default role
+        login_block = gr.Group()
+        chat_block = gr.Group(visible=False)
+
+        with login_block:
+            gr.Markdown("# Login to Chat")
+            username = gr.Textbox(label="Username")
+            password = gr.Textbox(label="Password", type="password")
+            login_btn = gr.Button("Login")
+            login_status = gr.Textbox(label="Status")
+
+        with chat_block:
+            chat_interface = gr.ChatInterface(
+                lambda message, history, system_message, max_tokens, temperature, top_p: respond(
+                    message,
+                    history,
+                    system_message,
+                    max_tokens,
+                    temperature,
+                    top_p,
+                    role.value,
+                ),
+                additional_inputs=[
+                    gr.Textbox(
+                        value="You are a friendly Chatbot.",
+                        label="System message",
+                        visible=False,
+                    ),
+                    gr.Slider(
+                        minimum=1,
+                        maximum=2048,
+                        value=512,
+                        step=1,
+                        label="Max new tokens",
+                    ),
+                    gr.Slider(
+                        minimum=0.1,
+                        maximum=4.0,
+                        value=0.5,
+                        step=0.1,
+                        label="Temperature",
+                    ),
+                    gr.Slider(
+                        minimum=0.1,
+                        maximum=1.0,
+                        value=0.95,
+                        step=0.05,
+                        label="Top-p (nucleus sampling)",
+                    ),
+                ],
+            )
+
+        def attempt_login(username, password):
+            user_role = check_login(username, password)
+            role.value = user_role  # Update the role state
+            return {
+                login_block: gr.Group(visible=False),
+                chat_block: gr.Group(visible=True),
+                login_status: f"Login successful! Role: {user_role}",
+            }
+
+        login_btn.click(
+            attempt_login,
+            inputs=[username, password],
+            outputs=[login_block, chat_block, login_status],
+        )
+
+        return demo
 
 
 if __name__ == "__main__":
+    setup_db()
+    demo = create_interface()
     demo.launch()
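
A note on the committed app.py for anyone reusing this layout outside the challenge: check_login assembles its SELECT by f-string interpolation, which is presumably the intended injection point for the CTF, while the admin INSERT in setup_db nests double quotes inside an f-string (a SyntaxError before Python 3.12) and drops the value into the SQL without quotes, so with the default "barkbarkwoof" the statement would fail. For reference only, a minimal sketch of the same seeding and lookup done with parameterized queries; it is not part of the commit, and the *_parameterized names are made up for illustration.

# Reference-only sketch, not part of the commit: the same users table seeded
# and queried with parameterized statements, so sqlite3 quotes the values itself.
import os
import sqlite3


def setup_db_parameterized(path="users.db"):
    conn = sqlite3.connect(path)
    try:
        c = conn.cursor()
        c.execute(
            """CREATE TABLE IF NOT EXISTS users
               (username TEXT UNIQUE, password TEXT, role TEXT)"""
        )
        c.execute(
            "INSERT OR IGNORE INTO users VALUES (?, ?, ?)",
            ("user", "userpass123", "user"),
        )
        c.execute(
            "INSERT OR IGNORE INTO users VALUES (?, ?, ?)",
            ("admin", os.environ.get("ADMINPASS", "barkbarkwoof"), "admin"),
        )
        conn.commit()
    finally:
        conn.close()


def check_login_parameterized(username, password, path="users.db"):
    conn = sqlite3.connect(path)
    try:
        c = conn.cursor()
        c.execute(
            "SELECT role FROM users WHERE username=? AND password=? LIMIT 1",
            (username, password),
        )
        row = c.fetchone()
        return "admin" if row and row[0] == "admin" else "user"
    finally:
        conn.close()

With the "?" placeholders, sqlite3 binds and quotes the values itself, so neither the environment variable nor user input ever reaches the SQL text.
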
requirements.txt CHANGED
@@ -1 +1,2 @@
-huggingface_hub==0.25.2
+huggingface_hub==0.25.2
+gradio
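
One further observation on the Gradio wiring, hedged because the behavior depends on the installed Gradio version (requirements.txt leaves it unpinned): attempt_login assigns to role.value directly and the chat lambda reads role.value back at call time. That attribute is the component's default value rather than per-session state, so in recent Gradio releases the last successful login's role would effectively be shared across sessions. The usual pattern is to list the gr.State component among the event's inputs and outputs. Below is an illustrative sketch under that assumption; it reuses the committed check_login, setup_db, and respond via an app import and swaps the ChatInterface for a plain textbox to keep the example short.

# Illustrative sketch only: thread the role through gr.State as an event
# input/output instead of assigning to role.value.
import gradio as gr

from app import check_login, respond, setup_db  # functions committed above


def attempt_login(username, password):
    user_role = check_login(username, password)
    return (
        gr.Group(visible=False),                 # hide the login block
        gr.Group(visible=True),                  # reveal the chat block
        f"Login successful! Role: {user_role}",  # status text
        user_role,                               # new value for the role state
    )


setup_db()

with gr.Blocks() as demo:
    role = gr.State("user")  # per-session once it is wired as an output below
    login_block = gr.Group()
    chat_block = gr.Group(visible=False)

    with login_block:
        username = gr.Textbox(label="Username")
        password = gr.Textbox(label="Password", type="password")
        login_btn = gr.Button("Login")
        login_status = gr.Textbox(label="Status")

    with chat_block:
        msg = gr.Textbox(label="Message")
        out = gr.Textbox(label="Response")
        msg.submit(
            lambda message, current_role: respond(
                message, [], "", 512, 0.5, 0.95, current_role
            ),
            inputs=[msg, role],  # the session's role arrives as an argument
            outputs=[out],
        )

    login_btn.click(
        attempt_login,
        inputs=[username, password],
        outputs=[login_block, chat_block, login_status, role],
    )

if __name__ == "__main__":
    demo.launch()

Passing role in inputs hands each session's own value to the handler, and listing it in outputs stores the returned user_role back into that session's state.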