sitammeur committed
Commit c3b8348 · verified · 1 Parent(s): 79e3c7c

Update app.py

Files changed (1): app.py +13 -10
app.py CHANGED

@@ -19,8 +19,7 @@ from exception import CustomExceptionHandling
 
 
 # Download gguf model files
-llm = None
-llm_model = None
+huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
 
 hf_hub_download(
     repo_id="bartowski/google_gemma-3-1b-it-GGUF",
@@ -38,6 +37,10 @@ title = "Gemma3 Llama.cpp"
 description = """SmolLM2, a family of three small language models, performs well in instruction following and reasoning. The largest model significantly improves over its predecessor through advanced training techniques."""
 
 
+# Download gguf model files
+llm = None
+llm_model = None
+
 def respond(
     message: str,
     history: List[Tuple[str, str]],
@@ -88,7 +91,7 @@ def respond(
     # Create the agent
     agent = LlamaCppAgent(
         provider,
-        # system_prompt=f"{system_message}",
+        system_prompt=f"{system_message}",
         predefined_messages_formatter_type=MessagesFormatterType.GEMMA_2,
         debug_output=True,
     )
@@ -148,16 +151,16 @@ demo = gr.ChatInterface(
             "google_gemma-3-1b-it-Q6_K.gguf",
             "google_gemma-3-1b-it-Q5_K_M.gguf",
         ],
-        value="google_gemma-3-1b-it-Q6_K.gguf",
+        value="google_gemma-3-1b-it-Q5_K_M.gguf",
         label="Model",
        info="Select the AI model to use for chat",
        ),
-        # gr.Textbox(
-        #     value="You are a helpful AI assistant focused on accurate and ethical responses.",
-        #     label="System Prompt",
-        #     info="Define the AI assistant's personality and behavior",
-        #     lines=2,
-        # ),
+        gr.Textbox(
+            value="You are a helpful AI assistant focused on accurate and ethical responses.",
+            label="System Prompt",
+            info="Define the AI assistant's personality and behavior",
+            lines=2,
+        ),
         gr.Slider(
             minimum=512,
             maximum=2048,
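
Note on the new huggingface_token assignment: the first hunk only reads the token from the environment, and the visible context does not show where it is consumed. A minimal sketch of how it could be threaded into the existing download step, assuming it is intended for hf_hub_download (the token, filename, and local_dir parameters are part of huggingface_hub's hf_hub_download API; the filename and local_dir values below are illustrative, with the filename taken from the model choices shown in the diff):

import os
from huggingface_hub import hf_hub_download

# Read the access token from the environment (e.g. a Space secret);
# os.getenv returns None if the variable is not set.
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

# Pass the token so gated or private repos can be fetched; public GGUF
# repos such as this one also download fine with token=None.
hf_hub_download(
    repo_id="bartowski/google_gemma-3-1b-it-GGUF",
    filename="google_gemma-3-1b-it-Q6_K.gguf",  # illustrative choice
    local_dir="./models",                       # illustrative path
    token=huggingface_token,
)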