Kkordik committed on
Commit 1f764fa · verified · 1 Parent(s): baa1806

Update app.py

Files changed (1): app.py (+13 -7)
app.py CHANGED
@@ -1,20 +1,21 @@
 import gradio as gr
 from huggingface_hub import snapshot_download
 from pathlib import Path
+import spaces
 from mistral.cli.chat import load_model, generate_stream
 
-# Download the model
 mistral_models_path = Path.home().joinpath('mistral_models', 'mamba-codestral-7B-v0.1')
 mistral_models_path.mkdir(parents=True, exist_ok=True)
 
 snapshot_download(repo_id="mistralai/mamba-codestral-7B-v0.1",
                   allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"],
                   local_dir=mistral_models_path)
+MODEL_PATH = str(mistral_models_path)
 
-# Load the model
-model = load_model(str(mistral_models_path))
 
+@spaces.GPU()
 def generate_response(message, history):
+    model = load_model(MODEL_PATH)
     history_mistral_format = [
         {"role": "user" if i % 2 == 0 else "assistant", "content": m}
         for i, m in enumerate(sum(history, []))
@@ -24,12 +25,17 @@ def generate_response(message, history):
     response = ""
     for chunk in generate_stream(model, history_mistral_format, max_tokens=256):
         response += chunk
-        yield response
+    return response
+
+# Gradio interface
+def chat_interface(message, history):
+    response = generate_response(message, history)
+    return response
 
 iface = gr.ChatInterface(
-    generate_response,
-    title="Mamba Codestral Chat",
-    description="Chat with the Mamba Codestral 7B model.",
+    chat_interface,
+    title="Mamba Codestral Chat (ZeroGPU)",
+    description="Chat with the Mamba Codestral 7B model using Hugging Face Spaces ZeroGPU feature.",
 )
 
 if __name__ == "__main__":
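
For context, the commit follows the ZeroGPU pattern: GPU work runs inside a function decorated with @spaces.GPU(), so load_model moves from module scope into generate_response (at the cost of reloading the weights on each call). Since the diff shows only the changed hunks, below is a sketch of how the full updated app.py plausibly reads after this commit. The closing of the history list, the append of the current user message, and the final iface.launch() fall outside the hunks and are assumptions, marked as such in comments.

import gradio as gr
from huggingface_hub import snapshot_download
from pathlib import Path
import spaces
from mistral.cli.chat import load_model, generate_stream

mistral_models_path = Path.home().joinpath('mistral_models', 'mamba-codestral-7B-v0.1')
mistral_models_path.mkdir(parents=True, exist_ok=True)

snapshot_download(repo_id="mistralai/mamba-codestral-7B-v0.1",
                  allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"],
                  local_dir=mistral_models_path)
MODEL_PATH = str(mistral_models_path)


@spaces.GPU()
def generate_response(message, history):
    # Loading inside the decorated function keeps GPU work within the
    # ZeroGPU-managed scope; the weights are reloaded on every call.
    model = load_model(MODEL_PATH)
    history_mistral_format = [
        {"role": "user" if i % 2 == 0 else "assistant", "content": m}
        for i, m in enumerate(sum(history, []))
    ]
    # Assumed (outside the shown hunks): append the current user message.
    history_mistral_format.append({"role": "user", "content": message})
    response = ""
    for chunk in generate_stream(model, history_mistral_format, max_tokens=256):
        response += chunk
    return response


# Gradio interface
def chat_interface(message, history):
    return generate_response(message, history)


iface = gr.ChatInterface(
    chat_interface,
    title="Mamba Codestral Chat (ZeroGPU)",
    description="Chat with the Mamba Codestral 7B model using Hugging Face Spaces ZeroGPU feature.",
)

if __name__ == "__main__":
    iface.launch()  # assumed; the launch call falls outside the diff hunks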