Christoph Holthaus committed
Commit def624a · 1 Parent(s): 9307da2

model hf path

Files changed (1): app.py +3 -3
app.py CHANGED
@@ -7,13 +7,13 @@ import psutil
 import os
 
 # load like this - use the variable everywhere
-model_path=os.getenv("MODEL_PATH")
+model_hf_path=os.getenv("MODEL_HF_PATH")
 # show a warning when empty, with a brief description of how to set it
 # also add link to "how to search" with link to bloke by default + example search link + example full value (mistral base?)
 # info about ram requirements
 
 # Initing things
-print(f"debug: init model: {model_path}")
+print(f"debug: init model: {model_hf_path}")
 #llm = Llama(model_path="./model.bin") # LLaMa model
 print("! INITING DONE !")
 
@@ -39,7 +39,7 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-DESCRIPTION = "# Mistral-7B"
+DESCRIPTION = f"# Test model: {model_hf_path}"
 
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>This space is using CPU only. Use a different one if you want to go fast and use GPU. </p>"
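
The TODO comments in the first hunk call for a warning when the variable is empty, plus pointers on how to set it and a note on RAM requirements. Below is a minimal sketch of that startup check; the example repo id and the transformers-based load are assumptions inferred from the imports in the second hunk, not code this commit adds:

```python
import os

from transformers import AutoModelForCausalLM, AutoTokenizer

# Read the model repo id the same way the diff does.
model_hf_path = os.getenv("MODEL_HF_PATH")

if not model_hf_path:
    # The kind of warning the TODO comments describe. The example
    # value below is an assumption, not pinned down by this commit.
    raise SystemExit(
        "MODEL_HF_PATH is not set. Set it to a Hugging Face repo id, "
        "e.g. MODEL_HF_PATH=mistralai/Mistral-7B-v0.1, and pick a model "
        "whose weights fit this Space's RAM."
    )

print(f"debug: init model: {model_hf_path}")

# Loading via transformers matches the imports in the second hunk;
# whether app.py actually loads the model this way is not shown here.
tokenizer = AutoTokenizer.from_pretrained(model_hf_path)
model = AutoModelForCausalLM.from_pretrained(model_hf_path)
```

On a Hugging Face Space, MODEL_HF_PATH would typically be set as a Space variable in the settings UI rather than hard-coded, which is presumably why the commit reads it through os.getenv.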