ZennethKenneth committed on
Commit
1e82180
·
verified ·
1 Parent(s): 35b2994

trying meta llama

Browse files
Files changed (1) hide show
  1. app.py +1 -2
app.py CHANGED
@@ -8,7 +8,7 @@ For more information on `huggingface_hub` Inference API support, please check th
8
  # requires space hardware update to use large models (TODO)
9
  # client = InferenceClient("mistralai/Mistral-Large-Instruct-2407")
10
  # Note change in instantiation***
11
- text_generator = pipeline("text-generation", model="EleutherAI/gpt-neo-125M")
12
 
13
  def respond(message, history, system_message, max_tokens, temperature, top_p):
14
  # Construct the prompt with system message, history, and user input
@@ -43,7 +43,6 @@ athena = gr.ChatInterface(
43
  - https://plan.com/features/productivity-and-performance
44
  - https://plan.com/features/security-and-connectivity
45
  - https://plan.com/features/connectivity-and-cost
46
-
47
  """,
48
  label="System message"),
49
  gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),
 
8
  # requires space hardware update to use large models (TODO)
9
  # client = InferenceClient("mistralai/Mistral-Large-Instruct-2407")
10
  # Note change in instantiation***
11
+ text_generator = pipeline("text-generation", model="meta-llama/Meta-Llama-3.1-8B")
12
 
13
  def respond(message, history, system_message, max_tokens, temperature, top_p):
14
  # Construct the prompt with system message, history, and user input
 
43
  - https://plan.com/features/productivity-and-performance
44
  - https://plan.com/features/security-and-connectivity
45
  - https://plan.com/features/connectivity-and-cost
 
46
  """,
47
  label="System message"),
48
  gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),