LLMproj1 committed on
Commit
c138012
·
verified ·
1 Parent(s): dc9cf2c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -1,12 +1,12 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
 
 
 
 
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
@@ -27,7 +27,7 @@ def respond(
27
 
28
  response = ""
29
 
30
- for message in client.chat_completion(
31
  messages,
32
  max_tokens=max_tokens,
33
  stream=True,
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ from peft import PeftModel, PeftConfig
5
+ from transformers import AutoModelForCausalLM
 
 
 
6
 
7
+ config = PeftConfig.from_pretrained("LLMproj1/mypersona-llama3-8b")
8
+ base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
9
+ model = PeftModel.from_pretrained(base_model, "LLMproj1/mypersona-llama3-8b")
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
 
27
 
28
  response = ""
29
 
30
+ for message in model.generate(
31
  messages,
32
  max_tokens=max_tokens,
33
  stream=True,