Renjith95 committed on
Commit 453eab3 · verified · 1 Parent(s): 65486d6
Files changed (1)
  1. app.py +47 -35
app.py CHANGED
@@ -1,23 +1,50 @@
 import os
 import gradio as gr
-from huggingface_hub import InferenceClient
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-import torch
+from transformers import TextStreamer
+from peft import PeftModel
+from unsloth import FastLanguageModel
+
 # Load your model and tokenizer
 model_name = "Renjith95/renj-portfolio-finetuned-model" # Replace with your model name
-auth_token = os.getenv("HF_TOKEN") # Get token from environment variable
-tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=auth_token)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_auth_token=auth_token)
+auth_token = os.getenv("HF_TOKEN") # Now this should work
+# print("Auth token:", auth_token) # To verify it's loaded
+
+# Loading the base model and applying the local adapter.
+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
+
+# 4bit pre quantized models we support for 4x faster downloading + no OOMs.
+fourbit_models = [
+    "unsloth/mistral-7b-bnb-4bit",
+    "unsloth/mistral-7b-instruct-v0.2-bnb-4bit",
+    "unsloth/llama-2-7b-bnb-4bit",
+    "unsloth/llama-2-13b-bnb-4bit",
+    "unsloth/codellama-34b-bnb-4bit",
+    "unsloth/tinyllama-bnb-4bit",
+    "unsloth/gemma-7b-bnb-4bit", # New Google 6 trillion tokens model 2.5x faster!
+    "unsloth/gemma-2b-bnb-4bit",
+] # More models at https://huggingface.co/unsloth
+
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "unsloth/mistral-7b-instruct-v0.3-bnb-4bit", # Choose ANY! eg teknium/OpenHermes-2.5-Mistral-7B
+    max_seq_length = max_seq_length,
+    dtype = dtype,
+    load_in_4bit = load_in_4bit,
+    token = auth_token, # use one if using gated models like meta-llama/Llama-2-7b-hf
+)
+model = PeftModel.from_pretrained(model, "Renjith95/renj-portfolio-finetuned-adapter", use_auth_token=auth_token)
+FastLanguageModel.for_inference(model)
 
 
+# tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=auth_token)
+# model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_auth_token=auth_token)
+text_streamer = TextStreamer(tokenizer, skip_prompt = True)
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    messages = [{"role": "system", "content": system_message}]
+def respond(message, history):
+    messages = []
     for user_msg, assistant_msg in history:
         messages.append({"role": "user", "content": user_msg})
         messages.append({"role": "assistant", "content": assistant_msg})
@@ -32,35 +59,20 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
 
     outputs = model.generate(
         input_ids=inputs,
-        max_new_tokens=max_tokens,
+        max_new_tokens=512,
         use_cache=True,
-        temperature=temperature,
-        top_p=top_p,
+        temperature=0.7,
+        top_p=0.95,
     )
-    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
-
-    # Assuming your model's response is the last part after the user's message
-    response = response.split(message)[-1].strip()
-    yield response
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+
+    response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
+    return response
+
 demo = gr.ChatInterface(
     respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
+    title="Renj Chatbot",
+    description="Ask me anything about my portfolio and projects."
 )
 
-
 if __name__ == "__main__":
     demo.launch(share = True)
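
Note on the collapsed hunk: the new lines 51-58, where `inputs` is built before being passed to `model.generate`, are not expanded in this view. A minimal sketch of how that step is typically written for a chat model such as mistral-7b-instruct-v0.3, assuming the tokenizer's standard `apply_chat_template` API (illustrative only, not the committed source):

    # Illustrative sketch -- the committed code for this step is collapsed in the diff above.
    messages.append({"role": "user", "content": message})  # add the current user turn
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,  # end the prompt where the assistant reply should start
        return_tensors="pt",
    ).to(model.device)               # 2-D tensor, so inputs.shape[1] is the prompt length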
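
A further observation on the new version: `text_streamer` is created at module level, but the visible `model.generate` call never receives it, so nothing is actually streamed. If token-by-token output were wanted, the usual pattern (sketched below under that assumption) is to pass a streamer to `generate`; a plain `TextStreamer` only prints to the server console, and a Gradio UI would normally need `TextIteratorStreamer` plus a `respond` that yields partial text.

    # Sketch: stream tokens to stdout while generating (not what this commit does).
    outputs = model.generate(
        input_ids=inputs,
        max_new_tokens=512,
        use_cache=True,
        temperature=0.7,
        top_p=0.95,
        streamer=text_streamer,  # TextStreamer prints tokens as they are produced
    )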