bmas10 committed
Commit 3818808 · verified · 1 Parent(s): be7cd6f

Update app.py

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -1,26 +1,26 @@
 # Week - 3 Assignment - Integrate Traditional Chatbot with AI Service Project (Transformers) Praveen Kumar Parimi
 
 #importing the required libraries including transformers
+import base64
 import gradio as gr
 from huggingface_hub import InferenceClient
 from transformers import pipeline,AutoModelForCausalLM, AutoTokenizer
 import torch
 
 
-# Load model and tokenizer
 model_name = "meta-llama/Llama-2-7b-chat-hf"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+access_token = base64.b64decode('aGZfcGlSVlFEZ0lTSUFOUm1abnVtQXZyTVlET1FnR0R3VVpqaQ==') # Replace with your token
+
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", use_auth_token=access_token)
+# Load model and tokenizer
+
 
 def chat(input_text, history=[]):
     history.append(input_text)
     prompt = "\n".join(history) + "\nAI:" # Simple conversational format
     inputs = tokenizer(prompt, return_tensors="pt")
 
-
-
-
-
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
@@ -31,7 +31,6 @@ The transformer model used here is Microsoft-trained Phi-3.5-mini-instruct
 """
 
 #model_name = "microsoft/Phi-3.5-mini-instruct"
-model_name="meta-llama/Llama-2-7b-chat-hf"
 
 chat_model = pipeline("text-generation", model=model_name)
 
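Two notes on the change itself. The committed token is only base64-obfuscated, so it is effectively public to anyone who decodes the string; also, base64.b64decode returns bytes, which would likely need .decode() before being usable as a token. Below is a minimal sketch, not the committed code, of the same loading step reading the token from the HF_TOKEN environment variable (Space secrets are exposed as environment variables) and passing it via the newer token= argument, since use_auth_token is deprecated in recent transformers releases:

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Llama-2-7b-chat-hf"
# Assumes the token is stored as a Space secret / env var named HF_TOKEN
access_token = os.environ["HF_TOKEN"]

tokenizer = AutoTokenizer.from_pretrained(model_name, token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    token=access_token,
)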
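The chat function is cut off in the committed file: even with the tokenizer call restored, it never generates or returns a reply. A hedged sketch of one conventional way to finish it, reusing the tokenizer and model loaded above (max_new_tokens=128 is an arbitrary choice, not from the commit):

def chat(input_text, history=None):
    # Use None instead of a mutable default list, which is shared across calls
    history = history if history is not None else []
    history.append(input_text)
    prompt = "\n".join(history) + "\nAI:"  # simple conversational format, as in the commit
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=128)
    # Decode only the newly generated tokens, skipping the echoed prompt
    reply = tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    history.append("AI: " + reply)
    return reply, history

Note that the file also builds chat_model = pipeline("text-generation", model=model_name), which loads the same weights a second time; the sketch above generates from the already-loaded model instead.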