GenAICoder committed on
Commit
1d7efc1
·
verified ·
1 Parent(s): baaa643

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -17,7 +17,7 @@ from transformers import pipeline
17
  #from transformers import AutoModelForCausalLM
18
 
19
 
20
- #access_token = os.getenv("HUGGINGFACE_API_KEY")
21
 
22
 
23
 
@@ -87,7 +87,8 @@ def get_conversational_chain(retriever):
87
  llm= pipeline("text-generation", model="nvidia/Llama3-ChatQA-1.5-8B")
88
  #repo_id='meta-llama/Meta-Llama-3-70B'
89
  #repo_id = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
90
- #llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.3,token = access_token)
 
91
  #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
92
  #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
93
  #llm = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True, token=access_token)
 
17
  #from transformers import AutoModelForCausalLM
18
 
19
 
20
+ access_token = os.getenv("HUGGINGFACE_API_KEY")
21
 
22
 
23
 
 
87
  llm= pipeline("text-generation", model="nvidia/Llama3-ChatQA-1.5-8B")
88
  #repo_id='meta-llama/Meta-Llama-3-70B'
89
  #repo_id = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
90
+ repo_id= 'nvidia/Llama3-ChatQA-1.5-8B'
91
+ llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.3,token = access_token)
92
  #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
93
  #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
94
  #llm = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True, token=access_token)