Chandranshu Jain committed on
Commit
65a8918
·
verified ·
1 Parent(s): ed4bbdc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -16,7 +16,7 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
16
  #from transformers import AutoModelForCausalLM
17
 
18
 
19
- access_token = os.getenv("HUGGINGFACE_API_KEY")
20
 
21
 
22
 
@@ -66,7 +66,7 @@ def text_splitter(text):
66
 
67
  #GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
68
  #COHERE_API_KEY = os.getenv("COHERE_API_KEY")
69
- HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
70
 
71
  def get_conversational_chain(retriever):
72
  prompt_template = """
@@ -82,10 +82,10 @@ def get_conversational_chain(retriever):
82
  Answer:
83
  """
84
  #model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3, google_api_key=GOOGLE_API_KEY)
85
- repo_id ='google/gemma-1.1-2b-it'
86
  #repo_id='meta-llama/Meta-Llama-3-70B'
87
  #repo_id = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
88
- llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.3,token = access_token)
89
  #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
90
  #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
91
  #llm = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True, token=access_token)
 
16
  #from transformers import AutoModelForCausalLM
17
 
18
 
19
+ #access_token = os.getenv("HUGGINGFACE_API_KEY")
20
 
21
 
22
 
 
66
 
67
  #GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
68
  #COHERE_API_KEY = os.getenv("COHERE_API_KEY")
69
+ #HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
70
 
71
  def get_conversational_chain(retriever):
72
  prompt_template = """
 
82
  Answer:
83
  """
84
  #model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3, google_api_key=GOOGLE_API_KEY)
85
+ llm ='google/gemma-1.1-2b-it'
86
  #repo_id='meta-llama/Meta-Llama-3-70B'
87
  #repo_id = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
88
+ #llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.3,token = access_token)
89
  #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
90
  #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
91
  #llm = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True, token=access_token)