Update app.py
app.py CHANGED
@@ -97,26 +97,16 @@ def get_conversational_chain(retriever):
     Answer:
     """
     #model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3, google_api_key=GOOGLE_API_KEY)
-    #llm = pipeline(task="text-generation", model='google/gemma-1.1-2b-it')
-    #llm= pipeline("text-generation", model="nvidia/Llama3-ChatQA-1.5-8B")
     #repo_id='meta-llama/Meta-Llama-3-70B'
     #repo_id = 'mistralai/Mixtral-8x7B-Instruct-v0.1'
     #repo_id= 'nvidia/Llama3-ChatQA-1.5-8B'
     #repo_id= 'google/gemma-1.1-2b-it'
-
-    #tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it")
-    #llm = AutoModelForCausalLM.from_pretrained("google/gemma-1.1-2b-it")
-    #llm = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-128k-instruct", trust_remote_code=True, token=access_token)
-    #llm = pipeline("text-generation", model="google/gemma-1.1-2b-it")
-    #llm = HuggingFacePipeline.from_model_id(
-    #model_id="gpt2",
-    #task="text-generation",
-    #pipeline_kwargs={"max_new_tokens": 10})
-    #from langchain_community.llms import HuggingFaceHub
+

     llm = HuggingFaceHub(
         #repo_id="HuggingFaceH4/zephyr-7b-beta",
-        repo_id = "mistralai/Mistral-7B-v0.1",
+        #repo_id = "mistralai/Mistral-7B-v0.1",
+        repo_id="microsoft/Phi-3-small-128k-instruct",
         huggingfacehub_api_token=os.getenv("HUGGINGFACE_API_KEY"),
         task="text-generation",
     )
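For reference, a minimal sketch of how get_conversational_chain could be assembled around the endpoint configured in this commit. The RetrievalQA wiring and the shortened prompt text below are assumptions for illustration; the actual app.py defines its own prompt template and may compose the chain differently.

import os

from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub


def get_conversational_chain(retriever):
    # Placeholder prompt; the real template in app.py ends with "Answer:".
    prompt = PromptTemplate(
        template="Context:\n{context}\n\nQuestion:\n{question}\n\nAnswer:\n",
        input_variables=["context", "question"],
    )

    # Same Hugging Face Inference endpoint configuration as this commit.
    llm = HuggingFaceHub(
        repo_id="microsoft/Phi-3-small-128k-instruct",
        huggingfacehub_api_token=os.getenv("HUGGINGFACE_API_KEY"),
        task="text-generation",
    )

    # Assumed wiring: stuff the retrieved documents into the prompt and
    # answer with the hosted model.
    chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        chain_type="stuff",
        chain_type_kwargs={"prompt": prompt},
    )
    return chain

A caller would then run something like chain.invoke({"query": user_question}). Note that HuggingFaceHub is deprecated in recent LangChain releases in favor of HuggingFaceEndpoint, so the same repo_id/token/task configuration may eventually need to move there.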