clementsan committed
Commit fa4eb33
1 Parent(s): 1ef8d7c
Raise errors for phi-2 and llama-2 models
app.py CHANGED

@@ -109,6 +109,7 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "load_in_8bit": True}
         )
     elif llm_model == "microsoft/phi-2":
+        raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
         llm = HuggingFaceHub(
             repo_id=llm_model,
             model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k, "trust_remote_code": True, "torch_dtype": "auto"}
@@ -118,6 +119,12 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
             repo_id=llm_model,
             model_kwargs={"temperature": temperature, "max_new_tokens": 250, "top_k": top_k}
         )
+    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
+        raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
+        llm = HuggingFaceHub(
+            repo_id=llm_model,
+            model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
+        )
     else:
         llm = HuggingFaceHub(
             repo_id=llm_model,
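
For context, a minimal sketch of how the two new branches behave at runtime. The function name select_llm_sketch and its reduced signature are hypothetical simplifications of initialize_llmchain, and the gradio/langchain import paths are assumptions about what app.py already uses. Because raise gr.Error(...) exits the handler immediately, Gradio surfaces the message as an error popup in the UI, and the HuggingFaceHub(...) calls left in place after each raise are never reached.

import gradio as gr
from langchain.llms import HuggingFaceHub  # assumed import path; app.py may import this differently

def select_llm_sketch(llm_model, temperature, max_tokens, top_k):
    # Hypothetical, simplified stand-in for the branch logic of initialize_llmchain().
    if llm_model == "microsoft/phi-2":
        # A gr.Error raised inside a Gradio event handler is shown to the user as an
        # error popup; execution stops here, so any code after the raise is dead code.
        raise gr.Error("phi-2 model requires 'trust_remote_code=True', currently not supported by langchain HuggingFaceHub...")
    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
        raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
    else:
        # Every other model is still initialized through the Hugging Face Inference API wrapper.
        return HuggingFaceHub(
            repo_id=llm_model,
            model_kwargs={
                "temperature": temperature,
                "max_new_tokens": max_tokens,
                "top_k": top_k,
            },
        )

Leaving the now-unreachable HuggingFaceHub calls in place presumably keeps the diff minimal and makes it easy to re-enable these models later by simply deleting the raise lines.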