Upload folder using huggingface_hub
Test_RAG.py CHANGED
@@ -60,7 +60,7 @@ if not text_example_cn_path.exists():
         f.write(content.read())
 
 model_language = "English"
-llm_model_id= "
+llm_model_id= "llama-3-8b-instruct"
 llm_model_configuration = SUPPORTED_LLM_MODELS[model_language][llm_model_id]
 print(f"Selected LLM model {llm_model_id}")
 prepare_int4_model = True # Prepare INT4 model
@@ -291,7 +291,7 @@ if llm_model_id == "red-pajama-3b-chat" and "GPU" in core.available_devices and
     ov_config["INFERENCE_PRECISION_HINT"] = "f32"
 
 llm = HuggingFacePipeline.from_model_id(
-    model_id=
+    model_id="meta-llama/Meta-Llama-3-8B",
     task="text-generation",
     backend="openvino",
     model_kwargs={
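The first hunk pins the selected model to "llama-3-8b-instruct" and looks its configuration up in SUPPORTED_LLM_MODELS. A minimal sketch of that lookup, using a hypothetical stub table (the real SUPPORTED_LLM_MODELS comes from the script's configuration and is not shown in this diff):

    # Hypothetical stub; assumed to map language -> model id -> config dict.
    SUPPORTED_LLM_MODELS = {
        "English": {
            "llama-3-8b-instruct": {
                "model_id": "meta-llama/Meta-Llama-3-8B-Instruct",  # assumed key
            },
        },
    }

    model_language = "English"
    llm_model_id = "llama-3-8b-instruct"  # value introduced by this commit
    llm_model_configuration = SUPPORTED_LLM_MODELS[model_language][llm_model_id]
    print(f"Selected LLM model {llm_model_id}")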
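The second hunk passes a Hugging Face Hub id directly to HuggingFacePipeline.from_model_id with the OpenVINO backend. A minimal sketch of the completed call, assuming langchain_community's OpenVINO support; the diff truncates model_kwargs, so the device, ov_config, and pipeline_kwargs values below are assumptions:

    from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

    # Assumed OpenVINO options; the diff only shows the f32 precision hint
    # being forced for red-pajama-3b-chat on GPU.
    ov_config = {"PERFORMANCE_HINT": "LATENCY", "CACHE_DIR": ""}

    llm = HuggingFacePipeline.from_model_id(
        model_id="meta-llama/Meta-Llama-3-8B",  # Hub id set by this commit
        task="text-generation",
        backend="openvino",  # loads the model via optimum-intel's OVModelForCausalLM
        model_kwargs={
            "device": "CPU",         # assumed; truncated in the diff
            "ov_config": ov_config,  # assumed; truncated in the diff
        },
        pipeline_kwargs={"max_new_tokens": 256},  # assumed generation setting
    )

    print(llm.invoke("What is retrieval-augmented generation?"))

Note that gated Meta-Llama checkpoints require accepting the license and authenticating with the Hub before from_model_id can download them.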