MuntasirHossain committed on
Commit
02ca476
·
verified ·
1 Parent(s): 514501e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -14
app.py CHANGED
@@ -15,7 +15,7 @@ from langchain.memory import ConversationBufferMemory
15
  from langchain_community.llms import HuggingFaceHub, HuggingFaceEndpoint
16
  import torch
17
 
18
- list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
19
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
20
 
21
  # Load and split PDF document
@@ -43,20 +43,20 @@ def create_db(splits):
43
 
44
  # Initialize langchain LLM chain
45
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
46
- if llm_model == "meta-llama/Meta-Llama-3-8B-Instruct":
47
- # llm = HuggingFaceEndpoint(
48
- # repo_id=llm_model,
49
- # huggingfacehub_api_token = api_token,
50
- # temperature = temperature,
51
- # max_new_tokens = max_tokens,
52
- # top_k = top_k,
53
- # )
54
-
55
- llm = HuggingFaceHub(
56
- repo_id = llm_model,
57
  huggingfacehub_api_token = api_token,
58
- task="text-generation",
59
- )
 
 
 
 
 
 
 
 
60
  else:
61
  llm = HuggingFaceEndpoint(
62
  huggingfacehub_api_token = api_token,
 
15
  from langchain_community.llms import HuggingFaceHub, HuggingFaceEndpoint
16
  import torch
17
 
18
+ list_llm = ["meta-llama/Llama-3.1-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"] # meta-llama/Meta-Llama-3-8B-Instruct
19
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
20
 
21
  # Load and split PDF document
 
43
 
44
  # Initialize langchain LLM chain
45
  def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
46
+ if llm_model == "meta-llama/Llama-3.1-8B-Instruct":
47
+ llm = HuggingFaceEndpoint(
48
+ repo_id=llm_model,
 
 
 
 
 
 
 
 
49
  huggingfacehub_api_token = api_token,
50
+ temperature = temperature,
51
+ max_new_tokens = max_tokens,
52
+ top_k = top_k,
53
+ )
54
+
55
+ # llm = HuggingFaceHub(
56
+ # repo_id = llm_model,
57
+ # huggingfacehub_api_token = api_token,
58
+ # task="text-generation",
59
+ # )
60
  else:
61
  llm = HuggingFaceEndpoint(
62
  huggingfacehub_api_token = api_token,