kevinhug committed on
Commit 3b2c402 · 1 Parent(s): 39fa46c
Files changed (2)
  1. graphrag.py +7 -3
  2. requirements.txt +4 -2
graphrag.py CHANGED
@@ -8,15 +8,19 @@ llm = ChatOpenAI(temperature=0, model_name="gpt-4-turbo")
 
 from langchain_ollama.llms import OllamaLLM
 llm = OllamaLLM(temperature=0,model="llama3.2")
-"""
+
 from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 
+llm = HuggingFaceInferenceAPI(temperature=0.2, model_name="meta-llama/Llama-3.2-1B")
+
+"""
+
 # SEE: https://huggingface.co/docs/hub/security-tokens
 # We just need a token with read permissions for this demo
 HF_TOKEN= os.environ["HF_TOKEN"]
 
-llm = HuggingFaceInferenceAPI(temperature=0.2, model_name="meta-llama/Llama-3.2-1B")
-
+from llama_index.llms.litellm import LiteLLM
+llm = LiteLLM("huggingface/meta-llama/Llama-3.2-1B")
 
 import networkx as nx
 import matplotlib.pyplot as plt
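In effect, this hunk folds the HuggingFaceInferenceAPI setup into the commented-out block and builds the active llm through LiteLLM instead. A minimal sketch of the resulting setup follows; the prompt text, and the assumption that LiteLLM picks up the Hugging Face token from the environment (the exact variable it consults can differ by litellm version), are illustrative and not part of the commit.

import os
from llama_index.llms.litellm import LiteLLM

# The script still reads a read-permission Hugging Face token from the
# environment; which variable litellm actually uses for auth is an
# assumption here (it may expect HUGGINGFACE_API_KEY instead of HF_TOKEN).
HF_TOKEN = os.environ["HF_TOKEN"]

# LiteLLM model strings take the form "<provider>/<model>", so this routes
# requests to meta-llama/Llama-3.2-1B on the Hugging Face backend.
llm = LiteLLM("huggingface/meta-llama/Llama-3.2-1B")

# llama_index LLMs expose a synchronous complete() call; the prompt is illustrative.
print(llm.complete("Summarize what a knowledge graph is in one sentence."))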
requirements.txt CHANGED
@@ -11,8 +11,10 @@ llama-index
 faiss-cpu
 tavily-python
 
-llama-index-llms-huggingface-api
-huggingface_hub[inference]
+llama-index-llms-litellm
+
+#llama-index-llms-huggingface-api
+#huggingface_hub[inference]
 
 networkx
 matplotlib