fix-dependencies-issue

#74
by Se7en258 - opened
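
Summary of the change (the PR itself carries no description): the unit2 llama-index notebooks imported `HuggingFaceInferenceAPIEmbedding`, but the `!pip install` cells never installed the package that provides it, so the import cells could fail on a fresh environment. This PR switches the notebooks to the local `HuggingFaceEmbedding` from `llama-index-embeddings-huggingface`, brings each install cell in line with what the notebook actually imports (adding `llama-index-embeddings-huggingface`, `arize-phoenix`, and `llama-index-tools-google` where needed, dropping unused extras), adds a missing `AgentWorkflow` import in workflows.ipynb, resets stale `execution_count` values to `null`, and refreshes the recorded multi-agent output to reflect the Hugging Face Inference API backend instead of OpenAI.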
unit2/llama-index/agents.ipynb CHANGED
@@ -21,11 +21,11 @@
 },
 {
 "cell_type": "code",
- "execution_count": 43,
+ "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
- "!pip install llama-index datasets llama-index-callbacks-arize-phoenix llama-index-vector-stores-chroma llama-index-llms-huggingface-api -U -q"
+ "!pip install llama-index llama-index-vector-stores-chroma llama-index-llms-huggingface-api llama-index-embeddings-huggingface -U -q"
 ]
 },
 {
@@ -167,7 +167,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 46,
+ "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -175,7 +175,7 @@
 "\n",
 "from llama_index.core import VectorStoreIndex\n",
 "from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI\n",
- "from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding\n",
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
 "from llama_index.core.tools import QueryEngineTool\n",
 "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
 "\n",
@@ -185,7 +185,7 @@
 "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
 "\n",
 "# Create a query engine\n",
- "embed_model = HuggingFaceInferenceAPIEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
+ "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
 "llm = HuggingFaceInferenceAPI(model_name=\"Qwen/Qwen2.5-Coder-32B-Instruct\")\n",
 "index = VectorStoreIndex.from_vector_store(\n",
 " vector_store=vector_store, embed_model=embed_model\n",
unit2/llama-index/components.ipynb CHANGED
@@ -23,7 +23,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "!pip install llama-index datasets llama-index-callbacks-arize-phoenix llama-index-vector-stores-chroma llama-index-llms-huggingface-api -U -q"
+ "!pip install llama-index datasets llama-index-callbacks-arize-phoenix arize-phoenix llama-index-vector-stores-chroma llama-index-llms-huggingface-api llama-index-embeddings-huggingface -U -q"
 ]
 },
 {
@@ -113,12 +113,12 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "Now we have a list of `Document` objects, we can use the `IngestionPipeline` to create nodes from the documents and prepare them for the `QueryEngine`. We will use the `SentenceSplitter` to split the documents into smaller chunks and the `HuggingFaceInferenceAPIEmbedding` to embed the chunks."
+ "Now we have a list of `Document` objects, we can use the `IngestionPipeline` to create nodes from the documents and prepare them for the `QueryEngine`. We will use the `SentenceSplitter` to split the documents into smaller chunks and the `HuggingFaceEmbedding` to embed the chunks."
 ]
 },
 {
 "cell_type": "code",
- "execution_count": 16,
+ "execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -142,7 +142,7 @@
 }
 ],
 "source": [
- "from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding\n",
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
 "from llama_index.core.node_parser import SentenceSplitter\n",
 "from llama_index.core.ingestion import IngestionPipeline\n",
 "\n",
@@ -150,7 +150,7 @@
 "pipeline = IngestionPipeline(\n",
 " transformations=[\n",
 " SentenceSplitter(),\n",
- " HuggingFaceInferenceAPIEmbedding(model_name=\"BAAI/bge-small-en-v1.5\"),\n",
+ " HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\"),\n",
 " ]\n",
 ")\n",
 "\n",
@@ -175,7 +175,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 18,
+ "execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -200,7 +200,7 @@
 "pipeline = IngestionPipeline(\n",
 " transformations=[\n",
 " SentenceSplitter(),\n",
- " HuggingFaceInferenceAPIEmbedding(model_name=\"BAAI/bge-small-en-v1.5\"),\n",
+ " HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\"),\n",
 " ],\n",
 " vector_store=vector_store,\n",
 ")\n",
@@ -218,14 +218,14 @@
 },
 {
 "cell_type": "code",
- "execution_count": 19,
+ "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "from llama_index.core import VectorStoreIndex\n",
- "from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding\n",
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
 "\n",
- "embed_model = HuggingFaceInferenceAPIEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
+ "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
 "index = VectorStoreIndex.from_vector_store(\n",
 " vector_store=vector_store, embed_model=embed_model\n",
 ")"
unit2/llama-index/tools.ipynb CHANGED
@@ -18,11 +18,11 @@
 },
 {
 "cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
- "!pip install llama-index datasets llama-index-callbacks-arize-phoenix llama-index-vector-stores-chroma llama-index-llms-huggingface-api -U -q"
+ "!pip install llama-index llama-index-vector-stores-chroma llama-index-llms-huggingface-api llama-index-embeddings-huggingface llama-index-tools-google -U -q"
 ]
 },
 {
@@ -86,7 +86,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": 8,
+ "execution_count": null,
 "metadata": {},
 "outputs": [
 {
@@ -105,14 +105,14 @@
 "\n",
 "from llama_index.core import VectorStoreIndex\n",
 "from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI\n",
- "from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding\n",
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
 "from llama_index.core.tools import QueryEngineTool\n",
 "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
 "\n",
 "db = chromadb.PersistentClient(path=\"./alfred_chroma_db\")\n",
 "chroma_collection = db.get_or_create_collection(\"alfred\")\n",
 "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
- "embed_model = HuggingFaceInferenceAPIEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
+ "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
 "llm = HuggingFaceInferenceAPI(model_name=\"meta-llama/Llama-3.2-3B-Instruct\")\n",
 "index = VectorStoreIndex.from_vector_store(\n",
 " vector_store=vector_store, embed_model=embed_model\n",
unit2/llama-index/workflows.ipynb CHANGED
@@ -18,11 +18,11 @@
 },
 {
 "cell_type": "code",
- "execution_count": 11,
+ "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
- "!pip install llama-index datasets llama-index-callbacks-arize-phoenix llama-index-vector-stores-chroma llama-index-utils-workflow llama-index-llms-huggingface-api pyvis -U -q"
+ "!pip install llama-index llama-index-vector-stores-chroma llama-index-utils-workflow llama-index-llms-huggingface-api pyvis -U -q"
 ]
 },
 {
@@ -321,7 +321,9 @@
 {
 "data": {
 "text/plain": [
- "AgentOutput(response=ChatMessage(role=<MessageRole.ASSISTANT: 'assistant'>, additional_kwargs={}, blocks=[TextBlock(block_type='text', text='I have handed off the request to an agent who can help you with adding 5 and 3. Please wait for their response.')]), tool_calls=[ToolCallResult(tool_name='handoff', tool_kwargs={'to_agent': 'addition_agent', 'reason': 'Add 5 and 3'}, tool_id='call_F97vcIcsvZjfAAOBzzIifW3y', tool_output=ToolOutput(content='Agent addition_agent is now handling the request due to the following reason: Add 5 and 3.\\nPlease continue with the current request.', tool_name='handoff', raw_input={'args': (), 'kwargs': {'to_agent': 'addition_agent', 'reason': 'Add 5 and 3'}}, raw_output='Agent addition_agent is now handling the request due to the following reason: Add 5 and 3.\\nPlease continue with the current request.', is_error=False), return_direct=True), ToolCallResult(tool_name='handoff', tool_kwargs={'to_agent': 'addition_agent', 'reason': 'Add 5 and 3'}, tool_id='call_jf49ktFRs09xYdOsnApAk2zz', tool_output=ToolOutput(content='Agent addition_agent is now handling the request due to the following reason: Add 5 and 3.\\nPlease continue with the current request.', tool_name='handoff', raw_input={'args': (), 'kwargs': {'to_agent': 'addition_agent', 'reason': 'Add 5 and 3'}}, raw_output='Agent addition_agent is now handling the request due to the following reason: Add 5 and 3.\\nPlease continue with the current request.', is_error=False), return_direct=True)], raw={'id': 'chatcmpl-B6Cy54VQkvlG3VOrmdzCzgwcJmVOc', 'choices': [{'delta': {'content': None, 'function_call': None, 'refusal': None, 'role': None, 'tool_calls': None}, 'finish_reason': 'stop', 'index': 0, 'logprobs': None}], 'created': 1740819517, 'model': 'gpt-3.5-turbo-0125', 'object': 'chat.completion.chunk', 'service_tier': 'default', 'system_fingerprint': None, 'usage': None}, current_agent_name='addition_agent')"
+ "AgentOutput(response=ChatMessage(role=<MessageRole.ASSISTANT: 'assistant'>, additional_kwargs={}, blocks=[TextBlock(block_type='text', text='5 and 3 add up to 8.')]), tool_calls=[ToolCallResult(tool_name='handoff', tool_kwargs={'to_agent': 'add_agent', 'reason': 'The user wants to add two numbers, and the add_agent is better suited for this task.'}, tool_id='831895e7-3502-4642-92ea-8626e21ed83b', tool_output=ToolOutput(content='Agent add_agent is now handling the request due to the following reason: The user wants to add two numbers, and the add_agent is better suited for this task..\n",
+ "Please continue with the current request.', tool_name='handoff', raw_input={'args': (), 'kwargs': {'to_agent': 'add_agent', 'reason': 'The user wants to add two numbers, and the add_agent is better suited for this task.'}}, raw_output='Agent add_agent is now handling the request due to the following reason: The user wants to add two numbers, and the add_agent is better suited for this task..\n",
+ "Please continue with the current request.', is_error=False), return_direct=True), ToolCallResult(tool_name='add', tool_kwargs={'a': 5, 'b': 3}, tool_id='c29dc3f7-eaa7-4ba7-b49b-90908f860cc5', tool_output=ToolOutput(content='8', tool_name='add', raw_input={'args': (), 'kwargs': {'a': 5, 'b': 3}}, raw_output=8, is_error=False), return_direct=False)], raw=ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(role='assistant', content='.', tool_call_id=None, tool_calls=None), index=0, finish_reason=None, logprobs=None)], created=1744553546, id='', model='Qwen/Qwen2.5-Coder-32B-Instruct', system_fingerprint='3.2.1-sha-4d28897', usage=None, object='chat.completion.chunk'), current_agent_name='add_agent')"
 ]
 },
 "execution_count": 33,
@@ -331,6 +333,7 @@
 ],
 "source": [
 "from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI\n",
+ "from llama_index.core.agent.workflow import AgentWorkflow\n",
 "\n",
 "# Define some tools\n",
 "def add(a: int, b: int) -> int:\n",
@@ -367,7 +370,8 @@
 ")\n",
 "\n",
 "# Run the system\n",
- "response = await workflow.run(user_msg=\"Can you add 5 and 3?\")"
+ "response = await workflow.run(user_msg=\"Can you add 5 and 3?\")\n",
+ "response"
 ]
 }
],
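
The refreshed output above comes from rerunning the multi-agent cell against the Inference API. A minimal sketch of that cell with the newly added `AgentWorkflow` import; the `add_agent` name matches the recorded output, while the second agent, the `ReActAgent` constructor arguments, and the model choice are assumptions from the surrounding notebook rather than this diff:

```python
from llama_index.core.agent.workflow import AgentWorkflow, ReActAgent
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")

# Define some tools
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b

def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b

# One agent per tool; the root agent hands off when the other fits better,
# producing the 'handoff' tool calls seen in the recorded output
add_agent = ReActAgent(
    name="add_agent",
    description="Is able to add two integers",
    tools=[add],
    llm=llm,
)
multiply_agent = ReActAgent(
    name="multiply_agent",
    description="Is able to multiply two integers",
    tools=[multiply],
    llm=llm,
)
workflow = AgentWorkflow(
    agents=[multiply_agent, add_agent], root_agent="multiply_agent"
)

# Run the system (top-level await works inside a notebook)
response = await workflow.run(user_msg="Can you add 5 and 3?")
response
```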