jayavibhav committed on
Commit
8a8fed6 · verified · 1 Parent(s): 99cb511

Update app.py

Files changed (1)
  1. app.py +3 -2109
app.py CHANGED
@@ -11,2115 +11,9 @@ import re
11
  import time
12
 
13
  #prompts
14
- modal_params = """# All possible @app.function() parameters
15
- @app.function(
16
- # Resource Configuration
17
- cpu=float, # CPU cores (e.g., 4.0)
18
- cpu=(float, float), # (request, limit) tuple
19
- memory=int, # Memory in MB
20
- memory=(int, int), # (request, limit) tuple
21
- gpu="T4" | "L4" | "A10G" | "A100-40GB" | "A100-80GB" | "L40S" | "H100" | "H200" | "B200",
22
- gpu="GPU_TYPE:count", # Multiple GPUs
23
- gpu=["GPU_TYPE", "GPU_TYPE:count"], # GPU fallback list
24
- ephemeral_disk=int, # Ephemeral disk in MB
25
-
26
- # Container Configuration
27
- image=modal.Image, # Container image
28
- secrets=[modal.Secret], # List of secrets
29
- volumes={"/path": modal.Volume}, # Volume mounts
30
- cloud_bucket_mounts={}, # Cloud storage mounts
31
-
32
- # Networking & Concurrency
33
- allow_concurrent_inputs=int, # Concurrent requests per container
34
- concurrency_limit=int, # Max concurrent containers
35
- container_idle_timeout=int, # Idle timeout in seconds
36
- timeout=int, # Function timeout in seconds
37
-
38
- # Scheduling & Retry
39
- schedule=modal.Cron | modal.Period, # Scheduling
40
- retries=int, # Retry attempts
41
- retry=modal.Retry, # Custom retry policy
42
-
43
- # Environment & Runtime
44
- environment=dict, # Environment variables
45
- keep_warm=int, # Warm containers count
46
- architecture="x86_64" | "arm64", # CPU architecture
47
- cloud="aws" | "gcp", # Cloud provider
48
-
49
- # Batch Processing
50
- batch_max_size=int, # Max batch size
51
-
52
- # Metadata
53
- name=str, # Function name
54
- is_generator=bool, # Generator function flag
55
- )
56
- """
57
-
58
- modal_demo = """
59
- import modal
60
-
61
- app = modal.App("gradio-app")
62
- web_image = modal.Image.debian_slim(python_version="3.12").pip_install(
63
- "fastapi[standard]==0.115.4",
64
- "gradio~=5.7.1",
65
- "pillow~=10.2.0",
66
- )
67
-
68
- @app.function(
69
- image=web_image,
70
- min_containers=1,
71
- scaledown_window=60 * 20,
72
- # gradio requires sticky sessions
73
- # so we limit the number of concurrent containers to 1
74
- # and allow it to scale to 100 concurrent inputs
75
- max_containers=1,
76
- )
77
- @modal.concurrent(max_inputs=100)
78
- @modal.asgi_app()
79
- def ui():
80
-
81
- import gradio as gr
82
- from fastapi import FastAPI
83
- from gradio.routes import mount_gradio_app
84
- import time
85
-
86
- def greet(name, intensity):
87
- time.sleep(5) # Simulating processing time
88
- return "Hello, " + name + "!" * int(intensity)
89
-
90
- demo = gr.Interface(
91
- fn=greet,
92
- inputs=["text", "slider"],
93
- outputs=["text"],
94
- )
95
- demo.queue(max_size=5) # Enable queue for handling multiple requests
96
-
97
- return mount_gradio_app(app=FastAPI(), blocks=demo, path="/")
98
- """
99
-
100
- all_nodes = '''
101
- example workflow
102
- {
103
- "workflow_id": "simple-chatbot-v1",
104
- "workflow_name": "Simple Chatbot",
105
- "nodes": [
106
- {
107
- "id": "ChatInput-1",
108
- "type": "ChatInput",
109
- "data": {
110
- "display_name": "User's Question",
111
- "template": {
112
- "input_value": {
113
- "display_name": "Input",
114
- "type": "string",
115
- "value": "What is the capital of France?",
116
- "is_handle": true
117
- }
118
- }
119
- },
120
- "resources": {
121
- "cpu": 0.1,
122
- "memory": "128Mi",
123
- "gpu": "none"
124
- }
125
- },
126
- {
127
- "id": "Prompt-1",
128
- "type": "Prompt",
129
- "data": {
130
- "display_name": "System Prompt",
131
- "template": {
132
- "prompt_template": {
133
- "display_name": "Template",
134
- "type": "string",
135
- "value": "You are a helpful geography expert. The user asked: {input_value}",
136
- "is_handle": true
137
- }
138
- }
139
- },
140
- "resources": {
141
- "cpu": 0.1,
142
- "memory": "128Mi",
143
- "gpu": "none"
144
- }
145
- },
146
- {
147
- "id": "OpenAI-1",
148
- "type": "OpenAIModel",
149
- "data": {
150
- "display_name": "OpenAI gpt-4o-mini",
151
- "template": {
152
- "model": {
153
- "display_name": "Model",
154
- "type": "options",
155
- "options": ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"],
156
- "value": "gpt-4o-mini"
157
- },
158
- "api_key": {
159
- "display_name": "API Key",
160
- "type": "SecretStr",
161
- "required": true,
162
- "env_var": "OPENAI_API_KEY"
163
- },
164
- "prompt": {
165
- "display_name": "Prompt",
166
- "type": "string",
167
- "is_handle": true
168
- }
169
- }
170
- },
171
- "resources": {
172
- "cpu": 0.5,
173
- "memory": "256Mi",
174
- "gpu": "none"
175
- }
176
- },
177
- {
178
- "id": "ChatOutput-1",
179
- "type": "ChatOutput",
180
- "data": {
181
- "display_name": "Final Answer",
182
- "template": {
183
- "response": {
184
- "display_name": "Response",
185
- "type": "string",
186
- "is_handle": true
187
- }
188
- }
189
- },
190
- "resources": {
191
- "cpu": 0.1,
192
- "memory": "128Mi",
193
- "gpu": "none"
194
- }
195
- }
196
- ],
197
- "edges": [
198
- {
199
- "source": "ChatInput-1",
200
- "source_handle": "input_value",
201
- "target": "Prompt-1",
202
- "target_handle": "prompt_template"
203
- },
204
- {
205
- "source": "Prompt-1",
206
- "source_handle": "prompt_template",
207
- "target": "OpenAI-1",
208
- "target_handle": "prompt"
209
- },
210
- {
211
- "source": "OpenAI-1",
212
- "source_handle": "response",
213
- "target": "ChatOutput-1",
214
- "target_handle": "response"
215
- }
216
- ]
217
- }
218
-
219
-
220
- ## input node
221
- {
222
- "id": "Input-1",
223
- "type": "Input",
224
- "data": {
225
- "display_name": "Source Data",
226
- "template": {
227
- "data_type": {
228
- "display_name": "Data Type",
229
- "type": "options",
230
- "options": ["string", "image", "video", "audio", "file"],
231
- "value": "string"
232
- },
233
- "value": {
234
- "display_name": "Value or Path",
235
- "type": "string",
236
- "value": "This is the initial text."
237
- },
238
- "data": {
239
- "display_name": "Output Data",
240
- "type": "object",
241
- "is_handle": true
242
- }
243
- }
244
- },
245
- "resources": {
246
- "cpu": 0.1,
247
- "memory": "128Mi",
248
- "gpu": "none"
249
- }
250
- }
251
-
252
-
253
- from typing import Any, Dict
254
-
255
- def process_input(data_type: str, value: Any) -> Dict[str, Any]:
256
- """
- Packages the source data and its type for downstream nodes.
- """
- # The output is a dictionary containing both the type and the data/path.
- # This gives the next node context on how to handle the value.
261
- output_package = {
262
- "type": data_type,
263
- "value": value
264
- }
265
- return {"data": output_package}
266
-
267
- process_input("string", "hi")
268
-
269
- ## output node
270
- """{
271
- "id": "Output-1",
272
- "type": "Output",
273
- "data": {
274
- "display_name": "Final Result",
275
- "template": {
276
- "input_data": {
277
- "display_name": "Input Data",
278
- "type": "object",
279
- "is_handle": true
280
- }
281
- }
282
- },
283
- "resources": {
284
- "cpu": 0.1,
285
- "memory": "128Mi",
286
- "gpu": "none"
287
- }
288
- }"""
289
-
290
- from typing import Any, Dict
291
-
292
- def process_output(input_data: Dict[str, Any]) -> Dict[str, Any]:
- """
- Receives the final data package and returns its contents.
- """
- # Unpacks the dictionary received from the upstream node.
- data_type = input_data.get("type", "unknown")
- value = input_data.get("value", "No value provided")
- 
- # print("--- Final Workflow Output ---")
- # print(f" Data Type: {data_type}")
- # print(f" Value/Path: {value}")
- # print("-----------------------------")
- 
- # Don't print the output, just return it.
- return {"type": data_type, "value": value}
306
-
307
- process_output({'type': 'string', 'value': 'hi'})
308
-
309
- ## api request node
310
- """{
311
- "id": "APIRequest-1",
312
- "type": "APIRequest",
313
- "data": {
314
- "display_name": "Get User Data",
315
- "template": {
316
- "url": {
317
- "display_name": "URL",
318
- "type": "string",
319
- "value": "https://api.example.com/users/1"
320
- },
321
- "method": {
322
- "display_name": "Method",
323
- "type": "options",
324
- "options": ["GET", "POST", "PUT", "DELETE"],
325
- "value": "GET"
326
- },
327
- "headers": {
328
- "display_name": "Headers (JSON)",
329
- "type": "string",
330
- "value": "{\"Authorization\": \"Bearer YOUR_TOKEN\"}"
331
- },
332
- "body": {
333
- "display_name": "Request Body",
334
- "type": "object",
335
- "is_handle": true
336
- },
337
- "response": {
338
- "display_name": "Response Data",
339
- "type": "object",
340
- "is_handle": true
341
- }
342
- }
343
- },
344
- "resources": {
345
- "cpu": 0.2,
346
- "memory": "256Mi",
347
- "gpu": "none"
348
- }
349
- }"""
350
-
351
- import requests
352
- import json
353
- from typing import Any, Dict
354
-
355
- def process_api_request(url: str, method: str, headers: str, body: Dict = None) -> Dict[str, Any]:
356
- """
357
- Performs an HTTP request and returns the JSON response.
358
- """
359
- try:
360
- parsed_headers = json.loads(headers)
361
- except json.JSONDecodeError:
362
- print("Warning: Headers are not valid JSON. Using empty headers.")
363
- parsed_headers = {}
364
-
365
- try:
366
- response = requests.request(
367
- method=method,
368
- url=url,
369
- headers=parsed_headers,
370
- json=body,
371
- timeout=10 # 10-second timeout
372
- )
373
- # Raise an exception for bad status codes (4xx or 5xx)
374
- response.raise_for_status()
375
-
376
- # The output is a dictionary containing the JSON response.
377
- return {"response": response.json()}
378
-
379
- except requests.exceptions.RequestException as e:
380
- print(f"Error during API request: {e}")
381
- # Return an error structure on failure
382
- return {"response": {"error": str(e), "status_code": getattr(e.response, 'status_code', 500)}}
383
-
384
- url = "https://jsonplaceholder.typicode.com/posts"
385
- method = "GET"
386
- headers = "{}" # empty JSON headers
387
- body = None # GET requests typically don't send a JSON body
388
-
389
- result = process_api_request(url, method, headers, body)
390
- print(result)
391
-
392
- url = "https://jsonplaceholder.typicode.com/posts"
393
- method = "POST"
394
- headers = '{"Content-Type": "application/json"}'
395
- body = {
396
- "title": "foo",
397
- "body": "bar",
398
- "userId": 1
399
- }
400
-
401
- result = process_api_request(url, method, headers, body)
402
- print(result)
403
-
404
- ## react agent tool
405
- import os
406
- import asyncio
407
- from typing import List, Dict, Any
408
- from llama_index.core.agent import ReActAgent
409
- from llama_index.core.tools import FunctionTool
410
- from llama_index.llms.openai import OpenAI
411
- from duckduckgo_search import DDGS
412
-
413
- # Set your API key
414
- # os.environ["OPENAI_API_KEY"] = "your-api-key-here"
415
-
416
- class WorkflowReActAgent:
417
- """Complete working ReAct Agent with your workflow tools"""
418
-
419
- def __init__(self, llm_model: str = "gpt-4o-mini"):
420
- self.llm = OpenAI(model=llm_model, temperature=0.1)
421
- self.tools = self._create_tools()
422
- self.agent = ReActAgent.from_tools(
423
- tools=self.tools,
424
- llm=self.llm,
425
- verbose=True,
426
- max_iterations=8 # Reasonable limit
427
- )
428
-
429
- def _create_tools(self) -> List[FunctionTool]:
430
- """Create tools that actually work and get used"""
431
-
432
- # 🔍 Web Search Tool (using your exact implementation)
433
- def web_search(query: str) -> str:
434
- """Search the web for current information"""
435
- try:
436
- with DDGS() as ddgs:
437
- results = []
438
- gen = ddgs.text(query, safesearch="Off")
439
- for i, result in enumerate(gen):
440
- if i >= 3: # Limit results
441
- break
442
- results.append(f"• {result.get('title', '')}: {result.get('body', '')[:150]}...")
443
-
444
- if results:
445
- return f"Search results: {'; '.join(results)}"
446
- else:
447
- return f"No results found for '{query}'"
448
-
449
- except Exception as e:
450
- return f"Search error: {str(e)}"
451
-
452
- # 🧮 Calculator Tool
453
- def calculate(expression: str) -> str:
454
- """Calculate mathematical expressions safely"""
455
- try:
456
- # Simple and safe evaluation
457
- allowed_chars = "0123456789+-*/().,_ "
458
- if all(c in allowed_chars for c in expression):
459
- result = eval(expression)
460
- return f"Result: {result}"
461
- else:
462
- return f"Invalid expression: {expression}"
463
- except Exception as e:
464
- return f"Math error: {str(e)}"
465
-
466
- # 🐍 Python Executor Tool
467
- def execute_python(code: str) -> str:
468
- """Execute Python code and return results"""
469
- import sys
470
- from io import StringIO
471
- import traceback
472
-
473
- old_stdout = sys.stdout
474
- sys.stdout = StringIO()
475
-
476
- try:
477
- local_scope = {}
478
- exec(code, {"__builtins__": __builtins__}, local_scope)
479
-
480
- output = sys.stdout.getvalue()
481
-
482
- # Get result from the last line if it's an expression
483
- lines = code.strip().split('\n')
484
- if lines:
485
- try:
486
- result = eval(lines[-1], {}, local_scope)
487
- return f"Result: {result}\nOutput: {output}".strip()
488
- except:
489
- pass
490
-
491
- return f"Output: {output}".strip() if output else "Code executed successfully"
492
-
493
- except Exception as e:
494
- return f"Error: {str(e)}"
495
- finally:
496
- sys.stdout = old_stdout
497
-
498
- # 🌐 API Request Tool
499
- def api_request(url: str, method: str = "GET") -> str:
500
- """Make HTTP API requests"""
501
- import requests
502
- try:
503
- response = requests.request(method, url, timeout=10)
504
- return f"Status: {response.status_code}\nResponse: {response.text[:300]}..."
505
- except Exception as e:
506
- return f"API error: {str(e)}"
507
-
508
- # Convert to FunctionTool objects
509
- return [
510
- FunctionTool.from_defaults(
511
- fn=web_search,
512
- name="web_search",
513
- description="Search the web for current information on any topic"
514
- ),
515
- FunctionTool.from_defaults(
516
- fn=calculate,
517
- name="calculate",
518
- description="Calculate mathematical expressions and equations"
519
- ),
520
- FunctionTool.from_defaults(
521
- fn=execute_python,
522
- name="execute_python",
523
- description="Execute Python code for data processing and calculations"
524
- ),
525
- FunctionTool.from_defaults(
526
- fn=api_request,
527
- name="api_request",
528
- description="Make HTTP requests to APIs and web services"
529
- )
530
- ]
531
-
532
- def chat(self, message: str) -> str:
533
- """Chat with the ReAct agent"""
534
- try:
535
- response = self.agent.chat(message)
536
- return str(response.response)
537
- except Exception as e:
538
- return f"Agent error: {str(e)}"
539
-
540
- # 🚀 Usage Examples
541
- def main():
542
- """Test the working ReAct agent"""
543
-
544
- agent = WorkflowReActAgent()
545
-
546
- test_queries = [
547
- "What's the current Bitcoin price and calculate 10% of it?",
548
- "Search for news about SpaceX and tell me the latest",
549
- "Calculate the compound interest: 1000 * (1.05)^10",
550
- "Search for Python programming tips",
551
- "What's 15 factorial divided by 12 factorial?",
552
- "Find information about the latest iPhone and calculate its price in EUR if 1 USD = 0.92 EUR"
553
- ]
554
-
555
- print("🤖 WorkflowReActAgent Ready!")
556
- print("=" * 60)
557
-
558
- for i, query in enumerate(test_queries, 1):
559
- print(f"\n🔸 Query {i}: {query}")
560
- print("-" * 50)
561
-
562
- response = agent.chat(query)
563
- print(f"🎯 Response: {response}")
564
- print("\n" + "="*60)
565
-
566
- if __name__ == "__main__":
567
- main()
568
-
569
-
570
- ## web search tool
571
- """{
572
- "id": "WebSearch-1",
573
- "type": "WebSearch",
574
- "data": {
575
- "display_name": "Search for News",
576
- "template": {
577
- "query": {
578
- "display_name": "Search Query",
579
- "type": "string",
580
- "is_handle": true
581
- },
582
- "results": {
583
- "display_name": "Search Results",
584
- "type": "object",
585
- "is_handle": true
586
- }
587
- }
588
- },
589
- "resources": {
590
- "cpu": 0.2,
591
- "memory": "256Mi",
592
- "gpu": "none"
593
- }
594
- }"""
595
-
596
- # First, install duckduckgo_search:
597
- # pip install duckduckgo_search
598
-
599
- import json
600
- from typing import Any, Dict, List
601
- from duckduckgo_search import DDGS
602
-
603
- def process_web_search(query: str, max_results: int = 10) -> Dict[str, Any]:
604
- if not query:
605
- return {"results": []}
606
-
607
- try:
608
- # Use the DDGS client and its text() method
609
- with DDGS() as ddgs:
610
- gen = ddgs.text(query, safesearch="Off")
611
- # Collect up to max_results items
612
- results: List[Dict[str, str]] = [
613
- {"title": r.get("title", ""), "link": r.get("href", ""), "snippet": r.get("body", "")}
614
- for _, r in zip(range(max_results), gen)
615
- ]
616
- return {"results": results}
617
-
618
- except Exception as e:
619
- return {"results": {"error": str(e)}}
620
-
621
-
622
- # import json
623
- # from typing import Any
624
- # from llama_index.tools import BaseTool, ToolMetadata
625
-
626
- # class DuckDuckGoSearchTool(BaseTool):
627
- # """A LlamaIndex tool that proxies to process_web_search."""
628
- # metadata = ToolMetadata(
629
- # name="duckduckgo_search",
630
- # description="Performs a web search via DuckDuckGo and returns JSON results."
631
- # )
632
-
633
- # def __init__(self, max_results: int = 10):
634
- # self.max_results = max_results
635
-
636
- # def _run(self, query: str) -> str:
637
- # # Call our search function and return a JSON string
638
- # results = process_web_search(query, max_results=self.max_results)
639
- # return json.dumps(results)
640
-
641
- # async def _arun(self, query: str) -> str:
642
- # # Async agents can await this
643
- # results = process_web_search(query, max_results=self.max_results)
644
- # return json.dumps(results)
645
-
646
- # from llama_index import GPTVectorStoreIndex, ServiceContext
647
- # from llama_index.agent.react import ReactAgent
648
- # from llama_index.tools import ToolConfig
649
-
650
- # # 1. Instantiate the tool
651
- # search_tool = DuckDuckGoSearchTool(max_results=5)
652
-
653
- # # 2. Create an agent and register tools
654
- # agent = ReactAgent(
655
- # tools=[search_tool],
656
- # service_context=ServiceContext.from_defaults()
657
- # )
658
-
659
- # # 3. Run the agent with a natural‐language prompt
660
- # response = agent.run("What are the top news about renewable energy?")
661
- # print(response)
662
-
663
-
664
- process_web_search(query="devil may cry")
665
-
666
-
667
- ## execute python node
668
- """{
669
- "id": "ExecutePython-1",
670
- "type": "ExecutePython",
671
- "data": {
672
- "display_name": "Custom Data Processing",
673
- "template": {
674
- "code": {
675
- "display_name": "Python Code",
676
- "type": "string",
677
- "value": "def process(data):\n # Example: Extract titles from search results\n titles = [item['title'] for item in data]\n # The 'result' variable will be the output\n result = ', '.join(titles)\n return result"
678
- },
679
- "input_vars": {
680
- "display_name": "Input Variables",
681
- "type": "object",
682
- "is_handle": true
683
- },
684
- "output_vars": {
685
- "display_name": "Output Variables",
686
- "type": "object",
687
- "is_handle": true
688
- }
689
- }
690
- },
691
- "resources": {
692
- "cpu": 0.5,
693
- "memory": "512Mi",
694
- "gpu": "none"
695
- }
696
- }"""
697
-
698
- import sys
699
- import traceback
700
- from typing import Any, Dict
701
-
702
- def process_execute_python(code: str, input_vars: Dict[str, Any] = None) -> Dict[str, Any]:
703
- """
704
- Executes a string of Python code within an isolated scope.
705
- - If the code defines `process(data)`, calls it with `input_vars`.
706
- - Otherwise, executes the code top-level and returns any printed output.
707
- """
708
- if input_vars is None:
709
- input_vars = {}
710
-
711
- # Capture stdout
712
- from io import StringIO
713
- old_stdout = sys.stdout
714
- sys.stdout = StringIO()
715
-
716
- local_scope: Dict[str, Any] = {}
717
- try:
718
- # Execute user code
719
- exec(code, {}, local_scope)
720
-
721
- if "process" in local_scope and callable(local_scope["process"]):
722
- result = local_scope["process"](input_vars)
723
- else:
724
- # No process(): the code above already ran as a script,
- # and anything it printed has been captured in stdout.
- result = None
728
-
729
- output = sys.stdout.getvalue()
730
- return {"output_vars": result, "stdout": output}
731
-
732
- except Exception:
733
- err = traceback.format_exc()
734
- return {"output_vars": None, "error": err}
735
-
736
- finally:
737
- sys.stdout = old_stdout
738
-
739
- # 1. Code with process():
740
- code1 = """
741
- def process(data):
742
- return {"sum": data.get("x",0) + data.get("y",0)}
743
- """
744
- print(process_execute_python(code1, {"x":5, "y":7}))
745
- # → {'output_vars': {'sum': 12}, 'stdout': ''}
746
-
747
- # 2. Standalone code:
748
- code2 = 'print("Hello, world!")'
749
- print(process_execute_python(code2))
750
- # → {'output_vars': None, 'stdout': 'Hello, world!\n'}
751
-
752
- # import json
753
- # from typing import Any
754
- # from llama_index.tools import BaseTool, ToolMetadata
755
-
756
- # class ExecutePythonTool(BaseTool):
757
- # """Executes arbitrary Python code strings in an isolated scope."""
758
- # metadata = ToolMetadata(
759
- # name="execute_python",
760
- # description="Runs user-supplied Python code. Requires optional `process(data)` or runs script."
761
- # )
762
-
763
- # def _run(self, code: str) -> str:
764
- # # Call the executor and serialize the dict result
765
- # result = process_execute_python(code)
766
- # return json.dumps(result)
767
-
768
- # async def _arun(self, code: str) -> str:
769
- # result = process_execute_python(code)
770
- # return json.dumps(result)
771
-
772
- # from llama_index.agent.react import ReactAgent
773
- # from llama_index import ServiceContext
774
-
775
- # tool = ExecutePythonTool()
776
- # agent = ReactAgent(tools=[tool], service_context=ServiceContext.from_defaults())
777
-
778
- # # Agent will call `execute_python` when needed.
779
- # response = agent.run("Please run the Python code: print('Test')")
780
- # print(response)
781
-
782
-
783
- ## conditional logic
784
- """{
785
- "id": "ConditionalLogic-1",
786
- "type": "ConditionalLogic",
787
- "data": {
788
- "display_name": "Check User Role",
789
- "template": {
790
- "operator": {
791
- "display_name": "Operator",
792
- "type": "options",
793
- "options": ["==", "!=", ">", "<", ">=", "<=", "contains", "not contains"],
794
- "value": "=="
795
- },
796
- "comparison_value": {
797
- "display_name": "Comparison Value",
798
- "type": "string",
799
- "value": "admin"
800
- },
801
- "input_value": {
802
- "display_name": "Input to Check",
803
- "type": "any",
804
- "is_handle": true
805
- },
806
- "true_output": {
807
- "display_name": "Path if True",
808
- "type": "any",
809
- "is_handle": true
810
- },
811
- "false_output": {
812
- "display_name": "Path if False",
813
- "type": "any",
814
- "is_handle": true
815
- }
816
- }
817
- },
818
- "resources": {
819
- "cpu": 0.1,
820
- "memory": "128Mi",
821
- "gpu": "none"
822
- }
823
- }"""
824
-
825
- from typing import Any, Dict
826
-
827
- def process_conditional_logic(operator: str, comparison_value: str, input_value: Any) -> Dict[str, Any]:
828
- """
829
- Evaluates a condition and returns the input value on the appropriate output handle.
830
- """
831
- result = False
832
- # Attempt to convert types for numeric comparison
833
- try:
834
- num_input = float(input_value)
835
- num_comp = float(comparison_value)
836
- except (ValueError, TypeError):
837
- num_input, num_comp = None, None
838
-
839
- # Evaluate condition
840
- if operator == '==' : result = input_value == comparison_value
841
- elif operator == '!=': result = input_value != comparison_value
842
- elif operator == '>' and num_input is not None: result = num_input > num_comp
843
- elif operator == '<' and num_input is not None: result = num_input < num_comp
844
- elif operator == '>=' and num_input is not None: result = num_input >= num_comp
845
- elif operator == '<=' and num_input is not None: result = num_input <= num_comp
846
- elif operator == 'contains': result = str(comparison_value) in str(input_value)
847
- elif operator == 'not contains': result = str(comparison_value) not in str(input_value)
848
-
849
- # Return the input data on the correct output handle based on the result
850
- if result:
851
- # The key "true_output" matches the source_handle in the workflow edge
852
- return {"true_output": input_value}
853
- else:
854
- # The key "false_output" matches the source_handle in the workflow edge
855
- return {"false_output": input_value}
856
-
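- # Minimal usage sketch for the conditional-logic node: the operator values and
- # handle names come from the node spec above; the sample inputs below are
- # illustrative only.
- print(process_conditional_logic("==", "admin", "admin"))
- # → {'true_output': 'admin'}
- print(process_conditional_logic(">", "10", 5))
- # → {'false_output': 5}
- print(process_conditional_logic("contains", "err", "no errors here"))
- # → {'true_output': 'no errors here'}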
857
- ## wait node
858
- """{
859
- "id": "Wait-1",
860
- "type": "Wait",
861
- "data": {
862
- "display_name": "Wait for 5 Seconds",
863
- "template": {
864
- "duration": {
865
- "display_name": "Duration (seconds)",
866
- "type": "number",
867
- "value": 5
868
- },
869
- "passthrough_input": {
870
- "display_name": "Passthrough Data In",
871
- "type": "any",
872
- "is_handle": true
873
- },
874
- "passthrough_output": {
875
- "display_name": "Passthrough Data Out",
876
- "type": "any",
877
- "is_handle": true
878
- }
879
- }
880
- },
881
- "resources": {
882
- "cpu": 0.1,
883
- "memory": "128Mi",
884
- "gpu": "none"
885
- }
886
- }"""
887
-
888
- import time
889
- from typing import Any, Dict
890
-
891
- def process_wait(duration: int, passthrough_input: Any = None) -> Dict[str, Any]:
892
- """
893
- Pauses execution for a given duration and then passes data through.
894
- """
895
- time.sleep(duration)
896
- # The output key "passthrough_output" matches the source_handle
897
- return {"passthrough_output": passthrough_input}
898
-
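- # Minimal usage sketch for the wait node (values are illustrative only):
- print(process_wait(1, {"payload": "hello"}))
- # → {'passthrough_output': {'payload': 'hello'}} after roughly 1 second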
899
- ## chat node
900
- """{
901
- "id": "ChatModel-1",
902
- "type": "ChatModel",
903
- "data": {
904
- "display_name": "AI Assistant",
905
- "template": {
906
- "provider": {
907
- "display_name": "Provider",
908
- "type": "options",
909
- "options": ["OpenAI", "Anthropic"],
910
- "value": "OpenAI"
911
- },
912
- "model": {
913
- "display_name": "Model Name",
914
- "type": "string",
915
- "value": "gpt-4o-mini"
916
- },
917
- "api_key": {
918
- "display_name": "API Key",
919
- "type": "SecretStr",
920
- "required": true,
921
- "env_var": "OPENAI_API_KEY"
922
- },
923
- "system_prompt": {
924
- "display_name": "System Prompt (Optional)",
925
- "type": "string",
926
- "value": "You are a helpful assistant."
927
- },
928
- "prompt": {
929
- "display_name": "Prompt",
930
- "type": "string",
931
- "is_handle": true
932
- },
933
- "response": {
934
- "display_name": "Response",
935
- "type": "string",
936
- "is_handle": true
937
- }
938
- }
939
- },
940
- "resources": {
941
- "cpu": 0.5,
942
- "memory": "256Mi",
943
- "gpu": "none"
944
- }
945
- }"""
946
-
947
- import os
948
- from typing import Any, Dict
949
- from openai import OpenAI
950
- from anthropic import Anthropic
951
-
952
- def process_chat_model(provider: str, model: str, api_key: str, prompt: str, system_prompt: str = "") -> Dict[str, Any]:
953
- """
954
- Calls the specified chat model provider with a given prompt.
955
- """
956
- response_text = ""
957
- if provider == "OpenAI":
958
- client = OpenAI(api_key=api_key)
959
- messages = []
960
- if system_prompt:
961
- messages.append({"role": "system", "content": system_prompt})
962
- messages.append({"role": "user", "content": prompt})
963
-
964
- completion = client.chat.completions.create(model=model, messages=messages)
965
- response_text = completion.choices[0].message.content
966
-
967
- elif provider == "Anthropic":
968
- client = Anthropic(api_key=api_key)
969
- message = client.messages.create(
970
- model=model,
971
- max_tokens=2048,
972
- system=system_prompt,
973
- messages=[{"role": "user", "content": prompt}]
974
- )
975
- response_text = message.content[0].text
976
-
977
- return {"response": response_text}
978
-
979
-
980
-
981
- def test_openai():
982
- openai_key = os.getenv("OPENAI_API_KEY")
983
- if not openai_key:
984
- raise RuntimeError("Set the OPENAI_API_KEY environment variable.")
985
- result = process_chat_model(
986
- provider="OpenAI",
987
- model="gpt-3.5-turbo",
988
- api_key=openai_key,
989
- system_prompt="You are a helpful assistant.",
990
- prompt="What's the capital of France?"
991
- )
992
- print("OpenAI response:", result["response"])
993
-
994
-
995
- def test_anthropic():
996
- anthropic_key = os.getenv("ANTHROPIC_API_KEY")
997
- if not anthropic_key:
998
- raise RuntimeError("Set the ANTHROPIC_API_KEY environment variable.")
999
- result = process_chat_model(
1000
- provider="Anthropic",
1001
- model="claude-sonnet-4-20250514",
1002
- api_key=anthropic_key,
1003
- system_prompt="You are a concise assistant.",
1004
- prompt="List three benefits of renewable energy."
1005
- )
1006
- print("Anthropic response:", result["response"])
1007
-
1008
-
1009
- if __name__ == "__main__":
1010
- test_openai()
1011
- test_anthropic()
1012
-
1013
- ## rag node 1 knowledge base
1014
- """{
1015
- "id": "KnowledgeBase-1",
1016
- "type": "KnowledgeBase",
1017
- "data": {
1018
- "display_name": "Create Product Docs KB",
1019
- "template": {
1020
- "kb_name": {
1021
- "display_name": "Knowledge Base Name",
1022
- "type": "string",
1023
- "value": "product-docs-v1"
1024
- },
1025
- "source_type": {
1026
- "display_name": "Source Type",
1027
- "type": "options",
1028
- "options": ["Directory", "URL"],
1029
- "value": "URL"
1030
- },
1031
- "path_or_url": {
1032
- "display_name": "Path or URL",
1033
- "type": "string",
1034
- "value": "https://docs.modal.com/get-started"
1035
- },
1036
- "knowledge_base": {
1037
- "display_name": "Knowledge Base Out",
1038
- "type": "object",
1039
- "is_handle": true
1040
- }
1041
- }
1042
- },
1043
- "resources": {
1044
- "cpu": 2.0,
1045
- "memory": "1Gi",
1046
- "gpu": "none"
1047
- }
1048
- }"""
1049
-
1050
- import os
1051
- from typing import Any, Dict
1052
- from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Settings
1053
- from llama_index.readers.web import SimpleWebPageReader
1054
- from llama_index.embeddings.huggingface import HuggingFaceEmbedding
1055
-
1056
- def process_knowledge_base(kb_name: str, source_type: str, path_or_url: str) -> Dict[str, Any]:
1057
- """
1058
- Creates and persists a LlamaIndex VectorStoreIndex.
1059
- """
1060
- # Use a high-quality, local model for embeddings
1061
- Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
1062
-
1063
- if source_type == "URL":
1064
- documents = SimpleWebPageReader(html_to_text=True).load_data([path_or_url])
1065
- else:
1066
- documents = SimpleDirectoryReader(input_dir=path_or_url).load_data()
1067
-
1068
- index = VectorStoreIndex.from_documents(documents)
1069
-
1070
- storage_path = os.path.join("./storage", kb_name)
1071
- index.storage_context.persist(persist_dir=storage_path)
1072
-
1073
- # Return a reference object to the persisted index
1074
- return {"knowledge_base": {"name": kb_name, "path": storage_path}}
1075
-
1076
- ## rag node 2 query
1077
- """{
1078
- "id": "RAGQuery-1",
1079
- "type": "RAGQuery",
1080
- "data": {
1081
- "display_name": "Retrieve & Augment Prompt",
1082
- "template": {
1083
- "query": {
1084
- "display_name": "Original Query",
1085
- "type": "string",
1086
- "is_handle": true
1087
- },
1088
- "knowledge_base": {
1089
- "display_name": "Knowledge Base",
1090
- "type": "object",
1091
- "is_handle": true
1092
- },
1093
- "rag_prompt": {
1094
- "display_name": "Augmented Prompt Out",
1095
- "type": "string",
1096
- "is_handle": true
1097
- }
1098
- }
1099
- },
1100
- "resources": {
1101
- "cpu": 1.0,
1102
- "memory": "512Mi",
1103
- "gpu": "none"
1104
- }
1105
- }"""
1106
-
1107
- from typing import Any, Dict
1108
- from llama_index.core import StorageContext, load_index_from_storage, Settings
1109
- from llama_index.embeddings.huggingface import HuggingFaceEmbedding
1110
-
1111
- def process_rag_query(query: str, knowledge_base: Dict) -> Dict[str, Any]:
1112
- """
1113
- Retrieves context from a knowledge base and creates an augmented prompt.
1114
- """
1115
- Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
1116
-
1117
- # Load the index from the path provided by the KnowledgeBase node
1118
- storage_context = StorageContext.from_defaults(persist_dir=knowledge_base['path'])
1119
- index = load_index_from_storage(storage_context)
1120
-
1121
- retriever = index.as_retriever(similarity_top_k=3)
1122
- retrieved_nodes = retriever.retrieve(query)
1123
-
1124
- # Combine the retrieved text into a single context block
1125
- context_str = "\n\n".join([node.get_content() for node in retrieved_nodes])
1126
-
1127
- # Construct the final prompt for the ChatModel
1128
- rag_prompt_template = (
1129
- "Use the following context to answer the question. "
1130
- "If the answer is not in the context, say you don't know.\n\n"
1131
- "Context:\n{context}\n\n"
1132
- "Question: {question}"
1133
- )
1134
-
1135
- final_prompt = rag_prompt_template.format(context=context_str, question=query)
1136
-
1137
- return {"rag_prompt": final_prompt}
1138
-
1139
- # --- Demo Execution ---
1140
- if __name__ == "__main__":
1141
- # 1. Build the KB from Modal docs
1142
- kb_result = process_knowledge_base(
1143
- kb_name="product-docs-v1",
1144
- source_type="URL",
1145
- path_or_url="https://modal.com/docs/guide"
1146
- )
1147
- print("Knowledge Base Created:", kb_result)
1148
-
1149
- # 2. Run a RAG query
1150
- user_query = "How do I get started with Modal?"
1151
- rag_result = process_rag_query(user_query, kb_result["knowledge_base"])
1152
- print("\nAugmented RAG Prompt:\n", rag_result["rag_prompt"])
1153
-
1154
- ## speech to text
1155
- """{
1156
- "id": "HFSpeechToText-1",
1157
- "type": "HFSpeechToText",
1158
- "data": {
1159
- "display_name": "Transcribe Audio (Whisper)",
1160
- "template": {
1161
- "model_id": {
1162
- "display_name": "Model ID",
1163
- "type": "string",
1164
- "value": "openai/whisper-large-v3"
1165
- },
1166
- "audio_input": {
1167
- "display_name": "Audio Input",
1168
- "type": "object",
1169
- "is_handle": true
1170
- },
1171
- "transcribed_text": {
1172
- "display_name": "Transcribed Text",
1173
- "type": "string",
1174
- "is_handle": true
1175
- }
1176
- }
1177
- },
1178
- "resources": {
1179
- "cpu": 1.0,
1180
- "memory": "4Gi",
1181
- "gpu": "T4"
1182
- }
1183
- }"""
1184
-
1185
- import torch
1186
- from transformers import pipeline
1187
- from typing import Any, Dict
1188
-
1189
- # --- In a real Modal app, this would be structured like this: ---
1190
- #
1191
- # import modal
1192
- # image = modal.Image.debian_slim().pip_install("transformers", "torch", "librosa")
1193
- # stub = modal.Stub("speech-to-text-model")
1194
- #
1195
- # @stub.cls(gpu="T4", image=image)
1196
- # class WhisperModel:
1197
- # def __init__(self):
1198
- # device = "cuda" if torch.cuda.is_available() else "cpu"
1199
- # self.pipe = pipeline(
1200
- # "automatic-speech-recognition",
1201
- # model="openai/whisper-large-v3",
1202
- # torch_dtype=torch.float16,
1203
- # device=device,
1204
- # )
1205
- #
1206
- # @modal.method()
1207
- # def run_inference(self, audio_path):
1208
- # # The function logic from below would be here.
1209
- # ...
1210
- # -------------------------------------------------------------------
1211
-
1212
-
1213
- def process_hf_speech_to_text(model_id: str, audio_input: Dict[str, Any]) -> Dict[str, Any]:
1214
- """
1215
- Transcribes an audio file using a Hugging Face ASR pipeline.
1216
-
1217
- NOTE: This function simulates the inference part of a stateful Modal class.
1218
- The model pipeline should be loaded only once.
1219
- """
1220
- if audio_input.get("type") != "audio":
1221
- raise ValueError("Input must be of type 'audio'.")
1222
-
1223
- audio_path = audio_input["value"]
1224
-
1225
- # --- This part would be inside the Modal class method ---
1226
-
1227
- # In a real implementation, 'pipe' would be a class attribute (self.pipe)
1228
- # loaded in the __init__ or @enter method.
1229
- device = "cuda" if torch.cuda.is_available() else "cpu"
1230
- pipe = pipeline(
1231
- "automatic-speech-recognition",
1232
- model=model_id,
1233
- torch_dtype=torch.float16,
1234
- device=device,
1235
- )
1236
-
1237
- outputs = pipe(
1238
- audio_path,
1239
- chunk_length_s=30,
1240
- batch_size=24,
1241
- return_timestamps=True,
1242
- )
1243
-
1244
- return {"transcribed_text": outputs["text"]}
1245
-
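- # Hedged usage sketch: the audio path below is hypothetical; any local file
- # wrapped in the Input-node package format {"type": "audio", "value": path}
- # would be handled the same way. Kept commented out because it needs a GPU
- # and a model download.
- # asr_result = process_hf_speech_to_text(
- #     model_id="openai/whisper-large-v3",
- #     audio_input={"type": "audio", "value": "/tmp/sample.wav"},
- # )
- # print(asr_result["transcribed_text"])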
1246
- ## text to speech
1247
- """{
1248
- "id": "HFTextToSpeech-1",
1249
- "type": "HFTextToSpeech",
1250
- "data": {
1251
- "display_name": "Generate Speech",
1252
- "template": {
1253
- "model_id": {
1254
- "display_name": "Model ID",
1255
- "type": "string",
1256
- "value": "microsoft/speecht5_tts"
1257
- },
1258
- "text_input": {
1259
- "display_name": "Text Input",
1260
- "type": "string",
1261
- "is_handle": true
1262
- },
1263
- "audio_output": {
1264
- "display_name": "Audio Output",
1265
- "type": "object",
1266
- "is_handle": true
1267
- }
1268
- }
1269
- },
1270
- "resources": {
1271
- "cpu": 1.0,
1272
- "memory": "4Gi",
1273
- "gpu": "T4"
1274
- }
1275
- }"""
1276
-
1277
- import torch
1278
- from transformers import pipeline
1279
- import soundfile as sf
1280
- from typing import Any, Dict
1281
-
1282
- def process_hf_text_to_speech(model_id: str, text_input: str) -> Dict[str, Any]:
1283
- """
1284
- Synthesizes speech from text using a Hugging Face TTS pipeline.
1285
-
1286
- NOTE: Simulates the inference part of a stateful Modal class.
1287
- """
1288
- # --- This part would be inside the Modal class method ---
1289
-
1290
- # The pipeline and embeddings would be loaded once in the class.
1291
- pipe = pipeline("text-to-speech", model=model_id, device="cuda")
1292
-
1293
- # SpeechT5 requires speaker embeddings for voice characteristics
1294
- from transformers import SpeechT5HifiGan
1295
- vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to("cuda")
1296
-
1297
- # A dummy embedding for a generic voice (SpeechT5 expects a (1, 512) torch tensor)
- speaker_embedding = torch.randn((1, 512)).to("cuda")
1300
-
1301
- speech = pipe(text_input, forward_params={"speaker_embeddings": speaker_embedding})
1302
-
1303
- # Save the output to a file and return the path
1304
- output_path = "/tmp/output.wav"
1305
- sf.write(output_path, speech["audio"], samplerate=speech["sampling_rate"])
1306
-
1307
- return {"audio_output": {"type": "audio", "value": output_path}}
1308
-
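- # Hedged usage sketch (needs a GPU container; the text is illustrative only):
- # tts_result = process_hf_text_to_speech(
- #     model_id="microsoft/speecht5_tts",
- #     text_input="Workflow finished successfully.",
- # )
- # print(tts_result["audio_output"])  # → {'type': 'audio', 'value': '/tmp/output.wav'}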
1309
- ## text generation
1310
- """{
1311
- "id": "HFTextGeneration-1",
1312
- "type": "HFTextGeneration",
1313
- "data": {
1314
- "display_name": "Generate with Mistral",
1315
- "template": {
1316
- "model_id": {
1317
- "display_name": "Model ID",
1318
- "type": "string",
1319
- "value": "mistralai/Mistral-7B-Instruct-v0.2"
1320
- },
1321
- "max_new_tokens": {
1322
- "display_name": "Max New Tokens",
1323
- "type": "number",
1324
- "value": 256
1325
- },
1326
- "prompt": {
1327
- "display_name": "Prompt",
1328
- "type": "string",
1329
- "is_handle": true
1330
- },
1331
- "generated_text": {
1332
- "display_name": "Generated Text",
1333
- "type": "string",
1334
- "is_handle": true
1335
- }
1336
- }
1337
- },
1338
- "resources": {
1339
- "cpu": 2.0,
1340
- "memory": "24Gi",
1341
- "gpu": "A10G"
1342
- }
1343
- }"""
1344
-
1345
- import torch
1346
- from transformers import pipeline
1347
- from typing import Any, Dict
1348
-
1349
- def process_hf_text_generation(model_id: str, prompt: str, max_new_tokens: int) -> Dict[str, Any]:
1350
- """
1351
- Generates text from a prompt using a Hugging Face LLM.
1352
-
1353
- NOTE: Simulates the inference part of a stateful Modal class.
1354
- """
1355
- # --- This part would be inside the Modal class method ---
1356
-
1357
- # The pipeline is loaded once on container start.
1358
- pipe = pipeline(
1359
- "text-generation",
1360
- model=model_id,
1361
- torch_dtype=torch.bfloat16,
1362
- device_map="auto",
1363
- )
1364
-
1365
- messages = [{"role": "user", "content": prompt}]
1366
-
1367
- # The pipeline needs the prompt to be formatted correctly for instruct models
1368
- formatted_prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
1369
-
1370
- outputs = pipe(
1371
- formatted_prompt,
1372
- max_new_tokens=max_new_tokens,
1373
- do_sample=True,
1374
- temperature=0.7,
1375
- top_k=50,
1376
- top_p=0.95,
1377
- )
1378
-
1379
- # Extract only the generated part of the text
1380
- generated_text = outputs[0]["generated_text"]
1381
- # The output includes the prompt, so we remove it.
1382
- response_text = generated_text[len(formatted_prompt):]
1383
-
1384
- return {"generated_text": response_text}
1385
-
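- # Hedged usage sketch (model download and GPU required; the prompt is illustrative):
- # gen_result = process_hf_text_generation(
- #     model_id="mistralai/Mistral-7B-Instruct-v0.2",
- #     prompt="Summarize what a workflow engine does in one sentence.",
- #     max_new_tokens=64,
- # )
- # print(gen_result["generated_text"])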
1386
- ## image generation
1387
- """{
1388
- "id": "HFImageGeneration-1",
1389
- "type": "HFImageGeneration",
1390
- "data": {
1391
- "display_name": "Generate Image (SDXL)",
1392
- "template": {
1393
- "model_id": {
1394
- "display_name": "Base Model ID",
1395
- "type": "string",
1396
- "value": "stabilityai/stable-diffusion-xl-base-1.0"
1397
- },
1398
- "lora_id": {
1399
- "display_name": "LoRA Model ID (Optional)",
1400
- "type": "string",
1401
- "value": "nerijs/pixel-art-xl"
1402
- },
1403
- "prompt": {
1404
- "display_name": "Prompt",
1405
- "type": "string",
1406
- "is_handle": true
1407
- },
1408
- "image_output": {
1409
- "display_name": "Image Output",
1410
- "type": "object",
1411
- "is_handle": true
1412
- }
1413
- }
1414
- },
1415
- "resources": {
1416
- "cpu": 2.0,
1417
- "memory": "24Gi",
1418
- "gpu": "A10G"
1419
- }
1420
- }"""
1421
-
1422
- import torch
1423
- from diffusers import StableDiffusionXLPipeline
1424
- from typing import Any, Dict
1425
-
1426
- def process_hf_image_generation(model_id: str, prompt: str, lora_id: str = None) -> Dict[str, Any]:
1427
- """
1428
- Generates an image using a Stable Diffusion pipeline, with optional LoRA.
1429
-
1430
- NOTE: Simulates the inference part of a stateful Modal class.
1431
- """
1432
- # --- This part would be inside the Modal class method ---
1433
-
1434
- # The base pipeline is loaded once.
1435
- pipe = StableDiffusionXLPipeline.from_pretrained(
1436
- model_id,
1437
- torch_dtype=torch.float16,
1438
- variant="fp16",
1439
- use_safetensors=True
1440
- ).to("cuda")
1441
-
1442
- # If a LoRA is specified, load and fuse it.
1443
- # In a real app, this logic would be more complex to handle multiple LoRAs.
1444
- if lora_id:
1445
- pipe.load_lora_weights(lora_id)
1446
- pipe.fuse_lora()
1447
-
1448
- # Generate the image
1449
- image = pipe(prompt=prompt).images[0]
1450
-
1451
- output_path = "/tmp/generated_image.png"
1452
- image.save(output_path)
1453
-
1454
- return {"image_output": {"type": "image", "value": output_path}}
1455
-
1456
- ## captioning image to text
1457
- """{
1458
- "id": "HFVisionModel-1",
1459
- "type": "HFVisionModel",
1460
- "data": {
1461
- "display_name": "Describe Image",
1462
- "template": {
1463
- "task": {
1464
- "display_name": "Task",
1465
- "type": "options",
1466
- "options": ["image-to-text"],
1467
- "value": "image-to-text"
1468
- },
1469
- "model_id": {
1470
- "display_name": "Model ID",
1471
- "type": "string",
1472
- "value": "Salesforce/blip-image-captioning-large"
1473
- },
1474
- "image_input": {
1475
- "display_name": "Image Input",
1476
- "type": "object",
1477
- "is_handle": true
1478
- },
1479
- "result": {
1480
- "display_name": "Result",
1481
- "type": "string",
1482
- "is_handle": true
1483
- }
1484
- }
1485
- },
1486
- "resources": {
1487
- "cpu": 1.0,
1488
- "memory": "8Gi",
1489
- "gpu": "T4"
1490
- }
1491
- }"""
1492
-
1493
- from transformers import pipeline
1494
- from PIL import Image
1495
- from typing import Any, Dict
1496
-
1497
- def process_hf_vision_model(task: str, model_id: str, image_input: Dict[str, Any]) -> Dict[str, Any]:
1498
- """
1499
- Performs a vision-based task, like image captioning.
1500
-
1501
- NOTE: Simulates the inference part of a stateful Modal class.
1502
- """
1503
- if image_input.get("type") != "image":
1504
- raise ValueError("Input must be of type 'image'.")
1505
-
1506
- image_path = image_input["value"]
1507
-
1508
- # --- This part would be inside the Modal class method ---
1509
-
1510
- # The pipeline is loaded once.
1511
- pipe = pipeline(task, model=model_id, device="cuda")
1512
-
1513
- # Open the image file
1514
- image = Image.open(image_path)
1515
-
1516
- result = pipe(image)
1517
-
1518
- # The output format for this pipeline is a list of dicts
1519
- # e.g., [{'generated_text': 'a cat sitting on a couch'}]
1520
- output_text = result[0]['generated_text']
1521
-
1522
- return {"result": output_text}
1523
-
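- # Hedged usage sketch: the image path is hypothetical and the call assumes the
- # Input-node package format {"type": "image", "value": path}.
- # caption = process_hf_vision_model(
- #     task="image-to-text",
- #     model_id="Salesforce/blip-image-captioning-large",
- #     image_input={"type": "image", "value": "/tmp/photo.png"},
- # )
- # print(caption["result"])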
1524
- import os
1525
- from openai import OpenAI
1526
-
1527
- client = OpenAI(
1528
- base_url="https://api.studio.nebius.com/v1/",
1529
- api_key=os.environ.get("NEBIUS_API_KEY")
1530
- )
1531
-
1532
- response = client.images.generate(
1533
- model="black-forest-labs/flux-dev",
1534
- response_format="b64_json",
1535
- extra_body={
1536
- "response_extension": "png",
1537
- "width": 1024,
1538
- "height": 1024,
1539
- "num_inference_steps": 28,
1540
- "negative_prompt": "",
1541
- "seed": -1
1542
- },
1543
- prompt="pokemon"
1544
- )
1545
-
1546
- print(response.to_json())
1547
-
1548
-
1549
- ## nebius image generation
1550
- """{
1551
- "id": "NebiusImage-1",
1552
- "type": "NebiusImage",
1553
- "data": {
1554
- "display_name": "Nebius Image Generation",
1555
- "template": {
1556
- "model": {
1557
- "display_name": "Model",
1558
- "type": "options",
1559
- "options": [
1560
- "black-forest-labs/flux-dev",
1561
- "black-forest-labs/flux-schnell",
1562
- "stability-ai/sdxl"
1563
- ],
1564
- "value": "black-forest-labs/flux-dev"
1565
- },
1566
- "api_key": {
1567
- "display_name": "Nebius API Key",
1568
- "type": "SecretStr",
1569
- "required": true,
1570
- "env_var": "NEBIUS_API_KEY"
1571
- },
1572
- "prompt": {
1573
- "display_name": "Prompt",
1574
- "type": "string",
1575
- "is_handle": true
1576
- },
1577
- "negative_prompt": {
1578
- "display_name": "Negative Prompt (Optional)",
1579
- "type": "string",
1580
- "value": ""
1581
- },
1582
- "width": {
1583
- "display_name": "Width",
1584
- "type": "number",
1585
- "value": 1024
1586
- },
1587
- "height": {
1588
- "display_name": "Height",
1589
- "type": "number",
1590
- "value": 1024
1591
- },
1592
- "num_inference_steps": {
1593
- "display_name": "Inference Steps",
1594
- "type": "number",
1595
- "value": 28
1596
- },
1597
- "seed": {
1598
- "display_name": "Seed",
1599
- "type": "number",
1600
- "value": -1
1601
- },
1602
- "image_output": {
1603
- "display_name": "Image Output",
1604
- "type": "object",
1605
- "is_handle": true
1606
- }
1607
- }
1608
- },
1609
- "resources": {
1610
- "cpu": 0.2,
1611
- "memory": "256Mi",
1612
- "gpu": "none"
1613
- }
1614
- }"""
1615
-
1616
- import os
1617
- import base64
1618
- from typing import Any, Dict
1619
- from openai import OpenAI
1620
-
1621
- def process_nebius_image(
1622
- model: str,
1623
- api_key: str,
1624
- prompt: str,
1625
- negative_prompt: str = "",
1626
- width: int = 1024,
1627
- height: int = 1024,
1628
- num_inference_steps: int = 28,
1629
- seed: int = -1
1630
- ) -> Dict[str, Any]:
1631
- """
1632
- Generates an image using the Nebius AI Studio API.
1633
- """
1634
- if not api_key:
1635
- raise ValueError("Nebius API key is missing.")
1636
-
1637
- client = OpenAI(
1638
- base_url="https://api.studio.nebius.com/v1/",
1639
- api_key=api_key
1640
- )
1641
-
1642
- try:
1643
- response = client.images.generate(
1644
- model=model,
1645
- response_format="b64_json",
1646
- prompt=prompt,
1647
- extra_body={
1648
- "response_extension": "png",
1649
- "width": width,
1650
- "height": height,
1651
- "num_inference_steps": num_inference_steps,
1652
- "negative_prompt": negative_prompt,
1653
- "seed": seed
1654
- }
1655
- )
1656
-
1657
- # Extract the base64 encoded string
1658
- b64_data = response.data[0].b64_json
1659
-
1660
- # Decode the string and save the image to a file
1661
- image_bytes = base64.b64decode(b64_data)
1662
- output_path = "/tmp/nebius_image.png"
1663
- with open(output_path, "wb") as f:
1664
- f.write(image_bytes)
1665
-
1666
- # Return a data package with the path to the generated image
1667
- return {"image_output": {"type": "image", "value": output_path}}
1668
-
1669
- except Exception as e:
1670
- print(f"Error calling Nebius API: {e}")
1671
- return {"image_output": {"error": str(e)}}
1672
-
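- # Hedged usage sketch: reads the key from the NEBIUS_API_KEY env var named in
- # the node spec; the prompt is illustrative only.
- # nebius_result = process_nebius_image(
- #     model="black-forest-labs/flux-dev",
- #     api_key=os.environ.get("NEBIUS_API_KEY"),
- #     prompt="a watercolor painting of a lighthouse at dawn",
- # )
- # print(nebius_result["image_output"])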
1673
- ## mcp new
1674
- """{
1675
- "id": "MCPConnection-1",
1676
- "type": "MCPConnection",
1677
- "data": {
1678
- "display_name": "MCP Server Connection",
1679
- "template": {
1680
- "server_url": {
1681
- "display_name": "MCP Server URL",
1682
- "type": "string",
1683
- "value": "http://localhost:8000/sse",
1684
- "info": "URL to MCP server (HTTP/SSE or stdio command)"
1685
- },
1686
- "connection_type": {
1687
- "display_name": "Connection Type",
1688
- "type": "dropdown",
1689
- "options": ["http", "stdio"],
1690
- "value": "http"
1691
- },
1692
- "allowed_tools": {
1693
- "display_name": "Allowed Tools (Optional)",
1694
- "type": "list",
1695
- "info": "Filter specific tools. Leave empty for all tools"
1696
- },
1697
- "api_key": {
1698
- "display_name": "API Key (Optional)",
1699
- "type": "SecretStr",
1700
- "env_var": "MCP_API_KEY"
1701
- },
1702
- "mcp_tools_output": {
1703
- "display_name": "MCP Tools Output",
1704
- "type": "list",
1705
- "is_handle": true
1706
- }
1707
- }
1708
- },
1709
- "resources": {
1710
- "cpu": 0.1,
1711
- "memory": "128Mi",
1712
- "gpu": "none"
1713
- }
1714
- }
1715
- """
1716
-
1717
- """{
1718
- "id": "MCPAgent-1",
1719
- "type": "MCPAgent",
1720
- "data": {
1721
- "display_name": "MCP-Powered AI Agent",
1722
- "template": {
1723
- "mcp_tools_input": {
1724
- "display_name": "MCP Tools Input",
1725
- "type": "list",
1726
- "is_handle": true
1727
- },
1728
- "llm_model": {
1729
- "display_name": "LLM Model",
1730
- "type": "dropdown",
1731
- "options": ["gpt-4", "gpt-3.5-turbo", "gpt-4o", "gpt-4o-mini"],
1732
- "value": "gpt-4o-mini"
1733
- },
1734
- "system_prompt": {
1735
- "display_name": "System Prompt",
1736
- "type": "text",
1737
- "value": "You are a helpful AI assistant with access to various tools. Use the available tools to help answer user questions accurately.",
1738
- "multiline": true
1739
- },
1740
- "user_query": {
1741
- "display_name": "User Query",
1742
- "type": "string",
1743
- "is_handle": true
1744
- },
1745
- "max_iterations": {
1746
- "display_name": "Max Iterations",
1747
- "type": "int",
1748
- "value": 10
1749
- },
1750
- "agent_response": {
1751
- "display_name": "Agent Response",
1752
- "type": "string",
1753
- "is_handle": true
1754
- }
1755
- }
1756
- },
1757
- "resources": {
1758
- "cpu": 0.5,
1759
- "memory": "512Mi",
1760
- "gpu": "none"
1761
- }
1762
- }
1763
- """
1764
-
1765
- import asyncio
1766
- import os
1767
- from typing import List, Optional, Dict, Any
1768
- from llama_index.tools.mcp import BasicMCPClient, McpToolSpec, get_tools_from_mcp_url, aget_tools_from_mcp_url
1769
- from llama_index.core.tools import FunctionTool
1770
-
1771
- class MCPConnectionNode:
1772
- """Node to connect to MCP servers and retrieve tools"""
1773
-
1774
- def __init__(self):
1775
- self.client = None
1776
- self.tools = []
1777
-
1778
- async def execute(self,
1779
- server_url: str,
1780
- connection_type: str = "http",
1781
- allowed_tools: Optional[List[str]] = None,
1782
- api_key: Optional[str] = None) -> Dict[str, Any]:
1783
- """
1784
- Connect to MCP server and retrieve available tools
1785
- """
1786
- try:
1787
- # Set API key if provided
1788
- if api_key:
1789
- os.environ["MCP_API_KEY"] = api_key
1790
-
1791
- print(f"🔌 Connecting to MCP server: {server_url}")
1792
-
1793
- if connection_type == "http":
1794
- # Use LlamaIndex's built-in function to get tools
1795
- tools = await aget_tools_from_mcp_url(
1796
- server_url,
1797
- allowed_tools=allowed_tools
1798
- )
1799
- else:
1800
- # For stdio connections
1801
- self.client = BasicMCPClient(server_url)
1802
- mcp_tool_spec = McpToolSpec(
1803
- client=self.client,
1804
- allowed_tools=allowed_tools
1805
- )
1806
- tools = await mcp_tool_spec.to_tool_list_async()
1807
-
1808
- self.tools = tools
1809
-
1810
- print(f"✅ Successfully connected! Retrieved {len(tools)} tools:")
1811
- for tool in tools:
1812
- print(f" - {tool.metadata.name}: {tool.metadata.description}")
1813
-
1814
- return {
1815
- "success": True,
1816
- "tools_count": len(tools),
1817
- "tool_names": [tool.metadata.name for tool in tools],
1818
- "mcp_tools_output": tools
1819
- }
1820
-
1821
- except Exception as e:
1822
- print(f"❌ Connection failed: {str(e)}")
1823
- return {
1824
- "success": False,
1825
- "error": str(e),
1826
- "mcp_tools_output": []
1827
- }
1828
-
1829
- # Example usage
1830
- async def mcp_connection_demo():
1831
- node = MCPConnectionNode()
1832
-
1833
- # Using a public MCP server (you'll need to replace with actual public servers)
1834
- result = await node.execute(
1835
- server_url="http://localhost:8000/sse", # Replace with public MCP server
1836
- connection_type="http",
1837
- allowed_tools=None # Get all tools
1838
- )
1839
-
1840
- return result
1841
- from llama_index.core.agent import FunctionCallingAgentWorker, AgentRunner
1842
- from llama_index.llms.openai import OpenAI
1843
- from llama_index.core.tools import FunctionTool
1844
- from typing import List, Dict, Any
1845
- import os
1846
-
1847
- class MCPAgentNode:
1848
- """Node to create and run MCP-powered AI agents"""
1849
-
1850
- def __init__(self):
1851
- self.agent = None
1852
- self.tools = []
1853
-
1854
- async def execute(self,
1855
- mcp_tools_input: List[FunctionTool],
1856
- user_query: str,
1857
- llm_model: str = "gpt-4o-mini",
1858
- system_prompt: str = "You are a helpful AI assistant.",
1859
- max_iterations: int = 10) -> Dict[str, Any]:
1860
- """
1861
- Create and run MCP-powered agent using FunctionCallingAgent
1862
- """
1863
- try:
1864
- if not mcp_tools_input:
1865
- return {
1866
- "success": False,
1867
- "error": "No MCP tools provided",
1868
- "agent_response": "No tools available to process the query."
1869
- }
1870
-
1871
- print(f"🤖 Creating agent with {len(mcp_tools_input)} tools...")
1872
-
1873
- # Initialize LLM
1874
- llm = OpenAI(
1875
- model=llm_model,
1876
- api_key=os.getenv("OPENAI_API_KEY"),
1877
- temperature=0.1
1878
- )
1879
-
1880
- # Create function calling agent (more reliable than ReAct)
1881
- agent_worker = FunctionCallingAgentWorker.from_tools(
1882
- tools=mcp_tools_input,
1883
- llm=llm,
1884
- verbose=True,
1885
- system_prompt=system_prompt
1886
- )
1887
-
1888
- self.agent = AgentRunner(agent_worker)
1889
-
1890
- print(f"💭 Processing query: {user_query}")
1891
-
1892
- # Execute the query
1893
- response = self.agent.chat(user_query)
1894
-
1895
- return {
1896
- "success": True,
1897
- "agent_response": str(response.response),
1898
- "user_query": user_query,
1899
- "tools_used": len(mcp_tools_input)
1900
- }
1901
-
1902
- except Exception as e:
1903
- print(f"❌ Agent execution failed: {str(e)}")
1904
- return {
1905
- "success": False,
1906
- "error": str(e),
1907
- "agent_response": f"Sorry, I encountered an error while processing your query: {str(e)}"
1908
- }
1909
-
1910
- # Example usage
1911
- async def mcp_agent_demo(tools: List[FunctionTool]):
1912
- node = MCPAgentNode()
1913
-
1914
- result = await node.execute(
1915
- mcp_tools_input=tools,
1916
- user_query="What tools do you have available and what can you help me with?",
1917
- llm_model="gpt-4o-mini",
1918
- system_prompt="You are a helpful AI assistant. Use your available tools to provide accurate and useful responses."
1919
- )
1920
-
1921
- return result
1922
-
1923
-
1924
- ## example: complete mcp workflow
1925
-
1926
- import asyncio
1927
- import os
1928
- from typing import List, Dict, Any
1929
- from llama_index.core.tools import FunctionTool
1930
- from llama_index.core.agent import FunctionCallingAgentWorker, AgentRunner
1931
- from llama_index.llms.openai import OpenAI
1932
-
1933
- class CompleteMCPWorkflowDemo:
1934
- """Complete demo of MCP workflow with connection and agent nodes"""
1935
-
1936
- def __init__(self):
1937
- self.connection_node = MCPConnectionNode()
1938
- self.agent_node = MCPAgentNode()
1939
-
1940
- # Set your OpenAI API key
1941
- # os.environ["OPENAI_API_KEY"] = "your-openai-api-key-here"
1942
-
1943
- async def create_mock_mcp_tools(self) -> List[FunctionTool]:
1944
- """
1945
- Create mock MCP tools that simulate a real MCP server
1946
- Replace this with actual MCP server connection when available
1947
- """
1948
- def get_weather(city: str, country: str = "US") -> str:
1949
- """Get current weather information for a city"""
1950
- weather_data = {
1951
- "london": "Cloudy, 15°C, humidity 80%",
1952
- "paris": "Sunny, 22°C, humidity 45%",
1953
- "tokyo": "Rainy, 18°C, humidity 90%",
1954
- "new york": "Partly cloudy, 20°C, humidity 55%"
1955
- }
1956
- result = weather_data.get(city.lower(), f"Weather data not available for {city}")
1957
- return f"Weather in {city}, {country}: {result}"
1958
-
1959
- def search_news(topic: str, limit: int = 5) -> str:
1960
- """Search for latest news on a given topic"""
1961
- news_items = [
1962
- f"Breaking: New developments in {topic}",
1963
- f"Analysis: {topic} trends for 2025",
1964
- f"Expert opinion on {topic} industry changes",
1965
- f"Research shows {topic} impact on society",
1966
- f"Global {topic} market outlook"
1967
- ]
1968
- return f"Top {limit} news articles about {topic}:\n" + "\n".join(news_items[:limit])
1969
-
1970
- def calculate_math(expression: str) -> str:
1971
- """Calculate mathematical expressions safely"""
1972
- try:
1973
- # Simple and safe evaluation
1974
- allowed_chars = "0123456789+-*/().,_ "
1975
- if all(c in allowed_chars for c in expression):
1976
- result = eval(expression)
1977
- return f"Result: {expression} = {result}"
1978
- else:
1979
- return f"Invalid expression: {expression}"
1980
- except Exception as e:
1981
- return f"Error calculating {expression}: {str(e)}"
1982
-
1983
- def get_company_info(company: str) -> str:
1984
- """Get basic company information"""
1985
- companies = {
1986
- "openai": "OpenAI - AI research company, creator of GPT models",
1987
- "microsoft": "Microsoft - Technology corporation, cloud computing and software",
1988
- "google": "Google - Search engine and technology company",
1989
- "amazon": "Amazon - E-commerce and cloud computing platform"
1990
- }
1991
- return companies.get(company.lower(), f"Company information not found for {company}")
1992
-
1993
- # Convert to FunctionTool objects[2]
1994
- tools = [
1995
- FunctionTool.from_defaults(fn=get_weather),
1996
- FunctionTool.from_defaults(fn=search_news),
1997
- FunctionTool.from_defaults(fn=calculate_math),
1998
- FunctionTool.from_defaults(fn=get_company_info)
1999
- ]
2000
-
2001
- return tools
2002
-
2003
- async def run_complete_workflow(self):
2004
- """
2005
- Run the complete MCP workflow demonstration
2006
- """
2007
- print("🚀 Starting Complete MCP Workflow Demo")
2008
- print("=" * 60)
2009
-
2010
- # Step 1: Setup MCP Connection (simulated)
2011
- print("\n📡 Step 1: Setting up MCP Connection...")
2012
-
2013
- # In real implementation, this would connect to actual MCP server
2014
- mock_tools = await self.create_mock_mcp_tools()
2015
-
2016
- connection_result = {
2017
- "success": True,
2018
- "tools_count": len(mock_tools),
2019
- "tool_names": [tool.metadata.name for tool in mock_tools],
2020
- "mcp_tools_output": mock_tools
2021
- }
2022
-
2023
- if connection_result["success"]:
2024
- print(f"✅ MCP Connection successful!")
2025
- print(f"📋 Retrieved {connection_result['tools_count']} tools:")
2026
- for tool_name in connection_result['tool_names']:
2027
- print(f" - {tool_name}")
2028
- else:
2029
- print(f"❌ MCP Connection failed: {connection_result.get('error')}")
2030
- return
2031
-
2032
- # Step 2: Create and test MCP Agent
2033
- print(f"\n🤖 Step 2: Creating MCP-Powered Agent...")
2034
-
2035
- test_queries = [
2036
- "What's the weather like in London?",
2037
- "Search for news about artificial intelligence",
2038
- "Calculate 15 * 8 + 32",
2039
- "Tell me about OpenAI company",
2040
- "What tools do you have and what can you help me with?"
2041
- ]
2042
-
2043
- for i, query in enumerate(test_queries, 1):
2044
- print(f"\n💬 Query {i}: {query}")
2045
- print("-" * 40)
2046
-
2047
- agent_result = await self.agent_node.execute(
2048
- mcp_tools_input=connection_result["mcp_tools_output"],
2049
- user_query=query,
2050
- llm_model="gpt-4o-mini",
2051
- system_prompt="""You are a helpful AI assistant with access to weather, news, calculation, and company information tools.
2052
-
2053
- When a user asks a question:
2054
- 1. Determine which tool(s) can help answer their question
2055
- 2. Use the appropriate tool(s) to gather information
2056
- 3. Provide a clear, helpful response based on the tool results
2057
-
2058
- Always be informative and explain what tools you used.""",
2059
- max_iterations=5
2060
- )
2061
-
2062
- if agent_result["success"]:
2063
- print(f"🎯 Agent Response:")
2064
- print(f"{agent_result['agent_response']}")
2065
- else:
2066
- print(f"❌ Agent Error: {agent_result['error']}")
2067
-
2068
- print("\n" + "="*50)
2069
-
2070
- # Function to connect to real MCP servers when available
2071
- async def connect_to_real_mcp_server(server_url: str):
2072
- """
2073
- Example of connecting to a real MCP server
2074
- Replace server_url with actual public MCP servers
2075
- """
2076
- try:
2077
- from llama_index.tools.mcp import aget_tools_from_mcp_url
2078
-
2079
- print(f"🔌 Attempting to connect to: {server_url}")
2080
- tools = await aget_tools_from_mcp_url(server_url)
2081
-
2082
- print(f"✅ Connected successfully! Found {len(tools)} tools:")
2083
- for tool in tools:
2084
- print(f" - {tool.metadata.name}: {tool.metadata.description}")
2085
-
2086
- return tools
2087
-
2088
- except Exception as e:
2089
- print(f"❌ Failed to connect to {server_url}: {e}")
2090
- return []
2091
-
2092
- # Main execution
2093
- async def main():
2094
- """Run the complete demo"""
2095
-
2096
- # Option 1: Run with mock tools (works immediately)
2097
- print("🎮 Running MCP Workflow Demo with Mock Tools")
2098
- demo = CompleteMCPWorkflowDemo()
2099
- await demo.run_complete_workflow()
2100
-
2101
- # Option 2: Try connecting to real MCP servers (uncomment when available)
2102
- # real_servers = [
2103
- # "http://your-mcp-server.com:8000/sse",
2104
- # "https://api.example.com/mcp"
2105
- # ]
2106
- #
2107
- # for server_url in real_servers:
2108
- # tools = await connect_to_real_mcp_server(server_url)
2109
- # if tools:
2110
- # # Use real tools with agent
2111
- # agent_node = MCPAgentNode()
2112
- # result = await agent_node.execute(
2113
- # mcp_tools_input=tools,
2114
- # user_query="What can you help me with?",
2115
- # llm_model="gpt-4o-mini"
2116
- # )
2117
- # print(f"Real MCP Agent Response: {result}")
2118
-
2119
- if __name__ == "__main__":
2120
- asyncio.run(main())
2121
- '''
2122
- # all nodes plus python code
2123
 
2124
  external_modal_config = ""
2125
 
 
11
  import time
12
 
13
  #prompts
14
+ modal_params = open("modal_params.txt", 'r').read()
15
+ modal_demo = open("modal_demo.txt", 'r').read()
16
+ all_nodes = open("all_nodes.txt", 'r').read()
17
 
18
  external_modal_config = ""
19
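
Note: the new lines call open() without closing the returned file handles. A minimal sketch of an equivalent loader that reads and closes the files in one step, assuming the three .txt prompt files sit next to app.py (the helper name is illustrative, not part of the commit):

    from pathlib import Path

    def load_prompt(name: str) -> str:
        # Path.read_text() opens, reads, and closes the file in one call
        return Path(name).read_text(encoding="utf-8")

    modal_params = load_prompt("modal_params.txt")
    modal_demo = load_prompt("modal_demo.txt")
    all_nodes = load_prompt("all_nodes.txt")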