{
"data": {
"edges": [
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-jhEkE",
"name": "text_output",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-FlJ4C",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-OpenAIModel-jhEkE{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-jhEkEœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-FlJ4C{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-FlJ4Cœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"selected": false,
"source": "OpenAIModel-jhEkE",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-jhEkEœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}",
"target": "ChatOutput-FlJ4C",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-FlJ4Cœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Prompt",
"id": "Prompt-Lmlx2",
"name": "prompt",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "system_message",
"id": "OpenAIModel-cn7cK",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-Prompt-Lmlx2{œdataTypeœ:œPromptœ,œidœ:œPrompt-Lmlx2œ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-cn7cK{œfieldNameœ:œsystem_messageœ,œidœ:œOpenAIModel-cn7cKœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "Prompt-Lmlx2",
"sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-Lmlx2œ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}",
"target": "OpenAIModel-cn7cK",
"targetHandle": "{œfieldNameœ: œsystem_messageœ, œidœ: œOpenAIModel-cn7cKœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "ChatInput",
"id": "ChatInput-MbPd9",
"name": "message",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-cn7cK",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-ChatInput-MbPd9{œdataTypeœ:œChatInputœ,œidœ:œChatInput-MbPd9œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-cn7cK{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-cn7cKœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "ChatInput-MbPd9",
"sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-MbPd9œ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}",
"target": "OpenAIModel-cn7cK",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-cn7cKœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Prompt",
"id": "Prompt-TbFnC",
"name": "prompt",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "system_message",
"id": "OpenAIModel-xWhtK",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-Prompt-TbFnC{œdataTypeœ:œPromptœ,œidœ:œPrompt-TbFnCœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-xWhtK{œfieldNameœ:œsystem_messageœ,œidœ:œOpenAIModel-xWhtKœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "Prompt-TbFnC",
"sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-TbFnCœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}",
"target": "OpenAIModel-xWhtK",
"targetHandle": "{œfieldNameœ: œsystem_messageœ, œidœ: œOpenAIModel-xWhtKœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-cn7cK",
"name": "text_output",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-xWhtK",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-OpenAIModel-cn7cK{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-cn7cKœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-xWhtK{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-xWhtKœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-cn7cK",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-cn7cKœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}",
"target": "OpenAIModel-xWhtK",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-xWhtKœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Prompt",
"id": "Prompt-3CcjN",
"name": "prompt",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "system_message",
"id": "OpenAIModel-jhEkE",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-Prompt-3CcjN{œdataTypeœ:œPromptœ,œidœ:œPrompt-3CcjNœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-jhEkE{œfieldNameœ:œsystem_messageœ,œidœ:œOpenAIModel-jhEkEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "Prompt-3CcjN",
"sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-3CcjNœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}",
"target": "OpenAIModel-jhEkE",
"targetHandle": "{œfieldNameœ: œsystem_messageœ, œidœ: œOpenAIModel-jhEkEœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "OpenAIModel",
"id": "OpenAIModel-xWhtK",
"name": "text_output",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "OpenAIModel-jhEkE",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "reactflow__edge-OpenAIModel-xWhtK{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-xWhtKœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-jhEkE{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-jhEkEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"source": "OpenAIModel-xWhtK",
"sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-xWhtKœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}",
"target": "OpenAIModel-jhEkE",
"targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-jhEkEœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}"
}
],
"nodes": [
{
"data": {
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt",
"id": "Prompt-TbFnC",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {
"template": []
},
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt",
"documentation": "",
"edited": false,
"field_order": [
"template"
],
"frozen": false,
"icon": "prompts",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Prompt Message",
"method": "build_prompt",
"name": "prompt",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
"advanced": false,
"display_name": "Template",
"dynamic": false,
"info": "",
"list": false,
"load_from_db": false,
"name": "template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"type": "prompt",
"value": "You are a seasoned business analyst with a strong background in tech product development and market research. Your analytical skills are unparalleled, allowing you to dissect product concepts and evaluate their market viability with precision. You have a keen eye for identifying potential challenges and opportunities that others might overlook. Your insights have been crucial in shaping successful product strategies for numerous tech companies.\n\nYour task is to:\n\n1. Evaluate the concept in terms of market potential and technical feasibility\n2. Identify two potential challenges for developing this product\n3. Suggest one improvement or expansion to the concept\n\n\nPlease structure your response as follows:\n\nConcept Evaluation:\n[concept_evaluation]\n\nPotential Challenges:\n1. [challenge_1]\n2. [challenge_2]\n...\n\nImprovement Suggestion:\n[improvement_suggestion]\n\nProvide an objective and well-founded analysis, considering market and technological factors in your evaluation.\n"
},
"tool_placeholder": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Tool Placeholder",
"dynamic": false,
"info": "A placeholder input for tool mode.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "tool_placeholder",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"type": "Prompt"
},
"dragging": false,
"height": 260,
"id": "Prompt-TbFnC",
"position": {
"x": 1921.9168573384,
"y": 1162.4082184281983
},
"positionAbsolute": {
"x": 1921.9168573384,
"y": 1162.4082184281983
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"id": "ChatInput-MbPd9",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"category": "inputs",
"conditional_paths": [],
"custom_fields": {},
"description": "Get chat inputs from the Playground.",
"display_name": "Chat Input",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"files",
"background_color",
"chat_icon",
"text_color"
],
"frozen": false,
"icon": "MessagesSquare",
"key": "ChatInput",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Message",
"method": "message_response",
"name": "message",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"background_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Background Color",
"dynamic": false,
"info": "The background color of the icon.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "background_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"chat_icon": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Icon",
"dynamic": false,
"info": "The icon of the message.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "chat_icon",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, FileInput, MessageTextInput, MultilineInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_USER, MESSAGE_SENDER_USER\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\"background_color\": background_color, \"text_color\": text_color, \"icon\": icon},\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"files": {
"_input_type": "FileInput",
"advanced": true,
"display_name": "Files",
"dynamic": false,
"fileTypes": [
"txt",
"md",
"mdx",
"csv",
"json",
"yaml",
"yml",
"xml",
"html",
"htm",
"pdf",
"docx",
"py",
"sh",
"sql",
"js",
"ts",
"tsx",
"jpg",
"jpeg",
"png",
"bmp",
"image"
],
"file_path": "",
"info": "Files to be sent with the message.",
"list": true,
"name": "files",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "file",
"value": ""
},
"input_value": {
"_input_type": "MultilineInput",
"advanced": false,
"display_name": "Text",
"dynamic": false,
"info": "Message to be passed as input.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"multiline": true,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "The growing demand for personalized, AI-driven mental health support tools that can provide real-time interventions and track long-term emotional well-being."
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Sender Type",
"dynamic": false,
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": "User"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "User"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"text_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Text Color",
"dynamic": false,
"info": "The text color of the name",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "text_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
}
},
"type": "ChatInput"
},
"dragging": false,
"height": 234,
"id": "ChatInput-MbPd9",
"position": {
"x": 1178.0239685549568,
"y": 879.9087836229152
},
"positionAbsolute": {
"x": 1178.0239685549568,
"y": 879.9087836229152
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Display a chat message in the Playground.",
"display_name": "Chat Output",
"id": "ChatOutput-FlJ4C",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Display a chat message in the Playground.",
"display_name": "Chat Output",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"data_template",
"background_color",
"chat_icon",
"text_color"
],
"frozen": false,
"icon": "MessagesSquare",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Message",
"method": "message_response",
"name": "message",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"background_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Background Color",
"dynamic": false,
"info": "The background color of the icon.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "background_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"chat_icon": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Icon",
"dynamic": false,
"info": "The icon of the message.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "chat_icon",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import MESSAGE_SENDER_AI, MESSAGE_SENDER_NAME_AI, MESSAGE_SENDER_USER\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n"
},
"data_template": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Data Template",
"dynamic": false,
"info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "data_template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{text}"
},
"input_value": {
"_input_type": "MessageInput",
"advanced": false,
"display_name": "Text",
"dynamic": false,
"info": "Message to be passed as output.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"display_name": "Sender Type",
"dynamic": false,
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "AI"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"text_color": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Text Color",
"dynamic": false,
"info": "The text color of the name",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "text_color",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"type": "ChatOutput"
},
"dragging": false,
"height": 234,
"id": "ChatOutput-FlJ4C",
"position": {
"x": 3363.868906129255,
"y": 1189.5351768654318
},
"positionAbsolute": {
"x": 3363.868906129255,
"y": 1189.5351768654318
},
"selected": true,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt",
"id": "Prompt-3CcjN",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {
"template": []
},
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt",
"documentation": "",
"edited": false,
"field_order": [
"template"
],
"frozen": false,
"icon": "prompts",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Prompt Message",
"method": "build_prompt",
"name": "prompt",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
"advanced": false,
"display_name": "Template",
"dynamic": false,
"info": "",
"list": false,
"load_from_db": false,
"name": "template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"type": "prompt",
"value": "You are an accomplished product manager with a track record of bringing innovative tech products from concept to market. Your strategic thinking and ability to balance technical feasibility with market demands have resulted in several successful product launches. You excel at distilling complex ideas into clear, actionable plans and have a talent for identifying the most critical features that will drive product adoption and success.\n\nBased on the analysis of the innovative product, create a simplified development plan that includes:\n\n1. Product overview (1-2 sentences)\n2. Three main features to be developed\n3. A basic market launch strategy\n\n\nPlease structure your plan as follows:\n\nProduct Overview:\n[product_overview]\n\nMain Features:\n1. [feature_1]\n2. [feature_2]\n3. [feature_3]\n...\n\nLaunch Strategy:\n[launch_strategy]\n\nYour plan should be concise, realistic, and aligned with the information provided in the previous steps.\n"
},
"tool_placeholder": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Tool Placeholder",
"dynamic": false,
"info": "A placeholder input for tool mode.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "tool_placeholder",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"type": "Prompt"
},
"dragging": false,
"height": 260,
"id": "Prompt-3CcjN",
"position": {
"x": 2647.8305106628454,
"y": 1161.2328062686402
},
"positionAbsolute": {
"x": 2647.8305106628454,
"y": 1161.2328062686402
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Generates text using OpenAI LLMs.",
"display_name": "OpenAI",
"id": "OpenAIModel-cn7cK",
"node": {
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Generates text using OpenAI LLMs.",
"display_name": "OpenAI",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"system_message",
"stream",
"max_tokens",
"model_kwargs",
"json_mode",
"output_schema",
"model_name",
"openai_api_base",
"api_key",
"temperature",
"seed",
"output_parser"
],
"frozen": false,
"icon": "OpenAI",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": "text_response",
"name": "text_output",
"required_inputs": [],
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "build_model",
"name": "model_output",
"required_inputs": [],
"selected": "LanguageModel",
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "The OpenAI API Key to use for the OpenAI model.",
"input_types": [
"Message"
],
"load_from_db": true,
"name": "api_key",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled. [DEPRECATED]\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n SliderInput(\n name=\"temperature\", display_name=\"Temperature\", value=0.1, range_spec=RangeSpec(min=0, max=2, step=0.01)\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key).get_secret_value() if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n"
},
"input_value": {
"_input_type": "MessageInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"json_mode": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "JSON Mode",
"dynamic": false,
"info": "If True, it will output JSON regardless of passing a schema.",
"list": false,
"name": "json_mode",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"max_tokens": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"list": false,
"name": "max_tokens",
"placeholder": "",
"range_spec": {
"max": 128000,
"min": 0,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Name",
"dynamic": false,
"info": "",
"name": "model_name",
"options": [
"gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "gpt-4o-mini"
},
"openai_api_base": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "OpenAI API Base",
"dynamic": false,
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
"list": false,
"load_from_db": false,
"name": "openai_api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"output_parser": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "Output Parser",
"dynamic": false,
"info": "The parser to use to parse the output of the model",
"input_types": [
"OutputParser"
],
"list": false,
"name": "output_parser",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"output_schema": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Schema",
"dynamic": false,
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]",
"list": true,
"name": "output_schema",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"seed": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Seed",
"dynamic": false,
"info": "The seed controls the reproducibility of the job.",
"list": false,
"name": "seed",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 1
},
"stream": {
"_input_type": "BoolInput",
"advanced": false,
"display_name": "Stream",
"dynamic": false,
"info": "Stream the response from the model. Streaming works only in Chat.",
"list": false,
"name": "stream",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"system_message": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "system_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"temperature": {
"_input_type": "FloatInput",
"advanced": false,
"display_name": "Temperature",
"dynamic": false,
"info": "",
"list": false,
"name": "temperature",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "float",
"value": 0.1
}
},
"tool_mode": false
},
"type": "OpenAIModel"
},
"dragging": false,
"height": 630,
"id": "OpenAIModel-cn7cK",
"position": {
"x": 1561.5122766985614,
"y": 805.4323582784689
},
"positionAbsolute": {
"x": 1561.5122766985614,
"y": 805.4323582784689
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Generates text using OpenAI LLMs.",
"display_name": "OpenAI",
"id": "OpenAIModel-jhEkE",
"node": {
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Generates text using OpenAI LLMs.",
"display_name": "OpenAI",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"system_message",
"stream",
"max_tokens",
"model_kwargs",
"json_mode",
"output_schema",
"model_name",
"openai_api_base",
"api_key",
"temperature",
"seed",
"output_parser"
],
"frozen": false,
"icon": "OpenAI",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": "text_response",
"name": "text_output",
"required_inputs": [],
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "build_model",
"name": "model_output",
"required_inputs": [],
"selected": "LanguageModel",
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "The OpenAI API Key to use for the OpenAI model.",
"input_types": [
"Message"
],
"load_from_db": true,
"name": "api_key",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled. [DEPRECATED]\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n SliderInput(\n name=\"temperature\", display_name=\"Temperature\", value=0.1, range_spec=RangeSpec(min=0, max=2, step=0.01)\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key).get_secret_value() if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n"
},
"input_value": {
"_input_type": "MessageInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"json_mode": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "JSON Mode",
"dynamic": false,
"info": "If True, it will output JSON regardless of passing a schema.",
"list": false,
"name": "json_mode",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"max_tokens": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"list": false,
"name": "max_tokens",
"placeholder": "",
"range_spec": {
"max": 128000,
"min": 0,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Name",
"dynamic": false,
"info": "",
"name": "model_name",
"options": [
"gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "gpt-4o-mini"
},
"openai_api_base": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "OpenAI API Base",
"dynamic": false,
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
"list": false,
"load_from_db": false,
"name": "openai_api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"output_parser": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "Output Parser",
"dynamic": false,
"info": "The parser to use to parse the output of the model",
"input_types": [
"OutputParser"
],
"list": false,
"name": "output_parser",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"output_schema": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Schema",
"dynamic": false,
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]",
"list": true,
"name": "output_schema",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"seed": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Seed",
"dynamic": false,
"info": "The seed controls the reproducibility of the job.",
"list": false,
"name": "seed",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 1
},
"stream": {
"_input_type": "BoolInput",
"advanced": false,
"display_name": "Stream",
"dynamic": false,
"info": "Stream the response from the model. Streaming works only in Chat.",
"list": false,
"name": "stream",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"system_message": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "system_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"temperature": {
"_input_type": "FloatInput",
"advanced": false,
"display_name": "Temperature",
"dynamic": false,
"info": "",
"list": false,
"name": "temperature",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "float",
"value": 0.1
}
},
"tool_mode": false
},
"type": "OpenAIModel"
},
"dragging": false,
"height": 630,
"id": "OpenAIModel-jhEkE",
"position": {
"x": 3013.354174710099,
"y": 800.0470124871745
},
"positionAbsolute": {
"x": 3013.354174710099,
"y": 800.0470124871745
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt",
"id": "Prompt-Lmlx2",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {
"template": []
},
"description": "Create a prompt template with dynamic variables.",
"display_name": "Prompt",
"documentation": "",
"edited": false,
"field_order": [
"template"
],
"frozen": false,
"icon": "prompts",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Prompt Message",
"method": "build_prompt",
"name": "prompt",
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n def post_code_processing(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = super().post_code_processing(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n"
},
"template": {
"_input_type": "PromptInput",
"advanced": false,
"display_name": "Template",
"dynamic": false,
"info": "",
"list": false,
"load_from_db": false,
"name": "template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"type": "prompt",
"value": "You are a visionary product innovator at a cutting-edge tech startup. Your expertise lies in identifying emerging market trends and translating them into groundbreaking product concepts. Your creative thinking and deep understanding of technology allow you to envision products that not only meet current needs but also anticipate future demands. Your ideas often challenge conventional thinking and push the boundaries of what's possible with current technology.\n\nPlease create a product concept, providing:\n\n1. Product name\n2. Brief description (2-3 sentences)\n3. Main innovative feature\n4. Target audience\n\nStructure your response like this:\n\nProduct Name: [product_name]\n\nDescription: [product_description]\n\nMain Innovation: [main_innovation]\n\nTarget Audience: [target_audience]\n\nBe creative and bold in your idea, but keep it realistic and aligned with the provided market trend."
},
"tool_placeholder": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Tool Placeholder",
"dynamic": false,
"info": "A placeholder input for tool mode.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "tool_placeholder",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"type": "Prompt"
},
"dragging": false,
"height": 260,
"id": "Prompt-Lmlx2",
"position": {
"x": 1178.7099500302636,
"y": 1167.8586867404465
},
"positionAbsolute": {
"x": 1178.7099500302636,
"y": 1167.8586867404465
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"id": "note-tSZsX",
"node": {
"description": "### Input Examples\n1.\n \"The growing demand for personalized, AI-driven mental health support tools that can provide real-time interventions and track long-term emotional well-being.\"\n\n\n2. \n \"The increasing need for secure and user-friendly decentralized finance (DeFi) platforms that make cryptocurrency investments accessible to non-tech-savvy users.\"\n \n\n3. \n \"The rising popularity of immersive, augmented reality (AR) experiences for remote collaboration and virtual team-building in distributed workforces.\"\n\n\n4. \n \"The expanding market for smart, IoT-enabled urban farming solutions that allow city dwellers to grow their own food efficiently in small spaces.\"\n\n\n5. \n \"The emerging demand for AI-powered personal styling and shopping assistants that consider sustainability, body positivity, and individual style preferences.\"\n\n",
"display_name": "",
"documentation": "",
"template": {
"backgroundColor": "emerald"
}
},
"type": "note"
},
"dragging": false,
"height": 430,
"id": "note-tSZsX",
"position": {
"x": 528.0392006831054,
"y": 973.781986567496
},
"positionAbsolute": {
"x": 528.0392006831054,
"y": 973.781986567496
},
"resizing": false,
"selected": false,
"style": {
"height": 430,
"width": 600
},
"type": "noteNode",
"width": 600
},
{
"data": {
"id": "note-FCbTW",
"node": {
"description": "### Prompt Chaining\n\nThis flow demonstrates fundamental prompt chaining principles:\n\n1. **Chain Structure**\n • User input → First Prompt → LLM\n • First output → Second Prompt → LLM\n • Second output → Final Prompt → LLM\n • Final output\n\n2. **Key Technique Elements**\n • Each prompt is specifically designed to process previous output\n • Output formatting ensures clean handoff between stages\n • Context flows naturally through the chain\n • Each LLM call builds upon previous results\n\n3. **Technical Implementation**\n • Multiple prompt templates working in sequence\n • Strategic input/output connections\n • Consistent message handling between stages\n • Progressive refinement through the chain\n\nThis pattern can be adapted for any use case by modifying the prompt templates while keeping the same chaining structure.",
"display_name": "",
"documentation": "",
"template": {
"backgroundColor": "blue"
}
},
"type": "note"
},
"dragging": false,
"height": 451,
"id": "note-FCbTW",
"position": {
"x": 892.4280059782889,
"y": 406.2411111617474
},
"positionAbsolute": {
"x": 892.4280059782889,
"y": 406.2411111617474
},
"resizing": false,
"selected": false,
"style": {
"height": 451,
"width": 600
},
"type": "noteNode",
"width": 600
},
{
"data": {
"description": "Generates text using OpenAI LLMs.",
"display_name": "OpenAI",
"id": "OpenAIModel-xWhtK",
"node": {
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Generates text using OpenAI LLMs.",
"display_name": "OpenAI",
"documentation": "",
"edited": false,
"field_order": [
"input_value",
"system_message",
"stream",
"max_tokens",
"model_kwargs",
"json_mode",
"output_schema",
"model_name",
"openai_api_base",
"api_key",
"temperature",
"seed",
"output_parser"
],
"frozen": false,
"icon": "OpenAI",
"legacy": false,
"lf_version": "1.0.19.post2",
"metadata": {},
"output_types": [],
"outputs": [
{
"cache": true,
"display_name": "Text",
"method": "text_response",
"name": "text_output",
"required_inputs": [],
"selected": "Message",
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
"cache": true,
"display_name": "Language Model",
"method": "build_model",
"name": "model_output",
"required_inputs": [],
"selected": "LanguageModel",
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "The OpenAI API Key to use for the OpenAI model.",
"input_types": [
"Message"
],
"load_from_db": true,
"name": "api_key",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled. [DEPRECATED]\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n SliderInput(\n name=\"temperature\", display_name=\"Temperature\", value=0.1, range_spec=RangeSpec(min=0, max=2, step=0.01)\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key).get_secret_value() if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n"
},
"input_value": {
"_input_type": "MessageInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"json_mode": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "JSON Mode",
"dynamic": false,
"info": "If True, it will output JSON regardless of passing a schema.",
"list": false,
"name": "json_mode",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"max_tokens": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"list": false,
"name": "max_tokens",
"placeholder": "",
"range_spec": {
"max": 128000,
"min": 0,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"display_name": "Model Name",
"dynamic": false,
"info": "",
"name": "model_name",
"options": [
"gpt-4o-mini",
"gpt-4o",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125"
],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "gpt-4o-mini"
},
"openai_api_base": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "OpenAI API Base",
"dynamic": false,
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
"list": false,
"load_from_db": false,
"name": "openai_api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"output_parser": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "Output Parser",
"dynamic": false,
"info": "The parser to use to parse the output of the model",
"input_types": [
"OutputParser"
],
"list": false,
"name": "output_parser",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"output_schema": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Schema",
"dynamic": false,
"info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]",
"list": true,
"name": "output_schema",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"seed": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Seed",
"dynamic": false,
"info": "The seed controls the reproducibility of the job.",
"list": false,
"name": "seed",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "int",
"value": 1
},
"stream": {
"_input_type": "BoolInput",
"advanced": false,
"display_name": "Stream",
"dynamic": false,
"info": "Stream the response from the model. Streaming works only in Chat.",
"list": false,
"name": "stream",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"system_message": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": [
"Message"
],
"list": false,
"load_from_db": false,
"name": "system_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"temperature": {
"_input_type": "FloatInput",
"advanced": false,
"display_name": "Temperature",
"dynamic": false,
"info": "",
"list": false,
"name": "temperature",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "float",
"value": 0.1
}
},
"tool_mode": false
},
"type": "OpenAIModel"
},
"dragging": false,
"height": 630,
"id": "OpenAIModel-xWhtK",
"position": {
"x": 2294.5699142425065,
"y": 796.3787911368381
},
"positionAbsolute": {
"x": 2294.5699142425065,
"y": 796.3787911368381
},
"selected": false,
"type": "genericNode",
"width": 320
}
],
"viewport": {
"x": -648.2477908002534,
"y": -146.07166399594246,
"zoom": 0.5557618545546786
}
},
"description": "starterProjects.basicPromptChaining.description",
"endpoint_name": null,
"gradient": "0",
"icon": "Link",
"id": "b5b0c252-95ae-4b07-8211-67c8b12ea60e",
"is_component": false,
"last_tested_version": "1.0.19.post2",
"name": "starterProjects.basicPromptChaining.name",
"tags": [
"chatbots"
]
}