Dataset columns (string length / class statistics from the dataset viewer):
code: string (66 to 870k chars)
docstring: string (19 to 26.7k chars)
func_name: string (1 to 138 chars)
language: string (1 class)
repo: string (7 to 68 chars)
path: string (5 to 324 chars)
url: string (46 to 389 chars)
license: string (7 classes)
# Used as a context manager in the source module (decorated there with
# @contextlib.contextmanager); relies on the module-level `import os`
# and the `_tempfile` helper defined alongside it.
def open_atomic(filepath, *args, **kwargs):
    """
    Open temporary file object that atomically moves to destination upon
    exiting.

    Allows reading and writing to and from the same filename.

    Parameters
    ----------
    filepath : string
        the file path to be opened
    fsync : bool
        whether to force write the file to disk
    kwargs : mixed
        Any valid keyword arguments for :code:`open`
    """
    fsync = kwargs.pop('fsync', False)
    with _tempfile(dir=os.path.dirname(filepath)) as tmppath:
        with open(tmppath, *args, **kwargs) as f:
            yield f
            if fsync:
                f.flush()
                os.fsync(f.fileno())
        os.rename(tmppath, filepath)
Open temporary file object that atomically moves to destination upon exiting. Allows reading and writing to and from the same filename. Parameters ---------- filepath : string the file path to be opened fsync : bool whether to force write the file to disk kwargs : mixed Any valid keyword arguments for :code:`open`
open_atomic
python
karpathy/arxiv-sanity-preserver
utils.py
https://github.com/karpathy/arxiv-sanity-preserver/blob/master/utils.py
MIT
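A minimal usage sketch for the row above (hedged: it assumes open_atomic is registered as a context manager via @contextlib.contextmanager, as in the source module, and the path is illustrative):

# Write a file so readers never observe a half-written version; the
# temporary file is renamed into place only after the block exits.
with open_atomic('/tmp/settings.json', 'w', fsync=True) as f:
    f.write('{"ready": true}')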
async def generate_report_plan(state: ReportState, config: RunnableConfig):
    """Generate the initial report plan with sections.

    This node:
    1. Gets configuration for the report structure and search parameters
    2. Generates search queries to gather context for planning
    3. Performs web searches using those queries
    4. Uses an LLM to generate a structured plan with sections

    Args:
        state: Current graph state containing the report topic
        config: Configuration for models, search APIs, etc.

    Returns:
        Dict containing the generated sections
    """
    # Inputs
    topic = state["topic"]

    # Get list of feedback on the report plan
    feedback_list = state.get("feedback_on_report_plan", [])

    # Concatenate feedback on the report plan into a single string
    feedback = " /// ".join(feedback_list) if feedback_list else ""

    # Get configuration
    configurable = WorkflowConfiguration.from_runnable_config(config)
    report_structure = configurable.report_structure
    number_of_queries = configurable.number_of_queries
    search_api = get_config_value(configurable.search_api)
    search_api_config = configurable.search_api_config or {}  # Get the config dict, default to empty
    params_to_pass = get_search_params(search_api, search_api_config)  # Filter parameters

    # Convert JSON object to string if necessary
    if isinstance(report_structure, dict):
        report_structure = str(report_structure)

    # Set writer model (model used for query writing)
    writer_provider = get_config_value(configurable.writer_provider)
    writer_model_name = get_config_value(configurable.writer_model)
    writer_model_kwargs = get_config_value(configurable.writer_model_kwargs or {})
    writer_model = init_chat_model(model=writer_model_name, model_provider=writer_provider, model_kwargs=writer_model_kwargs)
    structured_llm = writer_model.with_structured_output(Queries)

    # Format system instructions
    system_instructions_query = report_planner_query_writer_instructions.format(
        topic=topic,
        report_organization=report_structure,
        number_of_queries=number_of_queries,
        today=get_today_str()
    )

    # Generate queries
    results = await structured_llm.ainvoke([SystemMessage(content=system_instructions_query),
                                            HumanMessage(content="Generate search queries that will help with planning the sections of the report.")])

    # Web search
    query_list = [query.search_query for query in results.queries]

    # Search the web with parameters
    source_str = await select_and_execute_search(search_api, query_list, params_to_pass)

    # Format system instructions
    system_instructions_sections = report_planner_instructions.format(topic=topic,
                                                                      report_organization=report_structure,
                                                                      context=source_str,
                                                                      feedback=feedback)

    # Set the planner
    planner_provider = get_config_value(configurable.planner_provider)
    planner_model = get_config_value(configurable.planner_model)
    planner_model_kwargs = get_config_value(configurable.planner_model_kwargs or {})

    # Report planner instructions
    planner_message = """Generate the sections of the report. Your response must include a 'sections' field containing a list of sections.
                         Each section must have: name, description, research, and content fields."""

    # Run the planner
    if planner_model == "claude-3-7-sonnet-latest":
        # Allocate a thinking budget for claude-3-7-sonnet-latest as the planner model
        planner_llm = init_chat_model(model=planner_model,
                                      model_provider=planner_provider,
                                      max_tokens=20_000,
                                      thinking={"type": "enabled", "budget_tokens": 16_000})
    else:
        # With other models, thinking tokens are not specifically allocated
        planner_llm = init_chat_model(model=planner_model,
                                      model_provider=planner_provider,
                                      model_kwargs=planner_model_kwargs)

    # Generate the report sections
    structured_llm = planner_llm.with_structured_output(Sections)
    report_sections = await structured_llm.ainvoke([SystemMessage(content=system_instructions_sections),
                                                    HumanMessage(content=planner_message)])

    # Get sections
    sections = report_sections.sections

    return {"sections": sections}
Generate the initial report plan with sections. This node: 1. Gets configuration for the report structure and search parameters 2. Generates search queries to gather context for planning 3. Performs web searches using those queries 4. Uses an LLM to generate a structured plan with sections Args: state: Current graph state containing the report topic config: Configuration for models, search APIs, etc. Returns: Dict containing the generated sections
generate_report_plan
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
def human_feedback(state: ReportState, config: RunnableConfig) -> Command[Literal["generate_report_plan", "build_section_with_web_research"]]:
    """Get human feedback on the report plan and route to next steps.

    This node:
    1. Formats the current report plan for human review
    2. Gets feedback via an interrupt
    3. Routes to either:
       - Section writing if plan is approved
       - Plan regeneration if feedback is provided

    Args:
        state: Current graph state with sections to review
        config: Configuration for the workflow

    Returns:
        Command to either regenerate plan or start section writing
    """
    # Get sections
    topic = state["topic"]
    sections = state['sections']
    sections_str = "\n\n".join(
        f"Section: {section.name}\n"
        f"Description: {section.description}\n"
        f"Research needed: {'Yes' if section.research else 'No'}\n"
        for section in sections
    )

    # Get feedback on the report plan from interrupt
    interrupt_message = f"""Please provide feedback on the following report plan. \n\n{sections_str}\n
                        \nDoes the report plan meet your needs?\nPass 'true' to approve the report plan.\nOr, provide feedback to regenerate the report plan:"""

    feedback = interrupt(interrupt_message)

    # If the user approves the report plan, kick off section writing
    if isinstance(feedback, bool) and feedback is True:
        # Treat this as approve and kick off section writing
        return Command(goto=[
            Send("build_section_with_web_research", {"topic": topic, "section": s, "search_iterations": 0})
            for s in sections
            if s.research
        ])

    # If the user provides feedback, regenerate the report plan
    elif isinstance(feedback, str):
        # Treat this as feedback and append it to the existing list
        return Command(goto="generate_report_plan",
                       update={"feedback_on_report_plan": [feedback]})
    else:
        raise TypeError(f"Interrupt value of type {type(feedback)} is not supported.")
Get human feedback on the report plan and route to next steps. This node: 1. Formats the current report plan for human review 2. Gets feedback via an interrupt 3. Routes to either: - Section writing if plan is approved - Plan regeneration if feedback is provided Args: state: Current graph state with sections to review config: Configuration for the workflow Returns: Command to either regenerate plan or start section writing
human_feedback
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
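A hedged sketch of how a caller answers this interrupt (the compiled graph and thread_id are assumptions here; a graph compiled with a checkpointer is required, and Command(resume=...) is langgraph's resume API):

from langgraph.types import Command

thread = {"configurable": {"thread_id": "1"}}
# Pass True to approve the plan and start section writing...
graph.invoke(Command(resume=True), config=thread)
# ...or pass a string to feed it back into plan regeneration.
graph.invoke(Command(resume="Merge the two background sections"), config=thread)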
async def generate_queries(state: SectionState, config: RunnableConfig):
    """Generate search queries for researching a specific section.

    This node uses an LLM to generate targeted search queries based on the
    section topic and description.

    Args:
        state: Current state containing section details
        config: Configuration including number of queries to generate

    Returns:
        Dict containing the generated search queries
    """
    # Get state
    topic = state["topic"]
    section = state["section"]

    # Get configuration
    configurable = WorkflowConfiguration.from_runnable_config(config)
    number_of_queries = configurable.number_of_queries

    # Generate queries
    writer_provider = get_config_value(configurable.writer_provider)
    writer_model_name = get_config_value(configurable.writer_model)
    writer_model_kwargs = get_config_value(configurable.writer_model_kwargs or {})
    writer_model = init_chat_model(model=writer_model_name, model_provider=writer_provider, model_kwargs=writer_model_kwargs)
    structured_llm = writer_model.with_structured_output(Queries)

    # Format system instructions
    system_instructions = query_writer_instructions.format(topic=topic,
                                                           section_topic=section.description,
                                                           number_of_queries=number_of_queries,
                                                           today=get_today_str())

    # Generate queries
    queries = await structured_llm.ainvoke([SystemMessage(content=system_instructions),
                                            HumanMessage(content="Generate search queries on the provided topic.")])

    return {"search_queries": queries.queries}
Generate search queries for researching a specific section. This node uses an LLM to generate targeted search queries based on the section topic and description. Args: state: Current state containing section details config: Configuration including number of queries to generate Returns: Dict containing the generated search queries
generate_queries
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
async def search_web(state: SectionState, config: RunnableConfig):
    """Execute web searches for the section queries.

    This node:
    1. Takes the generated queries
    2. Executes searches using configured search API
    3. Formats results into usable context

    Args:
        state: Current state with search queries
        config: Search API configuration

    Returns:
        Dict with search results and updated iteration count
    """
    # Get state
    search_queries = state["search_queries"]

    # Get configuration
    configurable = WorkflowConfiguration.from_runnable_config(config)
    search_api = get_config_value(configurable.search_api)
    search_api_config = configurable.search_api_config or {}  # Get the config dict, default to empty
    params_to_pass = get_search_params(search_api, search_api_config)  # Filter parameters

    # Web search
    query_list = [query.search_query for query in search_queries]

    # Search the web with parameters
    source_str = await select_and_execute_search(search_api, query_list, params_to_pass)

    return {"source_str": source_str,
            "search_iterations": state["search_iterations"] + 1}
Execute web searches for the section queries. This node: 1. Takes the generated queries 2. Executes searches using configured search API 3. Formats results into usable context Args: state: Current state with search queries config: Search API configuration Returns: Dict with search results and updated iteration count
search_web
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
async def write_section(state: SectionState, config: RunnableConfig) -> Command[Literal[END, "search_web"]]:
    """Write a section of the report and evaluate if more research is needed.

    This node:
    1. Writes section content using search results
    2. Evaluates the quality of the section
    3. Either:
       - Completes the section if quality passes
       - Triggers more research if quality fails

    Args:
        state: Current state with search results and section info
        config: Configuration for writing and evaluation

    Returns:
        Command to either complete section or do more research
    """
    # Get state
    topic = state["topic"]
    section = state["section"]
    source_str = state["source_str"]

    # Get configuration
    configurable = WorkflowConfiguration.from_runnable_config(config)

    # Format system instructions
    section_writer_inputs_formatted = section_writer_inputs.format(topic=topic,
                                                                   section_name=section.name,
                                                                   section_topic=section.description,
                                                                   context=source_str,
                                                                   section_content=section.content)

    # Generate section
    writer_provider = get_config_value(configurable.writer_provider)
    writer_model_name = get_config_value(configurable.writer_model)
    writer_model_kwargs = get_config_value(configurable.writer_model_kwargs or {})
    writer_model = init_chat_model(model=writer_model_name, model_provider=writer_provider, model_kwargs=writer_model_kwargs)

    section_content = await writer_model.ainvoke([SystemMessage(content=section_writer_instructions),
                                                  HumanMessage(content=section_writer_inputs_formatted)])

    # Write content to the section object
    section.content = section_content.content

    # Grade prompt
    section_grader_message = ("Grade the report and consider follow-up questions for missing information. "
                              "If the grade is 'pass', return empty strings for all follow-up queries. "
                              "If the grade is 'fail', provide specific search queries to gather missing information.")

    section_grader_instructions_formatted = section_grader_instructions.format(topic=topic,
                                                                               section_topic=section.description,
                                                                               section=section.content,
                                                                               number_of_follow_up_queries=configurable.number_of_queries)

    # Use planner model for reflection
    planner_provider = get_config_value(configurable.planner_provider)
    planner_model = get_config_value(configurable.planner_model)
    planner_model_kwargs = get_config_value(configurable.planner_model_kwargs or {})
    if planner_model == "claude-3-7-sonnet-latest":
        # Allocate a thinking budget for claude-3-7-sonnet-latest as the planner model
        reflection_model = init_chat_model(model=planner_model,
                                           model_provider=planner_provider,
                                           max_tokens=20_000,
                                           thinking={"type": "enabled", "budget_tokens": 16_000}).with_structured_output(Feedback)
    else:
        reflection_model = init_chat_model(model=planner_model,
                                           model_provider=planner_provider,
                                           model_kwargs=planner_model_kwargs).with_structured_output(Feedback)

    # Generate feedback
    feedback = await reflection_model.ainvoke([SystemMessage(content=section_grader_instructions_formatted),
                                               HumanMessage(content=section_grader_message)])

    # If the section is passing or the max search depth is reached, publish the section to completed sections
    if feedback.grade == "pass" or state["search_iterations"] >= configurable.max_search_depth:
        # Publish the section to completed sections
        update = {"completed_sections": [section]}
        if configurable.include_source_str:
            update["source_str"] = source_str
        return Command(update=update, goto=END)

    # Update the existing section with new content and update search queries
    else:
        return Command(
            update={"search_queries": feedback.follow_up_queries, "section": section},
            goto="search_web"
        )
Write a section of the report and evaluate if more research is needed. This node: 1. Writes section content using search results 2. Evaluates the quality of the section 3. Either: - Completes the section if quality passes - Triggers more research if quality fails Args: state: Current state with search results and section info config: Configuration for writing and evaluation Returns: Command to either complete section or do more research
write_section
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
async def write_final_sections(state: SectionState, config: RunnableConfig):
    """Write sections that don't require research using completed sections as context.

    This node handles sections like conclusions or summaries that build on
    the researched sections rather than requiring direct research.

    Args:
        state: Current state with completed sections as context
        config: Configuration for the writing model

    Returns:
        Dict containing the newly written section
    """
    # Get configuration
    configurable = WorkflowConfiguration.from_runnable_config(config)

    # Get state
    topic = state["topic"]
    section = state["section"]
    completed_report_sections = state["report_sections_from_research"]

    # Format system instructions
    system_instructions = final_section_writer_instructions.format(topic=topic,
                                                                   section_name=section.name,
                                                                   section_topic=section.description,
                                                                   context=completed_report_sections)

    # Generate section
    writer_provider = get_config_value(configurable.writer_provider)
    writer_model_name = get_config_value(configurable.writer_model)
    writer_model_kwargs = get_config_value(configurable.writer_model_kwargs or {})
    writer_model = init_chat_model(model=writer_model_name, model_provider=writer_provider, model_kwargs=writer_model_kwargs)

    section_content = await writer_model.ainvoke([SystemMessage(content=system_instructions),
                                                  HumanMessage(content="Generate a report section based on the provided sources.")])

    # Write content to section
    section.content = section_content.content

    # Write the updated section to completed sections
    return {"completed_sections": [section]}
Write sections that don't require research using completed sections as context. This node handles sections like conclusions or summaries that build on the researched sections rather than requiring direct research. Args: state: Current state with completed sections as context config: Configuration for the writing model Returns: Dict containing the newly written section
write_final_sections
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
def gather_completed_sections(state: ReportState):
    """Format completed sections as context for writing final sections.

    This node takes all completed research sections and formats them into
    a single context string for writing summary sections.

    Args:
        state: Current state with completed sections

    Returns:
        Dict with formatted sections as context
    """
    # List of completed sections
    completed_sections = state["completed_sections"]

    # Format completed section to str to use as context for final sections
    completed_report_sections = format_sections(completed_sections)

    return {"report_sections_from_research": completed_report_sections}
Format completed sections as context for writing final sections. This node takes all completed research sections and formats them into a single context string for writing summary sections. Args: state: Current state with completed sections Returns: Dict with formatted sections as context
gather_completed_sections
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
def compile_final_report(state: ReportState, config: RunnableConfig):
    """Compile all sections into the final report.

    This node:
    1. Gets all completed sections
    2. Orders them according to original plan
    3. Combines them into the final report

    Args:
        state: Current state with all completed sections

    Returns:
        Dict containing the complete report
    """
    # Get configuration
    configurable = WorkflowConfiguration.from_runnable_config(config)

    # Get sections
    sections = state["sections"]
    completed_sections = {s.name: s.content for s in state["completed_sections"]}

    # Update sections with completed content while maintaining original order
    for section in sections:
        section.content = completed_sections[section.name]

    # Compile final report
    all_sections = "\n\n".join([s.content for s in sections])

    if configurable.include_source_str:
        return {"final_report": all_sections, "source_str": state["source_str"]}
    else:
        return {"final_report": all_sections}
Compile all sections into the final report. This node: 1. Gets all completed sections 2. Orders them according to original plan 3. Combines them into the final report Args: state: Current state with all completed sections Returns: Dict containing the complete report
compile_final_report
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
def initiate_final_section_writing(state: ReportState):
    """Create parallel tasks for writing non-research sections.

    This edge function identifies sections that don't need research and
    creates parallel writing tasks for each one.

    Args:
        state: Current state with all sections and research context

    Returns:
        List of Send commands for parallel section writing
    """
    # Kick off section writing in parallel via Send() API for any sections that do not require research
    return [
        Send("write_final_sections", {"topic": state["topic"],
                                      "section": s,
                                      "report_sections_from_research": state["report_sections_from_research"]})
        for s in state["sections"]
        if not s.research
    ]
Create parallel tasks for writing non-research sections. This edge function identifies sections that don't need research and creates parallel writing tasks for each one. Args: state: Current state with all sections and research context Returns: List of Send commands for parallel section writing
initiate_final_section_writing
python
langchain-ai/open_deep_research
src/open_deep_research/graph.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/graph.py
MIT
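A minimal sketch of how an edge function like this is typically wired into a LangGraph StateGraph (node names are taken from the functions above; the actual graph assembly lives elsewhere in this file, so treat this wiring as an assumption):

from langgraph.graph import StateGraph

builder = StateGraph(ReportState)
builder.add_node("gather_completed_sections", gather_completed_sections)
builder.add_node("write_final_sections", write_final_sections)
builder.add_conditional_edges(
    "gather_completed_sections",
    initiate_final_section_writing,  # returns a list of Send commands
    ["write_final_sections"],        # possible destinations
)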
def get_search_tool(config: RunnableConfig):
    """Get the appropriate search tool based on configuration"""
    configurable = MultiAgentConfiguration.from_runnable_config(config)
    search_api = get_config_value(configurable.search_api)

    # Return None if no search tool is requested
    if search_api.lower() == "none":
        return None

    # TODO: Configure other search functions as tools
    if search_api.lower() == "tavily":
        search_tool = tavily_search
    elif search_api.lower() == "duckduckgo":
        search_tool = duckduckgo_search
    else:
        raise NotImplementedError(
            f"The search API '{search_api}' is not yet supported in the multi-agent implementation. "
            f"Currently, only Tavily/DuckDuckGo/None is supported. Please use the graph-based implementation in "
            f"src/open_deep_research/graph.py for other search APIs, or set search_api to 'tavily', 'duckduckgo', or 'none'."
        )

    tool_metadata = {**(search_tool.metadata or {}), "type": "search"}
    search_tool.metadata = tool_metadata
    return search_tool
Get the appropriate search tool based on configuration
get_search_tool
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def get_supervisor_tools(config: RunnableConfig) -> list[BaseTool]:
    """Get supervisor tools based on configuration"""
    configurable = MultiAgentConfiguration.from_runnable_config(config)
    search_tool = get_search_tool(config)
    tools = [tool(Sections), tool(Introduction), tool(Conclusion), tool(FinishReport)]
    if configurable.ask_for_clarification:
        tools.append(tool(Question))
    if search_tool is not None:
        tools.append(search_tool)  # Add search tool, if available
    existing_tool_names = {cast(BaseTool, tool).name for tool in tools}
    mcp_tools = await _load_mcp_tools(config, existing_tool_names)
    tools.extend(mcp_tools)
    return tools
Get supervisor tools based on configuration
get_supervisor_tools
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def get_research_tools(config: RunnableConfig) -> list[BaseTool]:
    """Get research tools based on configuration"""
    search_tool = get_search_tool(config)
    tools = [tool(Section), tool(FinishResearch)]
    if search_tool is not None:
        tools.append(search_tool)  # Add search tool, if available
    existing_tool_names = {cast(BaseTool, tool).name for tool in tools}
    mcp_tools = await _load_mcp_tools(config, existing_tool_names)
    tools.extend(mcp_tools)
    return tools
Get research tools based on configuration
get_research_tools
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def supervisor(state: ReportState, config: RunnableConfig):
    """LLM decides whether to call a tool or not"""
    # Messages
    messages = state["messages"]

    # Get configuration
    configurable = MultiAgentConfiguration.from_runnable_config(config)
    supervisor_model = get_config_value(configurable.supervisor_model)

    # Initialize the model
    llm = init_chat_model(model=supervisor_model)

    # If sections have been completed, but we don't yet have the final report, then we need to initiate writing the introduction and conclusion
    if state.get("completed_sections") and not state.get("final_report"):
        research_complete_message = {"role": "user",
                                     "content": "Research is complete. Now write the introduction and conclusion for the report. Here are the completed main body sections: \n\n"
                                                + "\n\n".join([s.content for s in state["completed_sections"]])}
        messages = messages + [research_complete_message]

    # Get tools based on configuration
    supervisor_tool_list = await get_supervisor_tools(config)

    llm_with_tools = (
        llm
        .bind_tools(
            supervisor_tool_list,
            parallel_tool_calls=False,
            # force at least one tool call
            tool_choice="any"
        )
    )

    # Get system prompt
    system_prompt = SUPERVISOR_INSTRUCTIONS.format(today=get_today_str())
    if configurable.mcp_prompt:
        system_prompt += f"\n\n{configurable.mcp_prompt}"

    # Invoke
    return {
        "messages": [
            await llm_with_tools.ainvoke(
                [{"role": "system", "content": system_prompt}] + messages
            )
        ]
    }
LLM decides whether to call a tool or not
supervisor
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def supervisor_tools(state: ReportState, config: RunnableConfig) -> Command[Literal["supervisor", "research_team", "__end__"]]:
    """Performs the tool call and sends to the research agent"""
    configurable = MultiAgentConfiguration.from_runnable_config(config)

    result = []
    sections_list = []
    intro_content = None
    conclusion_content = None
    source_str = ""

    # Get tools based on configuration
    supervisor_tool_list = await get_supervisor_tools(config)
    supervisor_tools_by_name = {tool.name: tool for tool in supervisor_tool_list}
    search_tool_names = {
        tool.name
        for tool in supervisor_tool_list
        if tool.metadata is not None and tool.metadata.get("type") == "search"
    }

    # First process all tool calls to ensure we respond to each one (required for OpenAI)
    for tool_call in state["messages"][-1].tool_calls:
        # Get the tool
        tool = supervisor_tools_by_name[tool_call["name"]]
        # Perform the tool call - use ainvoke for async tools
        try:
            observation = await tool.ainvoke(tool_call["args"], config)
        except NotImplementedError:
            observation = tool.invoke(tool_call["args"], config)

        # Append to messages
        result.append({"role": "tool",
                       "content": observation,
                       "name": tool_call["name"],
                       "tool_call_id": tool_call["id"]})

        # Store special tool results for processing after all tools have been called
        if tool_call["name"] == "Question":
            # Question tool was called - return to supervisor to ask the question
            question_obj = cast(Question, observation)
            result.append({"role": "assistant", "content": question_obj.question})
            return Command(goto=END, update={"messages": result})
        elif tool_call["name"] == "Sections":
            sections_list = cast(Sections, observation).sections
        elif tool_call["name"] == "Introduction":
            # Format introduction with proper H1 heading if not already formatted
            observation = cast(Introduction, observation)
            if not observation.content.startswith("# "):
                intro_content = f"# {observation.name}\n\n{observation.content}"
            else:
                intro_content = observation.content
        elif tool_call["name"] == "Conclusion":
            # Format conclusion with proper H2 heading if not already formatted
            observation = cast(Conclusion, observation)
            if not observation.content.startswith("## "):
                conclusion_content = f"## {observation.name}\n\n{observation.content}"
            else:
                conclusion_content = observation.content
        elif tool_call["name"] in search_tool_names and configurable.include_source_str:
            source_str += cast(str, observation)

    # After processing all tool calls, decide what to do next
    if sections_list:
        # Send the sections to the research agents
        return Command(goto=[Send("research_team", {"section": s}) for s in sections_list],
                       update={"messages": result})
    elif intro_content:
        # Store introduction while waiting for conclusion
        # Append to messages to guide the LLM to write conclusion next
        result.append({"role": "user", "content": "Introduction written. Now write a conclusion section."})
        state_update = {
            "final_report": intro_content,
            "messages": result,
        }
    elif conclusion_content:
        # Get all sections and combine in proper order: Introduction, Body Sections, Conclusion
        intro = state.get("final_report", "")
        body_sections = "\n\n".join([s.content for s in state["completed_sections"]])

        # Assemble final report in correct order
        complete_report = f"{intro}\n\n{body_sections}\n\n{conclusion_content}"

        # Append to messages to indicate completion
        result.append({"role": "user", "content": "Report is now complete with introduction, body sections, and conclusion."})
        state_update = {
            "final_report": complete_report,
            "messages": result,
        }
    else:
        # Default case (for search tools, etc.)
        state_update = {"messages": result}

    # Include source string for evaluation
    if configurable.include_source_str and source_str:
        state_update["source_str"] = source_str

    return Command(goto="supervisor", update=state_update)
Performs the tool call and sends to the research agent
supervisor_tools
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def supervisor_should_continue(state: ReportState) -> str:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call"""
    messages = state["messages"]
    last_message = messages[-1]

    # End because the supervisor asked a question or is finished
    if not last_message.tool_calls or (len(last_message.tool_calls) == 1 and last_message.tool_calls[0]["name"] == "FinishReport"):
        # Exit the graph
        return END

    # If the LLM makes a tool call, then perform an action
    return "supervisor_tools"
Decide if we should continue the loop or stop based upon whether the LLM made a tool call
supervisor_should_continue
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def research_agent(state: SectionState, config: RunnableConfig):
    """LLM decides whether to call a tool or not"""
    # Get configuration
    configurable = MultiAgentConfiguration.from_runnable_config(config)
    researcher_model = get_config_value(configurable.researcher_model)

    # Initialize the model
    llm = init_chat_model(model=researcher_model)

    # Get tools based on configuration
    research_tool_list = await get_research_tools(config)
    system_prompt = RESEARCH_INSTRUCTIONS.format(
        section_description=state["section"],
        number_of_queries=configurable.number_of_queries,
        today=get_today_str(),
    )
    if configurable.mcp_prompt:
        system_prompt += f"\n\n{configurable.mcp_prompt}"

    # Ensure we have at least one user message (required by Anthropic)
    messages = state.get("messages", [])
    if not messages:
        messages = [{"role": "user",
                     "content": f"Please research and write the section: {state['section']}"}]

    return {
        "messages": [
            # Enforce tool calling to either perform more search or call the Section tool to write the section
            await llm.bind_tools(research_tool_list,
                                 parallel_tool_calls=False,
                                 # force at least one tool call
                                 tool_choice="any").ainvoke(
                [{"role": "system", "content": system_prompt}] + messages
            )
        ]
    }
LLM decides whether to call a tool or not
research_agent
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def research_agent_tools(state: SectionState, config: RunnableConfig):
    """Performs the tool call and route to supervisor or continue the research loop"""
    configurable = MultiAgentConfiguration.from_runnable_config(config)

    result = []
    completed_section = None
    source_str = ""

    # Get tools based on configuration
    research_tool_list = await get_research_tools(config)
    research_tools_by_name = {tool.name: tool for tool in research_tool_list}
    search_tool_names = {
        tool.name
        for tool in research_tool_list
        if tool.metadata is not None and tool.metadata.get("type") == "search"
    }

    # Process all tool calls first (required for OpenAI)
    for tool_call in state["messages"][-1].tool_calls:
        # Get the tool
        tool = research_tools_by_name[tool_call["name"]]
        # Perform the tool call - use ainvoke for async tools
        try:
            observation = await tool.ainvoke(tool_call["args"], config)
        except NotImplementedError:
            observation = tool.invoke(tool_call["args"], config)

        # Append to messages
        result.append({"role": "tool",
                       "content": observation,
                       "name": tool_call["name"],
                       "tool_call_id": tool_call["id"]})

        # Store the section observation if a Section tool was called
        if tool_call["name"] == "Section":
            completed_section = cast(Section, observation)

        # Store the source string if a search tool was called
        if tool_call["name"] in search_tool_names and configurable.include_source_str:
            source_str += cast(str, observation)

    # After processing all tools, decide what to do next
    state_update = {"messages": result}
    if completed_section:
        # Write the completed section to state and return to the supervisor
        state_update["completed_sections"] = [completed_section]
    if configurable.include_source_str and source_str:
        state_update["source_str"] = source_str

    return state_update
Performs the tool call and route to supervisor or continue the research loop
research_agent_tools
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
async def research_agent_should_continue(state: SectionState) -> str:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call"""
    messages = state["messages"]
    last_message = messages[-1]

    # tool_choice="any" in research_agent guarantees at least one tool call here
    if last_message.tool_calls[0]["name"] == "FinishResearch":
        # Research is done - return to supervisor
        return END
    else:
        return "research_agent_tools"
Decide if we should continue the loop or stop based upon whether the LLM made a tool call
research_agent_should_continue
python
langchain-ai/open_deep_research
src/open_deep_research/multi_agent.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/multi_agent.py
MIT
def get_config_value(value):
    """
    Helper function to handle string, dict, and enum cases of configuration values
    """
    if isinstance(value, str):
        return value
    elif isinstance(value, dict):
        return value
    else:
        return value.value
Helper function to handle string, dict, and enum cases of configuration values
get_config_value
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
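Behavior sketch for the helper above (the SearchAPI enum here is illustrative, not from the source):

from enum import Enum

class SearchAPI(Enum):
    TAVILY = "tavily"

assert get_config_value("tavily") == "tavily"                     # strings pass through
assert get_config_value({"depth": "deep"}) == {"depth": "deep"}   # dicts pass through
assert get_config_value(SearchAPI.TAVILY) == "tavily"             # enums are unwrapped to .value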
def get_search_params(search_api: str, search_api_config: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Filters the search_api_config dictionary to include only parameters accepted by the specified search API.

    Args:
        search_api (str): The search API identifier (e.g., "exa", "tavily").
        search_api_config (Optional[Dict[str, Any]]): The configuration dictionary for the search API.

    Returns:
        Dict[str, Any]: A dictionary of parameters to pass to the search function.
    """
    # Define accepted parameters for each search API
    SEARCH_API_PARAMS = {
        "exa": ["max_characters", "num_results", "include_domains", "exclude_domains", "subpages"],
        "tavily": ["max_results", "topic"],
        "perplexity": [],  # Perplexity accepts no additional parameters
        "arxiv": ["load_max_docs", "get_full_documents", "load_all_available_meta"],
        "pubmed": ["top_k_results", "email", "api_key", "doc_content_chars_max"],
        "linkup": ["depth"],
        "googlesearch": ["max_results"],
    }

    # Get the list of accepted parameters for the given search API
    accepted_params = SEARCH_API_PARAMS.get(search_api, [])

    # If no config provided, return an empty dict
    if not search_api_config:
        return {}

    # Filter the config to only include accepted parameters
    return {k: v for k, v in search_api_config.items() if k in accepted_params}
Filters the search_api_config dictionary to include only parameters accepted by the specified search API. Args: search_api (str): The search API identifier (e.g., "exa", "tavily"). search_api_config (Optional[Dict[str, Any]]): The configuration dictionary for the search API. Returns: Dict[str, Any]: A dictionary of parameters to pass to the search function.
get_search_params
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
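Worked example of the filter above (values are illustrative):

config = {"max_results": 3, "topic": "news", "api_key": "secret"}
assert get_search_params("tavily", config) == {"max_results": 3, "topic": "news"}
# APIs missing from SEARCH_API_PARAMS accept no parameters, so everything is dropped:
assert get_search_params("bing", config) == {}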
def format_sections(sections: list[Section]) -> str:
    """ Format a list of sections into a string """
    formatted_str = ""
    for idx, section in enumerate(sections, 1):
        formatted_str += f"""
{'='*60}
Section {idx}: {section.name}
{'='*60}
Description:
{section.description}
Requires Research:
{section.research}

Content:
{section.content if section.content else '[Not yet written]'}
"""
    return formatted_str
Format a list of sections into a string
format_sections
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
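A minimal sketch of the output (this dataclass stands in for the project's Section model, which is an assumption here; only the attributes format_sections reads are provided):

from dataclasses import dataclass

@dataclass
class Section:
    name: str
    description: str
    research: bool
    content: str = ""

print(format_sections([Section("Introduction", "Sets the stage", False)]))
# Prints a '='-ruled block with the name, description, research flag,
# and '[Not yet written]' for the empty content.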
async def tavily_search_async(search_queries,
                              max_results: int = 5,
                              topic: Literal["general", "news", "finance"] = "general",
                              include_raw_content: bool = True):
    """
    Performs concurrent web searches with the Tavily API

    Args:
        search_queries (List[str]): List of search queries to process
        max_results (int): Maximum number of results to return
        topic (Literal["general", "news", "finance"]): Topic to filter results by
        include_raw_content (bool): Whether to include raw content in the results

    Returns:
        List[dict]: List of search responses from Tavily API:
            {
                'query': str,
                'follow_up_questions': None,
                'answer': None,
                'images': list,
                'results': [                     # List of search results
                    {
                        'title': str,            # Title of the webpage
                        'url': str,              # URL of the result
                        'content': str,          # Summary/snippet of content
                        'score': float,          # Relevance score
                        'raw_content': str|None  # Full page content if available
                    },
                    ...
                ]
            }
    """
    tavily_async_client = AsyncTavilyClient()
    search_tasks = []
    for query in search_queries:
        search_tasks.append(
            tavily_async_client.search(
                query,
                max_results=max_results,
                include_raw_content=include_raw_content,
                topic=topic
            )
        )

    # Execute all searches concurrently
    search_docs = await asyncio.gather(*search_tasks)
    return search_docs
Performs concurrent web searches with the Tavily API Args: search_queries (List[str]): List of search queries to process max_results (int): Maximum number of results to return topic (Literal["general", "news", "finance"]): Topic to filter results by include_raw_content (bool): Whether to include raw content in the results Returns: List[dict]: List of search responses from Tavily API: { 'query': str, 'follow_up_questions': None, 'answer': None, 'images': list, 'results': [ # List of search results { 'title': str, # Title of the webpage 'url': str, # URL of the result 'content': str, # Summary/snippet of content 'score': float, # Relevance score 'raw_content': str|None # Full page content if available }, ... ] }
tavily_search_async
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
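A hedged usage sketch for the row above (assumes the tavily-python package is installed and TAVILY_API_KEY is set in the environment; the query is illustrative):

import asyncio

async def main():
    docs = await tavily_search_async(["langgraph Send API"], max_results=3,
                                     topic="general", include_raw_content=False)
    # One response dict per query, each carrying a list of scored results.
    for doc in docs:
        for r in doc["results"]:
            print(r["score"], r["title"], r["url"])

asyncio.run(main())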
async def azureaisearch_search_async(search_queries: list[str],
                                     max_results: int = 5,
                                     topic: str = "general",
                                     include_raw_content: bool = True) -> list[dict]:
    """
    Performs concurrent web searches using the Azure AI Search API.

    Args:
        search_queries (List[str]): list of search queries to process
        max_results (int): maximum number of results to return for each query
        topic (str): semantic topic filter for the search.
        include_raw_content (bool)

    Returns:
        List[dict]: list of search responses from Azure AI Search API, one per query.
    """
    # configure and create the Azure Search client
    # ensure all environment variables are set
    if not all(var in os.environ for var in ["AZURE_AI_SEARCH_ENDPOINT", "AZURE_AI_SEARCH_INDEX_NAME", "AZURE_AI_SEARCH_API_KEY"]):
        raise ValueError("Missing required environment variables for Azure Search API which are: AZURE_AI_SEARCH_ENDPOINT, AZURE_AI_SEARCH_INDEX_NAME, AZURE_AI_SEARCH_API_KEY")
    endpoint = os.getenv("AZURE_AI_SEARCH_ENDPOINT")
    index_name = os.getenv("AZURE_AI_SEARCH_INDEX_NAME")
    credential = AzureKeyCredential(os.getenv("AZURE_AI_SEARCH_API_KEY"))

    reranker_key = '@search.reranker_score'

    async with AsyncAzureAISearchClient(endpoint, index_name, credential) as client:

        async def do_search(query: str) -> dict:
            # search query
            paged = await client.search(
                search_text=query,
                vector_queries=[{
                    "fields": "vector",
                    "kind": "text",
                    "text": query,
                    "exhaustive": True
                }],
                semantic_configuration_name="fraunhofer-rag-semantic-config",
                query_type="semantic",
                select=["url", "title", "chunk", "creationTime", "lastModifiedTime"],
                top=max_results,
            )
            # async iterator to get all results
            items = [doc async for doc in paged]
            # Convert to a simple dict format
            results = [
                {
                    "title": doc.get("title"),
                    "url": doc.get("url"),
                    "content": doc.get("chunk"),
                    "score": doc.get(reranker_key),
                    "raw_content": doc.get("chunk") if include_raw_content else None
                }
                for doc in items
            ]
            return {"query": query, "results": results}

        # parallelize the search queries
        tasks = [do_search(q) for q in search_queries]
        return await asyncio.gather(*tasks)
Performs concurrent web searches using the Azure AI Search API. Args: search_queries (List[str]): list of search queries to process max_results (int): maximum number of results to return for each query topic (str): semantic topic filter for the search. include_raw_content (bool) Returns: List[dict]: list of search responses from Azure AI Search API, one per query.
azureaisearch_search_async
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
def perplexity_search(search_queries):
    """Search the web using the Perplexity API.

    Args:
        search_queries (List[SearchQuery]): List of search queries to process

    Returns:
        List[dict]: List of search responses from Perplexity API, one per query.
        Each response has format:
            {
                'query': str,                    # The original search query
                'follow_up_questions': None,
                'answer': None,
                'images': list,
                'results': [                     # List of search results
                    {
                        'title': str,            # Title of the search result
                        'url': str,              # URL of the result
                        'content': str,          # Summary/snippet of content
                        'score': float,          # Relevance score
                        'raw_content': str|None  # Full content or None for secondary citations
                    },
                    ...
                ]
            }
    """
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {os.getenv('PERPLEXITY_API_KEY')}"
    }

    search_docs = []
    for query in search_queries:
        payload = {
            "model": "sonar-pro",
            "messages": [
                {
                    "role": "system",
                    "content": "Search the web and provide factual information with sources."
                },
                {
                    "role": "user",
                    "content": query
                }
            ]
        }

        response = requests.post(
            "https://api.perplexity.ai/chat/completions",
            headers=headers,
            json=payload
        )
        response.raise_for_status()  # Raise exception for bad status codes

        # Parse the response
        data = response.json()
        content = data["choices"][0]["message"]["content"]
        citations = data.get("citations", ["https://perplexity.ai"])

        # Create results list for this query
        results = []

        # First citation gets the full content
        results.append({
            "title": "Perplexity Search, Source 1",
            "url": citations[0],
            "content": content,
            "raw_content": content,
            "score": 1.0  # Adding score to match Tavily format
        })

        # Add additional citations without duplicating content
        for i, citation in enumerate(citations[1:], start=2):
            results.append({
                "title": f"Perplexity Search, Source {i}",
                "url": citation,
                "content": "See primary source for full content",
                "raw_content": None,
                "score": 0.5  # Lower score for secondary sources
            })

        # Format response to match Tavily structure
        search_docs.append({
            "query": query,
            "follow_up_questions": None,
            "answer": None,
            "images": [],
            "results": results
        })

    return search_docs
Search the web using the Perplexity API. Args: search_queries (List[SearchQuery]): List of search queries to process Returns: List[dict]: List of search responses from Perplexity API, one per query. Each response has format: { 'query': str, # The original search query 'follow_up_questions': None, 'answer': None, 'images': list, 'results': [ # List of search results { 'title': str, # Title of the search result 'url': str, # URL of the result 'content': str, # Summary/snippet of content 'score': float, # Relevance score 'raw_content': str|None # Full content or None for secondary citations }, ... ] }
perplexity_search
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
async def exa_search(search_queries,
                     max_characters: Optional[int] = None,
                     num_results=5,
                     include_domains: Optional[List[str]] = None,
                     exclude_domains: Optional[List[str]] = None,
                     subpages: Optional[int] = None):
    """Search the web using the Exa API.

    Args:
        search_queries (List[SearchQuery]): List of search queries to process
        max_characters (int, optional): Maximum number of characters to retrieve for each result's raw content.
                                        If None, the text parameter will be set to True instead of an object.
        num_results (int): Number of search results per query. Defaults to 5.
        include_domains (List[str], optional): List of domains to include in search results.
                                               When specified, only results from these domains will be returned.
        exclude_domains (List[str], optional): List of domains to exclude from search results.
                                               Cannot be used together with include_domains.
        subpages (int, optional): Number of subpages to retrieve per result. If None, subpages are not retrieved.

    Returns:
        List[dict]: List of search responses from Exa API, one per query. Each response has format:
            {
                'query': str,                    # The original search query
                'follow_up_questions': None,
                'answer': None,
                'images': list,
                'results': [                     # List of search results
                    {
                        'title': str,            # Title of the search result
                        'url': str,              # URL of the result
                        'content': str,          # Summary/snippet of content
                        'score': float,          # Relevance score
                        'raw_content': str|None  # Full content or None for secondary citations
                    },
                    ...
                ]
            }
    """
    # Check that include_domains and exclude_domains are not both specified
    if include_domains and exclude_domains:
        raise ValueError("Cannot specify both include_domains and exclude_domains")

    # Initialize Exa client (API key should be configured in your .env file)
    exa = Exa(api_key=f"{os.getenv('EXA_API_KEY')}")

    # Define the function to process a single query
    async def process_query(query):
        # Use run_in_executor to make the synchronous exa call in a non-blocking way
        loop = asyncio.get_event_loop()

        # Define the function for the executor with all parameters
        def exa_search_fn():
            # Build parameters dictionary
            kwargs = {
                # Set text to True if max_characters is None, otherwise use an object with max_characters
                "text": True if max_characters is None else {"max_characters": max_characters},
                # This is an amazing feature by EXA: it provides an AI generated
                # summary of the content based on the query
                "summary": True,
                "num_results": num_results
            }

            # Add optional parameters only if they are provided
            if subpages is not None:
                kwargs["subpages"] = subpages
            if include_domains:
                kwargs["include_domains"] = include_domains
            elif exclude_domains:
                kwargs["exclude_domains"] = exclude_domains

            return exa.search_and_contents(query, **kwargs)

        response = await loop.run_in_executor(None, exa_search_fn)

        # Format the response to match the expected output structure
        formatted_results = []
        seen_urls = set()  # Track URLs to avoid duplicates

        # Helper function to safely get value regardless of if item is dict or object
        def get_value(item, key, default=None):
            if isinstance(item, dict):
                return item.get(key, default)
            else:
                return getattr(item, key, default) if hasattr(item, key) else default

        # Access the results from the SearchResponse object
        results_list = get_value(response, 'results', [])

        # First process all main results
        for result in results_list:
            # Get the score with a default of 0.0 if it's None or not present
            score = get_value(result, 'score', 0.0)

            # Combine summary and text for content if both are available
            text_content = get_value(result, 'text', '')
            summary_content = get_value(result, 'summary', '')

            content = text_content
            if summary_content:
                if content:
                    content = f"{summary_content}\n\n{content}"
                else:
                    content = summary_content

            title = get_value(result, 'title', '')
            url = get_value(result, 'url', '')

            # Skip if we've seen this URL before (removes duplicate entries)
            if url in seen_urls:
                continue

            seen_urls.add(url)

            # Main result entry
            result_entry = {
                "title": title,
                "url": url,
                "content": content,
                "score": score,
                "raw_content": text_content
            }

            # Add the main result to the formatted results
            formatted_results.append(result_entry)

        # Now process subpages only if the subpages parameter was provided
        if subpages is not None:
            for result in results_list:
                subpages_list = get_value(result, 'subpages', [])
                for subpage in subpages_list:
                    # Get subpage score
                    subpage_score = get_value(subpage, 'score', 0.0)

                    # Combine summary and text for subpage content
                    subpage_text = get_value(subpage, 'text', '')
                    subpage_summary = get_value(subpage, 'summary', '')

                    subpage_content = subpage_text
                    if subpage_summary:
                        if subpage_content:
                            subpage_content = f"{subpage_summary}\n\n{subpage_content}"
                        else:
                            subpage_content = subpage_summary

                    subpage_url = get_value(subpage, 'url', '')

                    # Skip if we've seen this URL before
                    if subpage_url in seen_urls:
                        continue

                    seen_urls.add(subpage_url)

                    formatted_results.append({
                        "title": get_value(subpage, 'title', ''),
                        "url": subpage_url,
                        "content": subpage_content,
                        "score": subpage_score,
                        "raw_content": subpage_text
                    })

        # Collect images if available (only from main results to avoid duplication)
        images = []
        for result in results_list:
            image = get_value(result, 'image')
            if image and image not in images:  # Avoid duplicate images
                images.append(image)

        return {
            "query": query,
            "follow_up_questions": None,
            "answer": None,
            "images": images,
            "results": formatted_results
        }

    # Process all queries sequentially with delay to respect rate limit
    search_docs = []
    for i, query in enumerate(search_queries):
        try:
            # Add delay between requests (0.25s = 4 requests per second, well within the 5/s limit)
            if i > 0:  # Don't delay the first request
                await asyncio.sleep(0.25)

            result = await process_query(query)
            search_docs.append(result)
        except Exception as e:
            # Handle exceptions gracefully
            print(f"Error processing query '{query}': {str(e)}")
            # Add a placeholder result for failed queries to maintain index alignment
            search_docs.append({
                "query": query,
                "follow_up_questions": None,
                "answer": None,
                "images": [],
                "results": [],
                "error": str(e)
            })

            # Add additional delay if we hit a rate limit error
            if "429" in str(e):
                print("Rate limit exceeded. Adding additional delay...")
                await asyncio.sleep(1.0)  # Add a longer delay if we hit a rate limit

    return search_docs
Search the web using the Exa API. Args: search_queries (List[SearchQuery]): List of search queries to process max_characters (int, optional): Maximum number of characters to retrieve for each result's raw content. If None, the text parameter will be set to True instead of an object. num_results (int): Number of search results per query. Defaults to 5. include_domains (List[str], optional): List of domains to include in search results. When specified, only results from these domains will be returned. exclude_domains (List[str], optional): List of domains to exclude from search results. Cannot be used together with include_domains. subpages (int, optional): Number of subpages to retrieve per result. If None, subpages are not retrieved. Returns: List[dict]: List of search responses from Exa API, one per query. Each response has format: { 'query': str, # The original search query 'follow_up_questions': None, 'answer': None, 'images': list, 'results': [ # List of search results { 'title': str, # Title of the search result 'url': str, # URL of the result 'content': str, # Summary/snippet of content 'score': float, # Relevance score 'raw_content': str|None # Full content or None for secondary citations }, ... ] }
exa_search
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
async def arxiv_search_async(search_queries,
                             load_max_docs=5,
                             get_full_documents=True,
                             load_all_available_meta=True):
    """
    Performs concurrent searches on arXiv using the ArxivRetriever.

    Args:
        search_queries (List[str]): List of search queries or article IDs
        load_max_docs (int, optional): Maximum number of documents to return per query. Default is 5.
        get_full_documents (bool, optional): Whether to fetch full text of documents. Default is True.
        load_all_available_meta (bool, optional): Whether to load all available metadata. Default is True.

    Returns:
        List[dict]: List of search responses from arXiv, one per query. Each response has format:
            {
                'query': str,                    # The original search query
                'follow_up_questions': None,
                'answer': None,
                'images': [],
                'results': [                     # List of search results
                    {
                        'title': str,            # Title of the paper
                        'url': str,              # URL (Entry ID) of the paper
                        'content': str,          # Formatted summary with metadata
                        'score': float,          # Relevance score (approximated)
                        'raw_content': str|None  # Full paper content if available
                    },
                    ...
                ]
            }
    """
    async def process_single_query(query):
        try:
            # Create retriever for each query
            retriever = ArxivRetriever(
                load_max_docs=load_max_docs,
                get_full_documents=get_full_documents,
                load_all_available_meta=load_all_available_meta
            )

            # Run the synchronous retriever in a thread pool
            loop = asyncio.get_event_loop()
            docs = await loop.run_in_executor(None, lambda: retriever.invoke(query))

            results = []
            # Assign decreasing scores based on the order
            base_score = 1.0
            score_decrement = 1.0 / (len(docs) + 1) if docs else 0

            for i, doc in enumerate(docs):
                # Extract metadata
                metadata = doc.metadata

                # Use entry_id as the URL (this is the actual arxiv link)
                url = metadata.get('entry_id', '')

                # Format content with all useful metadata
                content_parts = []

                # Primary information
                if 'Summary' in metadata:
                    content_parts.append(f"Summary: {metadata['Summary']}")
                if 'Authors' in metadata:
                    content_parts.append(f"Authors: {metadata['Authors']}")

                # Add publication information
                published = metadata.get('Published')
                published_str = published.isoformat() if hasattr(published, 'isoformat') else str(published) if published else ''
                if published_str:
                    content_parts.append(f"Published: {published_str}")

                # Add additional metadata if available
                if 'primary_category' in metadata:
                    content_parts.append(f"Primary Category: {metadata['primary_category']}")
                if 'categories' in metadata and metadata['categories']:
                    content_parts.append(f"Categories: {', '.join(metadata['categories'])}")
                if 'comment' in metadata and metadata['comment']:
                    content_parts.append(f"Comment: {metadata['comment']}")
                if 'journal_ref' in metadata and metadata['journal_ref']:
                    content_parts.append(f"Journal Reference: {metadata['journal_ref']}")
                if 'doi' in metadata and metadata['doi']:
                    content_parts.append(f"DOI: {metadata['doi']}")

                # Get PDF link if available in the links
                pdf_link = ""
                if 'links' in metadata and metadata['links']:
                    for link in metadata['links']:
                        if 'pdf' in link:
                            pdf_link = link
                            content_parts.append(f"PDF: {pdf_link}")
                            break

                # Join all content parts with newlines
                content = "\n".join(content_parts)

                result = {
                    'title': metadata.get('Title', ''),
                    'url': url,  # Using entry_id as the URL
                    'content': content,
                    'score': base_score - (i * score_decrement),
                    'raw_content': doc.page_content if get_full_documents else None
                }
                results.append(result)

            return {
                'query': query,
                'follow_up_questions': None,
                'answer': None,
                'images': [],
                'results': results
            }
        except Exception as e:
            # Handle exceptions gracefully
            print(f"Error processing arXiv query '{query}': {str(e)}")
            return {
                'query': query,
                'follow_up_questions': None,
                'answer': None,
                'images': [],
                'results': [],
                'error': str(e)
            }

    # Process queries sequentially with delay to respect arXiv rate limit (1 request per 3 seconds)
    search_docs = []
    for i, query in enumerate(search_queries):
        try:
            # Add delay between requests (3 seconds per ArXiv's rate limit)
            if i > 0:  # Don't delay the first request
                await asyncio.sleep(3.0)

            result = await process_single_query(query)
            search_docs.append(result)
        except Exception as e:
            # Handle exceptions gracefully
            print(f"Error processing arXiv query '{query}': {str(e)}")
            search_docs.append({
                'query': query,
                'follow_up_questions': None,
                'answer': None,
                'images': [],
                'results': [],
                'error': str(e)
            })

            # Add additional delay if we hit a rate limit error
            if "429" in str(e) or "Too Many Requests" in str(e):
                print("ArXiv rate limit exceeded. Adding additional delay...")
                await asyncio.sleep(5.0)  # Add a longer delay if we hit a rate limit

    return search_docs
Performs searches on arXiv using the ArxivRetriever.

Queries are processed sequentially, with a delay between requests, to respect
arXiv's rate limit of one request every three seconds.

Args:
    search_queries (List[str]): List of search queries or article IDs
    load_max_docs (int, optional): Maximum number of documents to return per query. Default is 5.
    get_full_documents (bool, optional): Whether to fetch full text of documents. Default is True.
    load_all_available_meta (bool, optional): Whether to load all available metadata. Default is True.

Returns:
    List[dict]: List of search responses from arXiv, one per query. Each response has format:
        {
            'query': str,                    # The original search query
            'follow_up_questions': None,
            'answer': None,
            'images': [],
            'results': [                     # List of search results
                {
                    'title': str,            # Title of the paper
                    'url': str,              # URL (entry ID) of the paper
                    'content': str,          # Formatted summary with metadata
                    'score': float,          # Relevance score (approximated)
                    'raw_content': str|None  # Full paper content if available
                },
                ...
            ]
        }
arxiv_search_async
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
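A minimal usage sketch for the function above (assuming the ArxivRetriever dependency from utils.py is importable; the query strings are illustrative):

import asyncio

async def demo():
    responses = await arxiv_search_async(
        ["quantum error correction", "2103.03404"],  # free-text queries or arXiv IDs
        load_max_docs=3,
        get_full_documents=False,  # skip full-text download for a faster demo
    )
    for response in responses:
        print(response["query"], "->", len(response["results"]), "results")

asyncio.run(demo())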
async def linkup_search(search_queries, depth: Optional[str] = "standard"): """ Performs concurrent web searches using the Linkup API. Args: search_queries (List[SearchQuery]): List of search queries to process depth (str, optional): "standard" (default) or "deep". More details here https://docs.linkup.so/pages/documentation/get-started/concepts Returns: List[dict]: List of search responses from Linkup API, one per query. Each response has format: { 'results': [ # List of search results { 'title': str, # Title of the search result 'url': str, # URL of the result 'content': str, # Summary/snippet of content }, ... ] } """ client = LinkupClient() search_tasks = [] for query in search_queries: search_tasks.append( client.async_search( query, depth, output_type="searchResults", ) ) search_results = [] for response in await asyncio.gather(*search_tasks): search_results.append( { "results": [ {"title": result.name, "url": result.url, "content": result.content} for result in response.results ], } ) return search_results
Performs concurrent web searches using the Linkup API. Args: search_queries (List[SearchQuery]): List of search queries to process depth (str, optional): "standard" (default) or "deep". More details here https://docs.linkup.so/pages/documentation/get-started/concepts Returns: List[dict]: List of search responses from Linkup API, one per query. Each response has format: { 'results': [ # List of search results { 'title': str, # Title of the search result 'url': str, # URL of the result 'content': str, # Summary/snippet of content }, ... ] }
linkup_search
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
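A short usage sketch (assuming a valid Linkup API key is configured in the environment, per the Linkup SDK's conventions; the query is illustrative):

import asyncio

async def demo():
    # Each element of the returned list maps to one query, in order
    results = await linkup_search(["history of the transformer architecture"], depth="standard")
    for item in results[0]["results"]:
        print(item["title"], item["url"])

asyncio.run(demo())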
async def google_search_async(search_queries: Union[str, List[str]], max_results: int = 5, include_raw_content: bool = True):
    """
    Performs concurrent web searches using Google.
    Uses Google Custom Search API if environment variables are set, otherwise falls back to web scraping.

    Args:
        search_queries (List[str]): List of search queries to process
        max_results (int): Maximum number of results to return per query
        include_raw_content (bool): Whether to fetch full page content

    Returns:
        List[dict]: List of search responses from Google, one per query
    """

    # Check for API credentials from environment variables
    api_key = os.environ.get("GOOGLE_API_KEY")
    cx = os.environ.get("GOOGLE_CX")
    use_api = bool(api_key and cx)

    # Handle case where search_queries is a single string
    if isinstance(search_queries, str):
        search_queries = [search_queries]

    # Define user agent generator
    def get_useragent():
        """Generates a random user agent string."""
        lynx_version = f"Lynx/{random.randint(2, 3)}.{random.randint(8, 9)}.{random.randint(0, 2)}"
        libwww_version = f"libwww-FM/{random.randint(2, 3)}.{random.randint(13, 15)}"
        ssl_mm_version = f"SSL-MM/{random.randint(1, 2)}.{random.randint(3, 5)}"
        openssl_version = f"OpenSSL/{random.randint(1, 3)}.{random.randint(0, 4)}.{random.randint(0, 9)}"
        return f"{lynx_version} {libwww_version} {ssl_mm_version} {openssl_version}"

    # Create executor for running synchronous operations
    executor = None if use_api else concurrent.futures.ThreadPoolExecutor(max_workers=5)

    # Use a semaphore to limit concurrent requests
    semaphore = asyncio.Semaphore(5 if use_api else 2)

    async def search_single_query(query):
        async with semaphore:
            try:
                results = []

                # API-based search
                if use_api:
                    # The API returns up to 10 results per request
                    for start_index in range(1, max_results + 1, 10):
                        # Calculate how many results to request in this batch
                        num = min(10, max_results - (start_index - 1))

                        # Make request to Google Custom Search API
                        params = {
                            'q': query,
                            'key': api_key,
                            'cx': cx,
                            'start': start_index,
                            'num': num
                        }
                        print(f"Requesting {num} results for '{query}' from Google API...")

                        async with aiohttp.ClientSession() as session:
                            async with session.get('https://www.googleapis.com/customsearch/v1', params=params) as response:
                                if response.status != 200:
                                    error_text = await response.text()
                                    print(f"API error: {response.status}, {error_text}")
                                    break

                                data = await response.json()

                                # Process search results
                                for item in data.get('items', []):
                                    result = {
                                        "title": item.get('title', ''),
                                        "url": item.get('link', ''),
                                        "content": item.get('snippet', ''),
                                        "score": None,
                                        "raw_content": item.get('snippet', '')
                                    }
                                    results.append(result)

                        # Respect API quota with a small delay
                        await asyncio.sleep(0.2)

                        # If we didn't get a full page of results, no need to request more
                        if not data.get('items') or len(data.get('items', [])) < num:
                            break

                # Web scraping based search
                else:
                    # Add delay between requests
                    await asyncio.sleep(0.5 + random.random() * 1.5)
                    print(f"Scraping Google for '{query}'...")

                    # Define scraping function
                    def google_search(query, max_results):
                        try:
                            lang = "en"
                            safe = "active"
                            start = 0
                            fetched_results = 0
                            fetched_links = set()
                            search_results = []

                            while fetched_results < max_results:
                                # Send request to Google
                                resp = requests.get(
                                    url="https://www.google.com/search",
                                    headers={
                                        "User-Agent": get_useragent(),
                                        "Accept": "*/*"
                                    },
                                    params={
                                        "q": query,
                                        "num": max_results + 2,
                                        "hl": lang,
                                        "start": start,
                                        "safe": safe,
                                    },
                                    cookies={
                                        'CONSENT': 'PENDING+987',  # Bypasses the consent page
                                        'SOCS': 'CAESHAgBEhIaAB',
                                    }
                                )
                                resp.raise_for_status()

                                # Parse results
                                soup = BeautifulSoup(resp.text, "html.parser")
                                result_block = soup.find_all("div", class_="ezO2md")
                                new_results = 0

                                for result in result_block:
                                    link_tag = result.find("a", href=True)
                                    title_tag = link_tag.find("span", class_="CVA68e") if link_tag else None
                                    description_tag = result.find("span", class_="FrIlee")

                                    if link_tag and title_tag and description_tag:
                                        link = unquote(link_tag["href"].split("&")[0].replace("/url?q=", ""))

                                        if link in fetched_links:
                                            continue

                                        fetched_links.add(link)
                                        title = title_tag.text
                                        description = description_tag.text

                                        # Store result in the same format as the API results
                                        search_results.append({
                                            "title": title,
                                            "url": link,
                                            "content": description,
                                            "score": None,
                                            "raw_content": description
                                        })

                                        fetched_results += 1
                                        new_results += 1

                                        if fetched_results >= max_results:
                                            break

                                if new_results == 0:
                                    break

                                start += 10
                                time.sleep(1)  # Delay between pages

                            return search_results

                        except Exception as e:
                            print(f"Error in Google search for '{query}': {str(e)}")
                            return []

                    # Execute search in thread pool
                    loop = asyncio.get_running_loop()
                    search_results = await loop.run_in_executor(
                        executor,
                        lambda: google_search(query, max_results)
                    )

                    # Process the results
                    results = search_results

                # If requested, fetch full page content asynchronously (for both API and web scraping)
                if include_raw_content and results:
                    content_semaphore = asyncio.Semaphore(3)

                    async with aiohttp.ClientSession() as session:
                        fetch_tasks = []

                        async def fetch_full_content(result):
                            async with content_semaphore:
                                url = result['url']
                                headers = {
                                    'User-Agent': get_useragent(),
                                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
                                }

                                try:
                                    await asyncio.sleep(0.2 + random.random() * 0.6)
                                    async with session.get(url, headers=headers, timeout=10) as response:
                                        if response.status == 200:
                                            # Check content type to handle binary files
                                            content_type = response.headers.get('Content-Type', '').lower()

                                            # Handle PDFs and other binary files
                                            if 'application/pdf' in content_type or 'application/octet-stream' in content_type:
                                                # For PDFs, indicate that content is binary and not parsed
                                                result['raw_content'] = f"[Binary content: {content_type}. Content extraction not supported for this file type.]"
                                            else:
                                                try:
                                                    # Try to decode as UTF-8 with replacements for non-UTF8 characters
                                                    html = await response.text(errors='replace')
                                                    soup = BeautifulSoup(html, 'html.parser')
                                                    result['raw_content'] = soup.get_text()
                                                except UnicodeDecodeError as ude:
                                                    # Fallback if we still have decoding issues
                                                    result['raw_content'] = f"[Could not decode content: {str(ude)}]"
                                except Exception as e:
                                    print(f"Warning: Failed to fetch content for {url}: {str(e)}")
                                    result['raw_content'] = f"[Error fetching content: {str(e)}]"
                                return result

                        for result in results:
                            fetch_tasks.append(fetch_full_content(result))

                        updated_results = await asyncio.gather(*fetch_tasks)
                        results = updated_results
                        print(f"Fetched full content for {len(results)} results")

                return {
                    "query": query,
                    "follow_up_questions": None,
                    "answer": None,
                    "images": [],
                    "results": results
                }
            except Exception as e:
                print(f"Error in Google search for query '{query}': {str(e)}")
                return {
                    "query": query,
                    "follow_up_questions": None,
                    "answer": None,
                    "images": [],
                    "results": []
                }

    try:
        # Create tasks for all search queries
        search_tasks = [search_single_query(query) for query in search_queries]

        # Execute all searches concurrently
        search_results = await asyncio.gather(*search_tasks)

        return search_results
    finally:
        # Only shut down executor if it was created
        if executor:
            executor.shutdown(wait=False)
Performs concurrent web searches using Google. Uses Google Custom Search API if environment variables are set, otherwise falls back to web scraping. Args: search_queries (List[str]): List of search queries to process max_results (int): Maximum number of results to return per query include_raw_content (bool): Whether to fetch full page content Returns: List[dict]: List of search responses from Google, one per query
google_search_async
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
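A usage sketch: with GOOGLE_API_KEY and GOOGLE_CX set, the function goes through the Custom Search API; otherwise it falls back to scraping (the credentials and query below are illustrative placeholders):

import asyncio, os

os.environ.setdefault("GOOGLE_API_KEY", "<your-api-key>")          # hypothetical credentials
os.environ.setdefault("GOOGLE_CX", "<your-search-engine-id>")

responses = asyncio.run(
    google_search_async("open deep research langchain", max_results=3, include_raw_content=False)
)
print(responses[0]["results"])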
async def scrape_pages(titles: List[str], urls: List[str]) -> str: """ Scrapes content from a list of URLs and formats it into a readable markdown document. This function: 1. Takes a list of page titles and URLs 2. Makes asynchronous HTTP requests to each URL 3. Converts HTML content to markdown 4. Formats all content with clear source attribution Args: titles (List[str]): A list of page titles corresponding to each URL urls (List[str]): A list of URLs to scrape content from Returns: str: A formatted string containing the full content of each page in markdown format, with clear section dividers and source attribution """ # Create an async HTTP client async with httpx.AsyncClient(follow_redirects=True, timeout=30.0) as client: pages = [] # Fetch each URL and convert to markdown for url in urls: try: # Fetch the content response = await client.get(url) response.raise_for_status() # Convert HTML to markdown if successful if response.status_code == 200: # Handle different content types content_type = response.headers.get('Content-Type', '') if 'text/html' in content_type: # Convert HTML to markdown markdown_content = markdownify(response.text) pages.append(markdown_content) else: # For non-HTML content, just mention the content type pages.append(f"Content type: {content_type} (not converted to markdown)") else: pages.append(f"Error: Received status code {response.status_code}") except Exception as e: # Handle any exceptions during fetch pages.append(f"Error fetching URL: {str(e)}") # Create formatted output formatted_output = f"Search results: \n\n" for i, (title, url, page) in enumerate(zip(titles, urls, pages)): formatted_output += f"\n\n--- SOURCE {i+1}: {title} ---\n" formatted_output += f"URL: {url}\n\n" formatted_output += f"FULL CONTENT:\n {page}" formatted_output += "\n\n" + "-" * 80 + "\n" return formatted_output
Scrapes content from a list of URLs and formats it into a readable markdown document. This function: 1. Takes a list of page titles and URLs 2. Makes asynchronous HTTP requests to each URL 3. Converts HTML content to markdown 4. Formats all content with clear source attribution Args: titles (List[str]): A list of page titles corresponding to each URL urls (List[str]): A list of URLs to scrape content from Returns: str: A formatted string containing the full content of each page in markdown format, with clear section dividers and source attribution
scrape_pages
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
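A minimal call, assuming httpx and markdownify are installed as the imports in utils.py suggest (the URL is illustrative):

import asyncio

markdown_report = asyncio.run(
    scrape_pages(
        titles=["Example Domain"],
        urls=["https://example.com"],
    )
)
print(markdown_report[:500])  # one "--- SOURCE n ---" block per URL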
async def tavily_search( queries: List[str], max_results: Annotated[int, InjectedToolArg] = 5, topic: Annotated[Literal["general", "news", "finance"], InjectedToolArg] = "general", config: RunnableConfig = None ) -> str: """ Fetches results from Tavily search API. Args: queries (List[str]): List of search queries max_results (int): Maximum number of results to return topic (Literal['general', 'news', 'finance']): Topic to filter results by Returns: str: A formatted string of search results """ # Use tavily_search_async with include_raw_content=True to get content directly search_results = await tavily_search_async( queries, max_results=max_results, topic=topic, include_raw_content=True ) # Format the search results directly using the raw_content already provided formatted_output = f"Search results: \n\n" # Deduplicate results by URL unique_results = {} for response in search_results: for result in response['results']: url = result['url'] if url not in unique_results: unique_results[url] = {**result, "query": response['query']} async def noop(): return None configurable = Configuration.from_runnable_config(config) max_char_to_include = 30_000 # TODO: share this behavior across all search implementations / tools if configurable.process_search_results == "summarize": if configurable.summarization_model_provider == "anthropic": extra_kwargs = {"betas": ["extended-cache-ttl-2025-04-11"]} else: extra_kwargs = {} summarization_model = init_chat_model( model=configurable.summarization_model, model_provider=configurable.summarization_model_provider, **extra_kwargs ) summarization_tasks = [ noop() if not result.get("raw_content") else summarize_webpage(summarization_model, result['raw_content'][:max_char_to_include]) for result in unique_results.values() ] summaries = await asyncio.gather(*summarization_tasks) unique_results = { url: {'title': result['title'], 'content': result['content'] if summary is None else summary} for url, result, summary in zip(unique_results.keys(), unique_results.values(), summaries) } elif configurable.process_search_results == "split_and_rerank": embeddings = init_embeddings("openai:text-embedding-3-small") results_by_query = itertools.groupby(unique_results.values(), key=lambda x: x['query']) all_retrieved_docs = [] for query, query_results in results_by_query: retrieved_docs = split_and_rerank_search_results(embeddings, query, query_results) all_retrieved_docs.extend(retrieved_docs) stitched_docs = stitch_documents_by_url(all_retrieved_docs) unique_results = { doc.metadata['url']: {'title': doc.metadata['title'], 'content': doc.page_content} for doc in stitched_docs } # Format the unique results for i, (url, result) in enumerate(unique_results.items()): formatted_output += f"\n\n--- SOURCE {i+1}: {result['title']} ---\n" formatted_output += f"URL: {url}\n\n" formatted_output += f"SUMMARY:\n{result['content']}\n\n" if result.get('raw_content'): formatted_output += f"FULL CONTENT:\n{result['raw_content'][:max_char_to_include]}" # Limit content size formatted_output += "\n\n" + "-" * 80 + "\n" if unique_results: return formatted_output else: return "No valid search results found. Please try different search queries or use a different search API."
Fetches results from Tavily search API. Args: queries (List[str]): List of search queries max_results (int): Maximum number of results to return topic (Literal['general', 'news', 'finance']): Topic to filter results by Returns: str: A formatted string of search results
tavily_search
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
async def azureaisearch_search(queries: List[str], max_results: int = 5, topic: str = "general") -> str:
    """
    Fetches results from Azure AI Search API.

    Args:
        queries (List[str]): List of search queries
        max_results (int): Maximum number of results to return per query
        topic (str): Topic to filter results by

    Returns:
        str: A formatted string of search results
    """
    # Use azureaisearch_search_async with include_raw_content=True to get content directly
    search_results = await azureaisearch_search_async(queries, max_results=max_results, topic=topic, include_raw_content=True)

    # Format the search results directly using the raw_content already provided
    formatted_output = "Search results: \n\n"

    # Deduplicate results by URL
    unique_results = {}
    for response in search_results:
        for result in response['results']:
            url = result['url']
            if url not in unique_results:
                unique_results[url] = result

    # Format the unique results
    for i, (url, result) in enumerate(unique_results.items()):
        formatted_output += f"\n\n--- SOURCE {i+1}: {result['title']} ---\n"
        formatted_output += f"URL: {url}\n\n"
        formatted_output += f"SUMMARY:\n{result['content']}\n\n"
        if result.get('raw_content'):
            formatted_output += f"FULL CONTENT:\n{result['raw_content'][:30000]}"  # Limit content size
        formatted_output += "\n\n" + "-" * 80 + "\n"

    if unique_results:
        return formatted_output
    else:
        return "No valid search results found. Please try different search queries or use a different search API."
Fetches results from Azure AI Search API.

Args:
    queries (List[str]): List of search queries
    max_results (int): Maximum number of results to return per query
    topic (str): Topic to filter results by

Returns:
    str: A formatted string of search results
azureaisearch_search
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
async def select_and_execute_search(search_api: str, query_list: list[str], params_to_pass: dict) -> str: """Select and execute the appropriate search API. Args: search_api: Name of the search API to use query_list: List of search queries to execute params_to_pass: Parameters to pass to the search API Returns: Formatted string containing search results Raises: ValueError: If an unsupported search API is specified """ if search_api == "tavily": # Tavily search tool used with both workflow and agent # and returns a formatted source string return await tavily_search.ainvoke({'queries': query_list, **params_to_pass}) elif search_api == "duckduckgo": # DuckDuckGo search tool used with both workflow and agent return await duckduckgo_search.ainvoke({'search_queries': query_list}) elif search_api == "perplexity": search_results = perplexity_search(query_list, **params_to_pass) elif search_api == "exa": search_results = await exa_search(query_list, **params_to_pass) elif search_api == "arxiv": search_results = await arxiv_search_async(query_list, **params_to_pass) elif search_api == "pubmed": search_results = await pubmed_search_async(query_list, **params_to_pass) elif search_api == "linkup": search_results = await linkup_search(query_list, **params_to_pass) elif search_api == "googlesearch": search_results = await google_search_async(query_list, **params_to_pass) elif search_api == "azureaisearch": search_results = await azureaisearch_search_async(query_list, **params_to_pass) else: raise ValueError(f"Unsupported search API: {search_api}") return deduplicate_and_format_sources(search_results, max_tokens_per_source=4000, deduplication_strategy="keep_first")
Select and execute the appropriate search API. Args: search_api: Name of the search API to use query_list: List of search queries to execute params_to_pass: Parameters to pass to the search API Returns: Formatted string containing search results Raises: ValueError: If an unsupported search API is specified
select_and_execute_search
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
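A usage sketch of the dispatcher (params_to_pass must only contain keyword arguments the selected backend accepts; values here are illustrative):

import asyncio

# Dispatch to the arXiv backend defined earlier in this module
formatted_sources = asyncio.run(
    select_and_execute_search(
        search_api="arxiv",
        query_list=["retrieval augmented generation survey"],
        params_to_pass={"load_max_docs": 3, "get_full_documents": False},
    )
)
print(formatted_sources[:500])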
async def load_mcp_server_config(path: str) -> dict: """Load MCP server configuration from a file.""" def _load(): with open(path, "r") as f: config = json.load(f) return config config = await asyncio.to_thread(_load) return config
Load MCP server configuration from a file.
load_mcp_server_config
python
langchain-ai/open_deep_research
src/open_deep_research/utils.py
https://github.com/langchain-ai/open_deep_research/blob/master/src/open_deep_research/utils.py
MIT
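A sketch of loading a server config. The function just parses whatever JSON the file contains, so the schema shown below is a hypothetical example, not a requirement:

import asyncio, json
from pathlib import Path

Path("mcp_servers.json").write_text(json.dumps({
    "filesystem": {"command": "npx", "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"]}
}))

config = asyncio.run(load_mcp_server_config("mcp_servers.json"))
print(config.keys())  # dict_keys(['filesystem'])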
def pytest_addoption(parser): """Add command-line options to pytest.""" parser.addoption("--research-agent", action="store", help="Agent type: multi_agent or graph") parser.addoption("--search-api", action="store", help="Search API to use") parser.addoption("--eval-model", action="store", help="Model for evaluation") parser.addoption("--supervisor-model", action="store", help="Model for supervisor agent") parser.addoption("--researcher-model", action="store", help="Model for researcher agent") parser.addoption("--planner-provider", action="store", help="Provider for planner model") parser.addoption("--planner-model", action="store", help="Model for planning") parser.addoption("--writer-provider", action="store", help="Provider for writer model") parser.addoption("--writer-model", action="store", help="Model for writing") parser.addoption("--max-search-depth", action="store", help="Maximum search depth")
Add command-line options to pytest.
pytest_addoption
python
langchain-ai/open_deep_research
tests/conftest.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/conftest.py
MIT
def add_model_configs(cmd, args): """Add model configuration arguments to command.""" if args.supervisor_model: cmd.append(f"--supervisor-model={args.supervisor_model}") if args.researcher_model: cmd.append(f"--researcher-model={args.researcher_model}") if args.planner_provider: cmd.append(f"--planner-provider={args.planner_provider}") if args.planner_model: cmd.append(f"--planner-model={args.planner_model}") if args.writer_provider: cmd.append(f"--writer-provider={args.writer_provider}") if args.writer_model: cmd.append(f"--writer-model={args.writer_model}") if args.eval_model: cmd.append(f"--eval-model={args.eval_model}") if args.search_api: cmd.append(f"--search-api={args.search_api}") if args.max_search_depth: cmd.append(f"--max-search-depth={args.max_search_depth}")
Add model configuration arguments to command.
add_model_configs
python
langchain-ai/open_deep_research
tests/run_test.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/run_test.py
MIT
def get_evaluation_llm(eval_model=None): """Create and return an evaluation LLM. Args: eval_model: Model identifier to use for evaluation Format: "provider:model_name" (e.g., "anthropic:claude-3-7-sonnet-latest") If None, it will use environment variable or default Returns: Structured LLM for generating evaluation grades """ # Use provided model, then environment variable, then default model_to_use = eval_model or os.environ.get("EVAL_MODEL", "anthropic:claude-3-7-sonnet-latest") criteria_eval_llm = init_chat_model(model_to_use) return criteria_eval_llm.with_structured_output(CriteriaGrade)
Create and return an evaluation LLM. Args: eval_model: Model identifier to use for evaluation Format: "provider:model_name" (e.g., "anthropic:claude-3-7-sonnet-latest") If None, it will use environment variable or default Returns: Structured LLM for generating evaluation grades
get_evaluation_llm
python
langchain-ai/open_deep_research
tests/test_report_quality.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/test_report_quality.py
MIT
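A hedged usage sketch, assuming the relevant provider API key is set and CriteriaGrade is defined as in the test module (the messages are illustrative):

evaluator = get_evaluation_llm("anthropic:claude-3-7-sonnet-latest")
result = evaluator.invoke([
    {"role": "system", "content": "Grade the report against the rubric."},
    {"role": "user", "content": "Report: ..."},
])
# with_structured_output means result is a CriteriaGrade, e.g. result.grade / result.justification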
def models(request, research_agent): """Get model configurations based on agent type.""" if research_agent == "multi_agent": return { "supervisor_model": ( request.config.getoption("--supervisor-model") or os.environ.get("SUPERVISOR_MODEL", "anthropic:claude-3-7-sonnet-latest") ), "researcher_model": ( request.config.getoption("--researcher-model") or os.environ.get("RESEARCHER_MODEL", "anthropic:claude-3-5-sonnet-latest") ), } else: # graph agent return { "planner_provider": ( request.config.getoption("--planner-provider") or os.environ.get("PLANNER_PROVIDER", "anthropic") ), "planner_model": ( request.config.getoption("--planner-model") or os.environ.get("PLANNER_MODEL", "claude-3-7-sonnet-latest") ), "writer_provider": ( request.config.getoption("--writer-provider") or os.environ.get("WRITER_PROVIDER", "anthropic") ), "writer_model": ( request.config.getoption("--writer-model") or os.environ.get("WRITER_MODEL", "claude-3-5-sonnet-latest") ), "max_search_depth": int( request.config.getoption("--max-search-depth") or os.environ.get("MAX_SEARCH_DEPTH", "2") ), }
Get model configurations based on agent type.
models
python
langchain-ai/open_deep_research
tests/test_report_quality.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/test_report_quality.py
MIT
def test_response_criteria_evaluation(research_agent, search_api, models, eval_model): """Test if a report meets the specified quality criteria.""" console.print(Panel.fit( f"[bold blue]Testing {research_agent} report generation with {search_api} search[/bold blue]", title="Test Configuration" )) # Create a table for model configuration models_table = Table(title="Model Configuration") models_table.add_column("Parameter", style="cyan") models_table.add_column("Value", style="green") for key, value in models.items(): models_table.add_row(key, str(value)) models_table.add_row("eval_model", eval_model) console.print(models_table) # Log inputs to LangSmith t.log_inputs({ "agent_type": research_agent, "search_api": search_api, "models": models, "eval_model": eval_model, "test": "report_quality_evaluation", "description": f"Testing report quality for {research_agent} with {search_api}" }) # Run the appropriate agent based on the parameter if research_agent == "multi_agent": # Initial messages initial_msg = [{"role": "user", "content": "Give me a high-level overview of MCP (model context protocol). Keep the report to 3 main body sections. One section on the origins of MPC, one section on interesting examples of MCP servers, and one section on the future roadmap for MCP. Report should be written for a developer audience."}] # Checkpointer for the multi-agent approach checkpointer = MemorySaver() graph = supervisor_builder.compile(checkpointer=checkpointer) # Create configuration with the provided parameters config = { "thread_id": str(uuid.uuid4()), "search_api": search_api, "supervisor_model": models.get("supervisor_model"), "researcher_model": models.get("researcher_model"), "ask_for_clarification": False, # Don't ask for clarification from the user and proceed to write the report "process_search_results": "summarize", # Optionally summarize } thread_config = {"configurable": config} # Run the workflow with asyncio asyncio.run(graph.ainvoke({"messages": initial_msg}, config=thread_config)) # Get the final state once both invocations are complete final_state = graph.get_state(thread_config) report = final_state.values.get('final_report', "No report generated") console.print(f"[bold green]Report generated with length: {len(report)} characters[/bold green]") elif research_agent == "graph": # Topic query topic_query = "Give me a high-level overview of MCP (model context protocol). Keep the report to 3 main body sections. One section on the origins of MPC, one section on interesting examples of MCP servers, and one section on the future roadmap for MCP. Report should be written for a developer audience." 
# Checkpointer for the graph approach checkpointer = MemorySaver() graph = builder.compile(checkpointer=checkpointer) # Configuration for the graph agent with provided parameters thread = {"configurable": { "thread_id": str(uuid.uuid4()), "search_api": search_api, "planner_provider": models.get("planner_provider", "anthropic"), "planner_model": models.get("planner_model", "claude-3-7-sonnet-latest"), "writer_provider": models.get("writer_provider", "anthropic"), "writer_model": models.get("writer_model", "claude-3-5-sonnet-latest"), "max_search_depth": models.get("max_search_depth", 2), }} async def run_graph_agent(thread): # Run the graph until the interruption async for event in graph.astream({"topic":topic_query}, thread, stream_mode="updates"): if '__interrupt__' in event: interrupt_value = event['__interrupt__'][0].value # Pass True to approve the report plan and proceed to write the report async for event in graph.astream(Command(resume=True), thread, stream_mode="updates"): # console.print(f"[dim]{event}[/dim]") # console.print() None final_state = graph.get_state(thread) report = final_state.values.get('final_report', "No report generated") return report report = asyncio.run(run_graph_agent(thread)) # Get evaluation LLM using the specified model criteria_eval_structured_llm = get_evaluation_llm(eval_model) # Evaluate the report against our quality criteria eval_result = criteria_eval_structured_llm.invoke([ {"role": "system", "content": RESPONSE_CRITERIA_SYSTEM_PROMPT}, {"role": "user", "content": f"""\n\n Report: \n\n{report}\n\nEvaluate whether the report meets the criteria and provide detailed justification for your evaluation."""} ]) # Extract section headers for analysis import re section_headers = re.findall(r'##\s+([^\n]+)', report) # Display the generated report console.print(Panel( Markdown(report), title="Generated Report", border_style="blue" )) # Create evaluation results display result_color = "green" if eval_result.grade else "red" result_text = "PASSED" if eval_result.grade else "FAILED" console.print(Panel.fit( f"[bold {result_color}]{result_text}[/bold {result_color}]", title="Evaluation Result" )) # Create sections table sections_table = Table(title="Report Structure Analysis") sections_table.add_column("Section", style="cyan") sections_table.add_column("Header", style="yellow") for i, header in enumerate(section_headers, 1): sections_table.add_row(f"Section {i}", header) console.print(sections_table) console.print(f"[bold]Total sections found: {len(section_headers)}[/bold]") # Display justification in a panel console.print(Panel( eval_result.justification, title="Evaluation Justification", border_style="yellow" )) # Log outputs to LangSmith t.log_outputs({ "report": report, "evaluation_result": eval_result.grade, "justification": eval_result.justification, "report_length": len(report), "section_count": len(section_headers), "section_headers": section_headers, }) # Test passes if the evaluation criteria are met assert eval_result.grade
Test if a report meets the specified quality criteria.
test_response_criteria_evaluation
python
langchain-ai/open_deep_research
tests/test_report_quality.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/test_report_quality.py
MIT
async def generate_report_multi_agent( messages: list[MessageLikeRepresentation], process_search_results: Literal["summarize", "split_and_rerank"] | None = None, include_source: bool = True, summarization_model: str = summarization_model, summarization_model_provider: str = summarization_model_provider, supervisor_model: str = supervisor_model, researcher_model: str = researcher_model, ): """Generate a report using the open deep research multi-agent architecture""" graph = supervisor_builder.compile() config = {"configurable": {}} if include_source: config["configurable"]["include_source_str"] = True if process_search_results: config["configurable"]["process_search_results"] = process_search_results config["configurable"]["summarization_model"] = summarization_model config["configurable"]["summarization_model_provider"] = summarization_model_provider config["configurable"]["supervisor_model"] = supervisor_model config["configurable"]["researcher_model"] = researcher_model final_state = await graph.ainvoke( # this is a hack # TODO: Find workaround at some point {"messages": messages + [{"role": "user", "content": "Generate the report now and don't ask any more follow-up questions"}]}, config ) return { "messages": [ {"role": "assistant", "content": final_state["final_report"]} ] }
Generate a report using the open deep research multi-agent architecture
generate_report_multi_agent
python
langchain-ai/open_deep_research
tests/evals/run_evaluate.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/evals/run_evaluate.py
MIT
async def generate_report_workflow( query: str, process_search_results: Literal["summarize", "split_and_rerank"] | None = None, include_source: bool = True ): """Generate a report using the open deep research workflow""" graph = builder.compile(checkpointer=MemorySaver()) config = { "configurable": { "thread_id": str(uuid.uuid4()), } } if include_source: config["configurable"]["include_source_str"] = True if process_search_results: config["configurable"]["process_search_results"] = process_search_results # Run the graph until the interruption await graph.ainvoke( {"topic": query}, config ) # Pass True to approve the report plan final_state = await graph.ainvoke(Command(resume=True), config) return final_state
Generate a report using the open deep research workflow
generate_report_workflow
python
langchain-ai/open_deep_research
tests/evals/target.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/evals/target.py
MIT
async def generate_report_multi_agent( messages: list[MessageLikeRepresentation], process_search_results: Literal["summarize", "split_and_rerank"] | None = None, include_source: bool = True ): """Generate a report using the open deep research multi-agent architecture""" graph = supervisor_builder.compile() config = {"configurable": {}} if include_source: config["configurable"]["include_source_str"] = True if process_search_results: config["configurable"]["process_search_results"] = process_search_results final_state = await graph.ainvoke( # this is a hack {"messages": messages + [{"role": "user", "content": "Generate the report now and don't ask any more follow-up questions"}]}, config ) return final_state
Generate a report using the open deep research multi-agent architecture
generate_report_multi_agent
python
langchain-ai/open_deep_research
tests/evals/target.py
https://github.com/langchain-ai/open_deep_research/blob/master/tests/evals/target.py
MIT
def write_file_prefix(f: IO[Any], interpreter: str) -> None: """Write a shebang line. :param f: An open file handle. :param interpreter: A path to a python interpreter. """ # if the provided path is too long for a shebang we should error out if len(interpreter) > BINPRM_BUF_SIZE: sys.exit(BINPRM_ERROR) f.write(b"#!" + interpreter.encode(sys.getfilesystemencoding()) + b"\n")
Write a shebang line. :param f: An open file handle. :param interpreter: A path to a python interpreter.
write_file_prefix
python
linkedin/shiv
src/shiv/builder.py
https://github.com/linkedin/shiv/blob/master/src/shiv/builder.py
BSD-2-Clause
def write_to_zipapp(
    archive: zipfile.ZipFile,
    arcname: str,
    data: bytes,
    date_time: Tuple[int, int, int, int, int, int],
    compression: int,
    stat: Optional[os.stat_result] = None,
) -> None:
    """Write a file or a bytestring to a ZipFile as a separate entry.

    Content hashing happens in the caller (create_archive); this function only writes the entry.
    """
    zinfo = zipfile.ZipInfo(arcname, date_time=date_time)
    zinfo.compress_type = compression

    if stat:
        zinfo.external_attr = (S_IMODE(stat.st_mode) | S_IFMT(stat.st_mode)) << 16

    archive.writestr(zinfo, data)
Write a file or a bytestring to a ZipFile as a separate entry. Content hashing happens in the caller (create_archive); this function only writes the entry.
write_to_zipapp
python
linkedin/shiv
src/shiv/builder.py
https://github.com/linkedin/shiv/blob/master/src/shiv/builder.py
BSD-2-Clause
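A minimal sketch of writing one reproducible entry with this helper (filenames and the date tuple are illustrative):

import zipfile
from pathlib import Path

payload = Path("hello.txt")
payload.write_text("hello from shiv\n")

with zipfile.ZipFile("demo.zip", "w") as archive:
    write_to_zipapp(
        archive,
        "hello.txt",
        payload.read_bytes(),
        (2024, 1, 1, 0, 0, 0),     # a fixed date_time keeps the archive reproducible
        zipfile.ZIP_DEFLATED,
        stat=payload.stat(),       # preserves the file mode via external_attr
    )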
def rglob_follow_symlinks(path: Path, glob: str) -> Generator[Path, None, None]:
    """Path.rglob extended to follow symlinks, while we wait for Python 3.13."""
    for p in path.rglob(glob):  # honor the caller-supplied glob pattern
        if p.is_symlink() and p.is_dir():
            yield from chain([p], rglob_follow_symlinks(p, glob))
        else:
            yield p
Path.rglob extended to follow symlinks, while we wait for Python 3.13.
rglob_follow_symlinks
python
linkedin/shiv
src/shiv/builder.py
https://github.com/linkedin/shiv/blob/master/src/shiv/builder.py
BSD-2-Clause
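A quick usage sketch (the function relies on itertools.chain being imported in builder.py, as shown by its body; the directory name is illustrative):

from pathlib import Path

# Walks a tree, descending into directory symlinks that Path.rglob alone would not follow
for p in rglob_follow_symlinks(Path("site-packages"), "*"):
    print(p)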
def create_archive( sources: List[Path], target: Path, interpreter: str, main: str, env: Environment, compressed: bool = True ) -> None: """Create an application archive from SOURCE. This function is a heavily modified version of stdlib's `zipapp.create_archive <https://docs.python.org/3/library/zipapp.html#zipapp.create_archive>`_ """ # Check that main has the right format. mod, sep, fn = main.partition(":") mod_ok = all(part.isidentifier() for part in mod.split(".")) fn_ok = all(part.isidentifier() for part in fn.split(".")) if not (sep == ":" and mod_ok and fn_ok): raise zipapp.ZipAppError("Invalid entry point: " + main) # Collect our timestamp data main_py = MAIN_TEMPLATE.format(module=mod, fn=fn) timestamp = datetime.strptime(env.built_at, BUILD_AT_TIMESTAMP_FORMAT).replace(tzinfo=timezone.utc).timestamp() zipinfo_datetime: Tuple[int, int, int, int, int, int] = time.gmtime(int(timestamp))[0:6] with target.open(mode="wb") as fd: # Write shebang. write_file_prefix(fd, interpreter) # Determine compression. compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED # Pack zipapp with dependencies. with zipfile.ZipFile(fd, "w", compression=compression) as archive: site_packages = Path("site-packages") contents_hash = hashlib.sha256() for source in sources: # Glob is known to return results in non-deterministic order. # We need to sort them by in-archive paths to ensure # that archive contents are reproducible. # # NOTE: https://github.com/linkedin/shiv/issues/236 # this special rglob function can be replaced with "rglob('*', follow_symlinks=True)" # when Python 3.13 becomes the lowest supported version for path in sorted(rglob_follow_symlinks(source, "*"), key=str): # Skip compiled files and directories (as they are not required to be present in the zip). if path.suffix == ".pyc" or path.is_dir(): continue data = path.read_bytes() # update the contents hash contents_hash.update(data) # take filenames into account as well - build_id should change if a file is moved or renamed contents_hash.update(str(path.relative_to(source)).encode()) arcname = str(site_packages / path.relative_to(source)) write_to_zipapp(archive, arcname, data, zipinfo_datetime, compression, stat=path.stat()) if env.build_id is None: # Now that we have a hash of all the source files, use it as our build id if the user did not # specify a custom one. env.build_id = contents_hash.hexdigest() # now let's add the shiv bootstrap code. bootstrap_target = Path("_bootstrap") for path, name in iter_package_files(bootstrap): data = path.read_bytes() write_to_zipapp( archive, str(bootstrap_target / name), data, zipinfo_datetime, compression, stat=path.stat(), ) # Write environment info in json file. # # The environment file contains build_id which is a SHA-256 checksum of all **site-packages** contents. # the bootstrap code, environment.json and __main__.py are not used to calculate the checksum, is it's # only used for local caching of site-packages and these files are always read from archive. write_to_zipapp(archive, "environment.json", env.to_json().encode("utf-8"), zipinfo_datetime, compression) # write __main__ write_to_zipapp(archive, "__main__.py", main_py.encode("utf-8"), zipinfo_datetime, compression) # Make pyz executable (on windows this is no-op). target.chmod(target.stat().st_mode | S_IXUSR | S_IXGRP | S_IXOTH)
Create an application archive from SOURCE. This function is a heavily modified version of stdlib's `zipapp.create_archive <https://docs.python.org/3/library/zipapp.html#zipapp.create_archive>`_
create_archive
python
linkedin/shiv
src/shiv/builder.py
https://github.com/linkedin/shiv/blob/master/src/shiv/builder.py
BSD-2-Clause
def find_entry_point(site_packages_dirs: List[Path], console_script: str) -> str: """Find a console_script in a site-packages directory. Console script metadata is stored in entry_points.txt per setuptools convention. This function searches all entry_points.txt files and returns the import string for a given console_script argument. :param site_packages_dirs: Paths to site-packages directories on disk. :param console_script: A console_script string. """ config_parser = ConfigParser() for site_packages in site_packages_dirs: # noinspection PyTypeChecker config_parser.read(site_packages.rglob("entry_points.txt")) return config_parser["console_scripts"][console_script]
Find a console_script in a site-packages directory. Console script metadata is stored in entry_points.txt per setuptools convention. This function searches all entry_points.txt files and returns the import string for a given console_script argument. :param site_packages_dirs: Paths to site-packages directories on disk. :param console_script: A console_script string.
find_entry_point
python
linkedin/shiv
src/shiv/cli.py
https://github.com/linkedin/shiv/blob/master/src/shiv/cli.py
BSD-2-Clause
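A usage sketch mirroring the repository's own test (the path is illustrative; the site-packages dir is assumed to contain an entry_points.txt declaring "hello = hello:main" under [console_scripts]):

from pathlib import Path

entry = find_entry_point([Path("build/site-packages")], "hello")
assert entry == "hello:main"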
def console_script_exists(site_packages_dirs: List[Path], console_script: str) -> bool: """Return true if the console script with provided name exists in one of the site-packages directories. Console script is expected to be in the 'bin' directory of site packages. :param site_packages_dirs: Paths to site-packages directories on disk. :param console_script: A console script name. """ for site_packages in site_packages_dirs: if (site_packages / "bin" / console_script).exists(): return True return False
Return true if the console script with provided name exists in one of the site-packages directories. Console script is expected to be in the 'bin' directory of site packages. :param site_packages_dirs: Paths to site-packages directories on disk. :param console_script: A console script name.
console_script_exists
python
linkedin/shiv
src/shiv/cli.py
https://github.com/linkedin/shiv/blob/master/src/shiv/cli.py
BSD-2-Clause
def copytree(src: Path, dst: Path) -> None: """A utility function for syncing directories. This function is based on shutil.copytree. In Python versions that are older than 3.8, shutil.copytree would raise FileExistsError if the "dst" directory already existed. """ # Make our target (if it doesn't already exist). dst.mkdir(parents=True, exist_ok=True) for path in src.iterdir(): # type: Path # If we encounter a subdirectory, recurse. if path.is_dir(): copytree(path, dst / path.relative_to(src)) else: shutil.copy2(str(path), str(dst / path.relative_to(src)))
A utility function for syncing directories. This function is based on shutil.copytree. In Python versions that are older than 3.8, shutil.copytree would raise FileExistsError if the "dst" directory already existed.
copytree
python
linkedin/shiv
src/shiv/cli.py
https://github.com/linkedin/shiv/blob/master/src/shiv/cli.py
BSD-2-Clause
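A short sketch of the merge behavior (directory names are illustrative):

from pathlib import Path

# Unlike pre-3.8 shutil.copytree, this merges into an existing destination
copytree(Path("wheel-a/site-packages"), Path("staging"))
copytree(Path("wheel-b/site-packages"), Path("staging"))  # no FileExistsError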
def main( output_file: str, entry_point: Optional[str], console_script: Optional[str], python: Optional[str], site_packages: Optional[str], build_id: Optional[str], compressed: bool, compile_pyc: bool, extend_pythonpath: bool, reproducible: bool, no_modify: bool, preamble: Optional[str], root: Optional[str], pip_args: List[str], ) -> None: """ Shiv is a command line utility for building fully self-contained Python zipapps as outlined in PEP 441, but with all their dependencies included! """ if not pip_args and not site_packages: sys.exit(NO_PIP_ARGS_OR_SITE_PACKAGES) if output_file is None: sys.exit(NO_OUTFILE) # check for disallowed pip arguments for disallowed in DISALLOWED_ARGS: for supplied_arg in pip_args: if supplied_arg in disallowed: sys.exit(DISALLOWED_PIP_ARGS.format(arg=supplied_arg, reason=DISALLOWED_ARGS[disallowed])) if build_id is not None: click.secho( "Warning! You have overridden the default build-id behavior, " "executables created by shiv must have unique build IDs or unexpected behavior could occur.", fg="yellow", ) sources: List[Path] = [] with TemporaryDirectory() as tmp_site_packages: # If both site_packages and pip_args are present, we need to copy the site_packages # dir into our staging area (tmp_site_packages) as pip may modify the contents. if site_packages: if pip_args: for sp in site_packages: copytree(Path(sp), Path(tmp_site_packages)) else: sources.extend([Path(p).expanduser() for p in site_packages]) if pip_args: # Install dependencies into staged site-packages. pip.install(["--target", tmp_site_packages] + list(pip_args)) if preamble: bin_dir = Path(tmp_site_packages, "bin") bin_dir.mkdir(exist_ok=True) shutil.copy(Path(preamble).absolute(), bin_dir / Path(preamble).name) sources.append(Path(tmp_site_packages).absolute()) if no_modify: # if no_modify is specified, we need to build a map of source files and their # sha256 hashes, to be checked at runtime: hashes = {} for source in sources: for path in source.rglob("**/*.py"): hashes[str(path.relative_to(source))] = hashlib.sha256(path.read_bytes()).hexdigest() # if entry_point is a console script, get the callable and null out the console_script variable # so that we avoid modifying sys.argv in bootstrap.py if entry_point is None and console_script is not None: try: entry_point = find_entry_point(sources, console_script) except KeyError: if not console_script_exists(sources, console_script): sys.exit(NO_ENTRY_POINT.format(entry_point=console_script)) else: console_script = None # Some projects need reproducible artifacts, so they can use SOURCE_DATE_EPOCH # environment variable to specify the timestamps in the zipapp. timestamp = int( os.environ.get(SOURCE_DATE_EPOCH_ENV, SOURCE_DATE_EPOCH_DEFAULT if reproducible else time.time()) ) # create runtime environment metadata env = Environment( built_at=datetime.utcfromtimestamp(timestamp).strftime(BUILD_AT_TIMESTAMP_FORMAT), build_id=build_id, entry_point=entry_point, script=console_script, compile_pyc=compile_pyc, extend_pythonpath=extend_pythonpath, shiv_version=__version__, no_modify=no_modify, reproducible=reproducible, preamble=Path(preamble).name if preamble else None, root=root, ) if no_modify: env.hashes = hashes # create the zip builder.create_archive( sources, target=Path(output_file).expanduser(), interpreter=python or DEFAULT_SHEBANG, main="_bootstrap:bootstrap", env=env, compressed=compressed, )
Shiv is a command line utility for building fully self-contained Python zipapps as outlined in PEP 441, but with all their dependencies included!
main
python
linkedin/shiv
src/shiv/cli.py
https://github.com/linkedin/shiv/blob/master/src/shiv/cli.py
BSD-2-Clause
def main(print_as_json, pyz): """A simple utility to print debugging information about PYZ files created with ``shiv``""" zip_file = zipfile.ZipFile(pyz) data = json.loads(zip_file.read("environment.json")) if print_as_json: click.echo(json.dumps(data, indent=4, sort_keys=True)) else: click.echo() click.secho("pyz file: ", fg="green", bold=True, nl=False) click.secho(pyz, fg="white") click.echo() for key, value in data.items(): click.secho(f"{key}: ", fg="blue", bold=True, nl=False) if key == "hashes": click.secho(json.dumps(value, sort_keys=True, indent=2)) else: click.secho(f"{value}", fg="white") click.echo()
A simple utility to print debugging information about PYZ files created with ``shiv``
main
python
linkedin/shiv
src/shiv/info.py
https://github.com/linkedin/shiv/blob/master/src/shiv/info.py
BSD-2-Clause
def clean_pip_env() -> Generator[None, None, None]: """A context manager for temporarily removing 'PIP_REQUIRE_VIRTUALENV' from the environment. Since shiv installs via `--target`, we need to ignore venv requirements if they exist. """ require_venv = os.environ.pop(PIP_REQUIRE_VIRTUALENV, None) try: yield finally: if require_venv is not None: os.environ[PIP_REQUIRE_VIRTUALENV] = require_venv
A context manager for temporarily removing 'PIP_REQUIRE_VIRTUALENV' from the environment. Since shiv installs via `--target`, we need to ignore venv requirements if they exist.
clean_pip_env
python
linkedin/shiv
src/shiv/pip.py
https://github.com/linkedin/shiv/blob/master/src/shiv/pip.py
BSD-2-Clause
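A sketch of the intended behavior, assuming the generator is wrapped with contextlib.contextmanager in the module (the Generator return type and the yield suggest it is) and that the PIP_REQUIRE_VIRTUALENV constant holds the environment variable name of the same spelling:

import os

os.environ["PIP_REQUIRE_VIRTUALENV"] = "true"
with clean_pip_env():
    # pip can now install with --target without tripping the venv guard
    assert "PIP_REQUIRE_VIRTUALENV" not in os.environ
assert os.environ["PIP_REQUIRE_VIRTUALENV"] == "true"  # restored afterwards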
def install(args: List[str]) -> None: """`pip install` as a function. Accepts a list of pip arguments. .. code-block:: py >>> install(['numpy', '--target', 'site-packages']) Collecting numpy Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB) 100% || 16.9MB 53kB/s Installing collected packages: numpy Successfully installed numpy-1.13.3 """ with clean_pip_env(): # if being invoked as a pyz, we must ensure we have access to our own # site-packages when subprocessing since there is no guarantee that pip # will be available subprocess_env = os.environ.copy() sitedir_index = get_first_sitedir_index() extend_python_path(subprocess_env, sys.path[sitedir_index:]) process = subprocess.Popen( [sys.executable, "-m", "pip", "--disable-pip-version-check", "install", *args], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=subprocess_env, universal_newlines=True, ) for output in process.stdout: # type: ignore if output: click.echo(output.rstrip()) if process.wait() > 0: sys.exit(PIP_INSTALL_ERROR)
`pip install` as a function. Accepts a list of pip arguments. .. code-block:: py >>> install(['numpy', '--target', 'site-packages']) Collecting numpy Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB) 100% || 16.9MB 53kB/s Installing collected packages: numpy Successfully installed numpy-1.13.3
install
python
linkedin/shiv
src/shiv/pip.py
https://github.com/linkedin/shiv/blob/master/src/shiv/pip.py
BSD-2-Clause
def acquire_nix(lock_file): # pragma: no cover """Acquire a lock file on linux or osx.""" fd = os.open(lock_file, OPEN_MODE) try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except (IOError, OSError): os.close(fd) else: return fd
Acquire a lock file on linux or osx.
acquire_nix
python
linkedin/shiv
src/shiv/bootstrap/filelock.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/filelock.py
BSD-2-Clause
def run(module): # pragma: no cover """Run a module in a scrubbed environment. If a single pyz has multiple callers, we want to remove these vars as we no longer need them and they can cause subprocesses to fail with a ModuleNotFoundError. :param Callable module: The entry point to invoke the pyz with. """ with suppress(KeyError): del os.environ[Environment.MODULE] with suppress(KeyError): del os.environ[Environment.ENTRY_POINT] with suppress(KeyError): del os.environ[Environment.CONSOLE_SCRIPT] sys.exit(module())
Run a module in a scrubbed environment. If a single pyz has multiple callers, we want to remove these vars as we no longer need them and they can cause subprocesses to fail with a ModuleNotFoundError. :param Callable module: The entry point to invoke the pyz with.
run
python
linkedin/shiv
src/shiv/bootstrap/__init__.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/__init__.py
BSD-2-Clause
def current_zipfile(): """A function to vend the current zipfile, if any""" if zipfile.is_zipfile(sys.argv[0]): with zipfile.ZipFile(sys.argv[0]) as fd: yield fd else: yield None
A function to vend the current zipfile, if any
current_zipfile
python
linkedin/shiv
src/shiv/bootstrap/__init__.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/__init__.py
BSD-2-Clause
def import_string(import_name): """Returns a callable for a given setuptools style import string :param str import_name: A console_scripts style import string """ import_name = str(import_name).replace(":", ".") try: import_module(import_name) except ImportError: if "." not in import_name: # this is a case like "import name", where continuing to the # next style of import would not improve the situation, so # we raise here. raise else: return sys.modules[import_name] # this is a case where the previous attempt may have failed due to # not being importable. ("not a package", etc) module_name, obj_name = import_name.rsplit(".", 1) try: module = __import__(module_name, None, None, [obj_name]) except ImportError: # Recurse to support importing modules not yet set up by the parent module # (or package for that matter) module = import_string(module_name) try: return getattr(module, obj_name) except AttributeError as e: raise ImportError(e)
Returns a callable for a given setuptools style import string :param str import_name: A console_scripts style import string
import_string
python
linkedin/shiv
src/shiv/bootstrap/__init__.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/__init__.py
BSD-2-Clause
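A couple of usage sketches against the standard library, showing both accepted spellings:

# Setuptools-style "module:attr" strings are normalized to dotted paths first
dumps = import_string("json:dumps")
print(dumps({"answer": 42}))

# Plain module paths work too; the module object itself is returned
os_path = import_string("os.path")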
def cache_path(archive, root_dir, build_id): """Returns a ~/.shiv cache directory for unzipping site-packages during bootstrap. :param ZipFile archive: The zipfile object we are bootstrapping from. :param str root_dir: Optional, either a path or environment variable pointing to a SHIV_ROOT. :param str build_id: The build id generated at zip creation. """ if root_dir: if root_dir.startswith("$"): root_dir = os.environ.get(root_dir[1:], root_dir[1:]) root_dir = Path(root_dir).expanduser() root = root_dir or Path("~/.shiv").expanduser() name = Path(archive.filename).resolve().name return root / f"{name}_{build_id}"
Returns a ~/.shiv cache directory for unzipping site-packages during bootstrap. :param ZipFile archive: The zipfile object we are bootstrapping from. :param str root_dir: Optional, either a path or environment variable pointing to a SHIV_ROOT. :param str build_id: The build id generated at zip creation.
cache_path
python
linkedin/shiv
src/shiv/bootstrap/__init__.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/__init__.py
BSD-2-Clause
def extract_site_packages(archive, target_path, compile_pyc=False, compile_workers=0, force=False): """Extract everything in site-packages to a specified path. :param ZipFile archive: The zipfile object we are bootstrapping from. :param Path target_path: The path to extract our zip to. :param bool compile_pyc: A boolean to dictate whether we pre-compile pyc. :param int compile_workers: An int representing the number of pyc compiler workers. :param bool force: A boolean to dictate whether or not we force extraction. """ parent = target_path.parent target_path_tmp = Path(parent, target_path.name + ".tmp") lock = Path(parent, f".{target_path.name}_lock") # If this is the first time that a pyz is being extracted, we'll need to create the ~/.shiv dir if not parent.exists(): parent.mkdir(parents=True, exist_ok=True) with FileLock(lock): # we acquired a lock, it's possible that prior invocation was holding the lock and has # completed bootstrapping, so let's check (again) if we need to do any work if not target_path.exists() or force: # extract our site-packages for fileinfo in archive.infolist(): if fileinfo.filename.startswith("site-packages"): extracted = archive.extract(fileinfo.filename, target_path_tmp) # restore original permissions os.chmod(extracted, fileinfo.external_attr >> 16) if compile_pyc: compileall.compile_dir(target_path_tmp, quiet=2, workers=compile_workers) # if using `force` we will need to delete our target path if target_path.exists(): shutil.rmtree(str(target_path)) # atomic move shutil.move(str(target_path_tmp), str(target_path))
Extract everything in site-packages to a specified path. :param ZipFile archive: The zipfile object we are bootstrapping from. :param Path target_path: The path to extract our zip to. :param bool compile_pyc: A boolean to dictate whether we pre-compile pyc. :param int compile_workers: An int representing the number of pyc compiler workers. :param bool force: A boolean to dictate whether or not we force extraction.
extract_site_packages
python
linkedin/shiv
src/shiv/bootstrap/__init__.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/__init__.py
BSD-2-Clause
def extend_python_path(environ, additional_paths): """Create or extend a PYTHONPATH variable with the frozen environment we are bootstrapping with.""" # we don't want to clobber any existing PYTHONPATH value, so check for it. python_path = environ["PYTHONPATH"].split(os.pathsep) if "PYTHONPATH" in environ else [] python_path.extend(additional_paths) # put it back into the environment so that PYTHONPATH contains the shiv-manipulated paths # and any pre-existing PYTHONPATH values with no duplicates. environ["PYTHONPATH"] = os.pathsep.join(sorted(set(python_path), key=python_path.index))
Create or extend a PYTHONPATH variable with the frozen environment we are bootstrapping with.
extend_python_path
python
linkedin/shiv
src/shiv/bootstrap/__init__.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/__init__.py
BSD-2-Clause
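A minimal sketch of the de-duplicating behavior, using a throwaway dict in place of os.environ:

import os

env = {"PYTHONPATH": "/existing"}
extend_python_path(env, ["/frozen/site-packages", "/existing"])
# first occurrence wins; duplicates are dropped while order is preserved
assert env["PYTHONPATH"] == os.pathsep.join(["/existing", "/frozen/site-packages"])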
def ensure_no_modify(site_packages, hashes): """Compare the sha256 hash of the unpacked source files to the files when they were added to the pyz.""" for path in site_packages.rglob("**/*.py"): if hashlib.sha256(path.read_bytes()).hexdigest() != hashes.get(str(path.relative_to(site_packages))): raise RuntimeError( "A Python source file has been modified! File: {}. " "Try again with SHIV_FORCE_EXTRACT=1 to overwrite the modified source file(s).".format(str(path)) )
Compare the sha256 hash of the unpacked source files to the files when they were added to the pyz.
ensure_no_modify
python
linkedin/shiv
src/shiv/bootstrap/__init__.py
https://github.com/linkedin/shiv/blob/master/src/shiv/bootstrap/__init__.py
BSD-2-Clause
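A sketch of how a matching hashes mapping could be built (assumptions: hypothetical paths, and keys relative to site-packages, exactly as the function looks them up):

import hashlib
from pathlib import Path

site_packages = Path("/tmp/extracted/site-packages")   # hypothetical location
hashes = {
    str(p.relative_to(site_packages)): hashlib.sha256(p.read_bytes()).hexdigest()
    for p in site_packages.rglob("**/*.py")
}
ensure_no_modify(site_packages, hashes)   # raises RuntimeError on any mismatch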
def test_extend_path_existing_pythonpath(self): """When PYTHONPATH exists, extending it preserves the existing values.""" env = {"PYTHONPATH": "hello"} extend_python_path(env, ["test", ".pth"]) assert env["PYTHONPATH"] == os.pathsep.join(["hello", "test", ".pth"])
When PYTHONPATH exists, extending it preserves the existing values.
test_extend_path_existing_pythonpath
python
linkedin/shiv
test/test_bootstrap.py
https://github.com/linkedin/shiv/blob/master/test/test_bootstrap.py
BSD-2-Clause
def test_find_entry_point(self, tmpdir, package_location): """Test that we can find console_script metadata.""" install(["-t", str(tmpdir), str(package_location)]) assert find_entry_point([Path(tmpdir)], "hello") == "hello:main"
Test that we can find console_script metadata.
test_find_entry_point
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_find_entry_point_two_points(self, tmpdir, package_location): """Test that we can find console_script metadata.""" install(["-t", str(tmpdir), str(package_location)]) assert find_entry_point([Path(tmpdir)], "hello") == "hello:main"
Test that we can find console_script metadata.
test_find_entry_point_two_points
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_console_script_exists(self, tmp_path, package_location): """Test that we can check console_script presence.""" install_dir = tmp_path / "install" install(["-t", str(install_dir), str(package_location)]) empty_dir = tmp_path / "empty" empty_dir.mkdir() assert console_script_exists([empty_dir, install_dir], "hello.exe" if os.name == "nt" else "hello")
Test that we can check console_script presence.
test_console_script_exists
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_no_args(self, runner): """This should fail with a warning about supplying pip arguments""" result = runner([]) assert result.exit_code == 1 assert NO_PIP_ARGS_OR_SITE_PACKAGES in result.output
This should fail with a warning about supplying pip arguments
test_no_args
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_no_outfile(self, runner): """This should fail with a warning about not providing an outfile""" result = runner(["-e", "test", "flask"]) assert result.exit_code == 1 assert NO_OUTFILE in result.output
This should fail with a warning about not providing an outfile
test_no_outfile
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_disallowed_args(self, runner, arg): """This method tests that all the potential disallowed arguments match their error messages.""" # run shiv with a disallowed argument result = runner(["-o", "tmp", arg]) # get the 'reason' message: reason = next(iter([DISALLOWED_ARGS[disallowed] for disallowed in DISALLOWED_ARGS if arg in disallowed])) assert result.exit_code == 1 # assert we got the correct reason assert DISALLOWED_PIP_ARGS.format(arg=arg, reason=reason) in result.output
This method tests that all the potential disallowed arguments match their error messages.
test_disallowed_args
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_preamble_no_pip(self, shiv_root, runner, package_location, tmp_path): """Test that the preamble script is created even with no pip installed packages.""" output_file = shiv_root / "test.pyz" target = tmp_path / "target" preamble = tmp_path / "preamble.py" preamble.write_text("#!/usr/bin/env python3\nprint('hello from preamble')") preamble.chmod(preamble.stat().st_mode | stat.S_IEXEC) # first, by installing our test package into a target install(["-t", str(target), str(package_location)]) result = runner( ["-e", "hello:main", "--preamble", str(preamble), "-o", str(output_file), "--site-packages", target] ) # check that the command successfully completed assert result.exit_code == 0 # ensure the created file actually exists assert output_file.exists() # now run the produced zipapp proc = subprocess.run( [str(output_file)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ, ) assert proc.returncode == 0 assert proc.stdout.decode().splitlines() == ["hello from preamble", "hello world"]
Test that the preamble script is created even with no pip installed packages.
test_preamble_no_pip
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_alternate_root(self, runner, package_location, tmp_path): """Test that the --root argument properly sets the extraction root.""" output_file = tmp_path / "test.pyz" shiv_root = tmp_path / "root" result = runner( ["-e", "hello:main", "--root", str(shiv_root), "-o", str(output_file), str(package_location)] ) # check that the command successfully completed assert result.exit_code == 0 # ensure the created file actually exists assert output_file.exists() # now run the produced zipapp proc = subprocess.run( [str(output_file)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ, ) assert proc.returncode == 0 assert "hello" in proc.stdout.decode() assert shiv_root.exists()
Test that the --root argument properly sets the extraction root.
test_alternate_root
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def test_alternate_root_environment_variable(self, runner, package_location, tmp_path, env_var): """Test that the --root argument works with environment variables.""" output_file = tmp_path / "test.pyz" shiv_root_var = "NEW_ROOT" shiv_root_path = tmp_path / 'new_root' result = runner( ["-e", "hello:main", "--root", "$" + shiv_root_var, "-o", str(output_file), str(package_location)] ) with env_var(shiv_root_var, str(shiv_root_path)): # check that the command successfully completed assert result.exit_code == 0 # ensure the created file actually exists assert output_file.exists() # now run the produced zipapp proc = subprocess.run( [str(output_file)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ, ) assert proc.returncode == 0 assert "hello" in proc.stdout.decode() assert shiv_root_path.exists()
Test that the --root argument works with environment variables.
test_alternate_root_environment_variable
python
linkedin/shiv
test/test_cli.py
https://github.com/linkedin/shiv/blob/master/test/test_cli.py
BSD-2-Clause
def forward(self, inputs, outputs, labels):
    """
    Args:
        inputs: The original inputs that are fed to the teacher model
        outputs: the outputs of the model to be trained. It is expected to be
            either a Tensor, or a Tuple[Tensor, Tensor], with the original output
            in the first position and the distillation predictions as the second output
        labels: the labels for the base criterion
    """
    outputs_kd = None
    if not isinstance(outputs, torch.Tensor):
        # assume that the model outputs a tuple of [outputs, outputs_kd]
        outputs, outputs_kd = outputs
    base_loss = self.base_criterion(outputs, labels)
    if self.distillation_type == 'none':
        return base_loss

    if outputs_kd is None:
        raise ValueError("When knowledge distillation is enabled, the model is "
                         "expected to return a Tuple[Tensor, Tensor] with the output of the "
                         "class_token and the dist_token")
    # don't backprop through the teacher
    with torch.no_grad():
        teacher_outputs = self.teacher_model(inputs)

    if self.distillation_type == 'soft':
        T = self.tau
        # taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
        # with slight modifications
        distillation_loss = F.kl_div(
            F.log_softmax(outputs_kd / T, dim=1),
            #We provide the teacher's targets in log probability because we use log_target=True
            #(as recommended in pytorch https://github.com/pytorch/pytorch/blob/9324181d0ac7b4f7949a574dbc3e8be30abe7041/torch/nn/functional.py#L2719)
            #but it is possible to give just the probabilities and set log_target=False. In our experiments we tried both.
            F.log_softmax(teacher_outputs / T, dim=1),
            reduction='sum',
            log_target=True
        ) * (T * T) / outputs_kd.numel()
        #We divide by outputs_kd.numel() to have the legacy PyTorch behavior.
        #But we also experimented with outputs_kd.size(0)
        #see issue 61 (https://github.com/facebookresearch/deit/issues/61) for more details
    elif self.distillation_type == 'hard':
        distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))

    loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
    return loss
Args: inputs: The original inputs that are fed to the teacher model outputs: the outputs of the model to be trained. It is expected to be either a Tensor, or a Tuple[Tensor, Tensor], with the original output in the first position and the distillation predictions as the second output labels: the labels for the base criterion
forward
python
facebookresearch/deit
losses.py
https://github.com/facebookresearch/deit/blob/master/losses.py
Apache-2.0
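A standalone sketch of the soft-distillation term computed above (assumed shapes [batch, classes] and a hypothetical temperature; not the DeiT class itself):

import torch
import torch.nn.functional as F

T = 3.0                                   # hypothetical temperature
student_logits = torch.randn(8, 1000)
teacher_logits = torch.randn(8, 1000)
kd_loss = F.kl_div(
    F.log_softmax(student_logits / T, dim=1),
    F.log_softmax(teacher_logits / T, dim=1),   # log-probabilities, hence log_target=True
    reduction="sum",
    log_target=True,
) * (T * T) / student_logits.numel()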
def _load_checkpoint_for_ema(model_ema, checkpoint): """ Workaround for ModelEma._load_checkpoint to accept an already-loaded object """ mem_file = io.BytesIO() torch.save({'state_dict_ema':checkpoint}, mem_file) mem_file.seek(0) model_ema._load_checkpoint(mem_file)
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
_load_checkpoint_for_ema
python
facebookresearch/deit
utils.py
https://github.com/facebookresearch/deit/blob/master/utils.py
Apache-2.0
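A minimal usage sketch (assumptions: a timm ModelEma instance named model_ema, and a checkpoint file that stores the EMA weights under a "model_ema" key, as deit's training script does):

import torch

checkpoint = torch.load("checkpoint.pth", map_location="cpu")  # hypothetical path
_load_checkpoint_for_ema(model_ema, checkpoint["model_ema"])   # model_ema: assumed timm ModelEma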
def __init__( self, target: Target, user: str, dns: Optional[str] = None, upn: Optional[str] = None, sam: Optional[str] = None, spns: Optional[str] = None, passw: Optional[str] = None, group: Optional[str] = None, connection: Optional[LDAPConnection] = None, timeout: int = 5, **kwargs, # type: ignore ): """ Initialize account management with target and account options. Args: target: Target environment information (domain, credentials) user: Username for the account to manage dns: DNS hostname for the account upn: UserPrincipalName to set sam: sAMAccountName to set spns: Service Principal Names to set (comma-separated) passw: Password for the account group: Distinguished name of the group to place the account in scheme: LDAP connection scheme (ldap or ldaps) connection: Existing LDAP connection to reuse timeout: Connection timeout in seconds **kwargs: Additional arguments """ self.target = target self.user = user self.dns = dns self.upn = upn self.sam = sam self.spns = spns self.password = passw self.group = group self._connection = connection self.timeout = timeout self.kwargs = kwargs
Initialize account management with target and account options. Args: target: Target environment information (domain, credentials) user: Username for the account to manage dns: DNS hostname for the account upn: UserPrincipalName to set sam: sAMAccountName to set spns: Service Principal Names to set (comma-separated) passw: Password for the account group: Distinguished name of the group to place the account in scheme: LDAP connection scheme (ldap or ldaps) connection: Existing LDAP connection to reuse timeout: Connection timeout in seconds **kwargs: Additional arguments
__init__
python
ly4k/Certipy
certipy/commands/account.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/account.py
MIT
def connection(self) -> LDAPConnection: """ Get or establish an LDAP connection to the target. Returns: Active LDAP connection """ if self._connection is not None: return self._connection self._connection = LDAPConnection(self.target) self._connection.connect() return self._connection
Get or establish an LDAP connection to the target. Returns: Active LDAP connection
connection
python
ly4k/Certipy
certipy/commands/account.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/account.py
MIT
def create(self) -> bool: """ Create a new computer account in Active Directory. This method creates a computer account with the specified properties, or with reasonable defaults if not provided. Returns: True if account creation succeeded, False otherwise """ # Determine username (sAMAccountName) username = self.user if self.sam is not None: logging.warning( "The parameter -sam overrides the -user parameter for the create operation" ) res = input("Do you want to continue? (Y/n): ") if res.strip().lower() == "n": return False username = self.sam # Check if user already exists user = self.connection.get_user(username, silent=True) if user is not None: logging.error( f"User {user.get('sAMAccountName')!r} already exists. " f"If you want to update the user, specify the 'update' action" ) return False # Set container group for the account group = self.group if group is None: group = f"CN=Computers,{self.connection.default_path}" # Ensure computer account name ends with $ if username[-1] != "$": username += "$" # Generate random password if not provided password = self.password if password is None: password = "".join( random.choice(string.ascii_letters + string.digits) for _ in range(16) ) self.password = password # Set DNS hostname if not provided dns = self.dns if dns is None: dns = f"{username.rstrip('$')}.{self.connection.domain}".lower() # Create DN for the new account hostname = username[:-1] dn = f"CN={hostname},{group}" # Set default SPNs if not provided spns = self.spns if spns is None: base_name = username.rstrip("$") spns = [ f"HOST/{base_name}", f"RestrictedKrbHost/{base_name}", ] else: spns = [spn.strip() for spn in spns.split(",") if spn.strip()] # Prepare account attributes attributes: Dict[str, Any] = { "sAMAccountName": username, "unicodePwd": password, # Just for the pretty print "userAccountControl": 0x1000, # WORKSTATION_TRUST_ACCOUNT "servicePrincipalName": spns, "dnsHostName": dns, } logging.info("Creating new account:") pretty_print(attributes, indent=2) # Convert password to proper format for LDAP attributes["unicodePwd"] = ('"%s"' % password).encode("utf-16-le") # Add the account via LDAP result = self.connection.add( dn, ["top", "person", "organizationalPerson", "user", "computer"], attributes, ) # Handle result if result["result"] == 0: logging.info( f"Successfully created account {username!r} with password {password!r}" ) return True elif result["result"] == RESULT_INSUFFICIENT_ACCESS_RIGHTS: logging.error( f"User {self.target.username!r} doesn't have the right to create a machine account" ) elif ( result["result"] == RESULT_UNWILLING_TO_PERFORM and int(result["message"].split(":")[0].strip(), 16) == 0x216D ): logging.error( f"Machine account quota exceeded for {self.target.username!r}" ) else: logging.error( f"Received error: ({result['description']}) {result['message']}" ) return False
Create a new computer account in Active Directory. This method creates a computer account with the specified properties, or with reasonable defaults if not provided. Returns: True if account creation succeeded, False otherwise
create
python
ly4k/Certipy
certipy/commands/account.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/account.py
MIT
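A sketch of the unicodePwd quirk used above: Active Directory expects the new password wrapped in double quotes and encoded as UTF-16-LE before it is sent over LDAP:

password = "S3cretPassw0rd!"                       # hypothetical password
unicode_pwd = f'"{password}"'.encode("utf-16-le")  # value stored in unicodePwd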
def read(self) -> bool: """ Read and display account attributes. This method retrieves and displays key attributes of the specified account. Returns: True if account was found and attributes read, False otherwise """ # Get user object user = self.connection.get_user(self.user) if user is None: return False # Define attributes to display attributes = [ "cn", "distinguishedName", "name", "objectSid", "sAMAccountName", "dNSHostName", "servicePrincipalName", "userPrincipalName", "userAccountControl", "whenCreated", "whenChanged", ] # Collect attribute values attribute_values = {} logging.info(f"Reading attributes for {user.get('sAMAccountName')!r}:") for attribute in attributes: value = user.get(attribute) if value is not None: attribute_values[attribute] = value # Display attributes pretty_print(attribute_values, indent=2) return True
Read and display account attributes. This method retrieves and displays key attributes of the specified account. Returns: True if account was found and attributes read, False otherwise
read
python
ly4k/Certipy
certipy/commands/account.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/account.py
MIT
def update(self) -> bool: """ Update an existing account's attributes. This method modifies specified attributes of an existing account. Returns: True if account was successfully updated, False otherwise """ # Get user object user = self.connection.get_user(self.user) if user is None: return False # Prepare attribute changes changes: Dict[str, List[Tuple[Any, Any]]] = {} changes_formatted: Dict[str, Any] = {} # Define which attributes to update based on provided parameters attribute_mapping = { "unicodePwd": self.password, "dNSHostName": self.dns, "userPrincipalName": self.upn, "sAMAccountName": self.sam, "servicePrincipalName": ( [spn.strip() for spn in self.spns.split(",") if spn.strip()] if self.spns is not None else None ), } # Process each attribute that needs to be updated for attribute, value in attribute_mapping.items(): if value is None: continue if value == "" or (isinstance(value, list) and len(value) == 0): # Delete the attribute changes[attribute] = [(ldap3.MODIFY_DELETE, [])] changes_formatted[attribute] = "*DELETED*" else: # Replace the attribute with new value if attribute == "unicodePwd": # Special handling for password encoded_value = ('"%s"' % value).encode("utf-16-le") changes_formatted[attribute] = value # Show plaintext in output else: if isinstance(value, list): encoded_value = value else: encoded_value = [value] # LDAP expects lists for attributes changes_formatted[attribute] = value changes[attribute] = [(ldap3.MODIFY_REPLACE, encoded_value)] if not changes: logging.warning(f"No changes specified for {user.get('sAMAccountName')!r}") return False logging.info(f"Updating user {user.get('sAMAccountName')!r}:") pretty_print(changes_formatted, indent=2) # Apply changes via LDAP result = self.connection.modify( user.get("distinguishedName"), changes, ) # Handle result if result["result"] == 0: logging.info(f"Successfully updated {user.get('sAMAccountName')!r}") return True elif result["result"] == RESULT_INSUFFICIENT_ACCESS_RIGHTS: logging.error( f"User {self.target.username!r} doesn't have permission to update " f"these attributes on {user.get('sAMAccountName')!r}" ) else: logging.error(f"Received error: {result['message']}") return False
Update an existing account's attributes. This method modifies specified attributes of an existing account. Returns: True if account was successfully updated, False otherwise
update
python
ly4k/Certipy
certipy/commands/account.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/account.py
MIT
def delete(self) -> bool: """ Delete an account from Active Directory. This method permanently removes the specified account. Returns: True if account was successfully deleted, False otherwise """ # Get user object user = self.connection.get_user(self.user) if user is None: return False # Confirm deletion account_name = user.get("sAMAccountName") logging.warning(f"You are about to delete {account_name!r}") res = input("Are you sure? (y/N): ") if res.strip().lower() != "y": logging.info("Deletion canceled") return False # Delete account via LDAP result = self.connection.delete(user.get("distinguishedName")) # Handle result if result["result"] == 0: logging.info(f"Successfully deleted {account_name!r}") return True elif result["result"] == RESULT_INSUFFICIENT_ACCESS_RIGHTS: logging.error( f"User {self.target.username!r} doesn't have permission to delete {account_name!r}" ) else: logging.error(f"Received error: {result['message']}") return False
Delete an account from Active Directory. This method permanently removes the specified account. Returns: True if account was successfully deleted, False otherwise
delete
python
ly4k/Certipy
certipy/commands/account.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/account.py
MIT
def entry(options: argparse.Namespace) -> None: """ Entry point for the 'account' command. This function creates the Account object and dispatches to the appropriate method based on the specified action. Args: options: Command line options """ # Create target from command line options target = Target.from_options(options, dc_as_target=True) options.__delattr__("target") # Create account manager account = Account(target, **vars(options)) # Map actions to methods actions = { "create": account.create, "read": account.read, "update": account.update, "delete": account.delete, } # Validate action if options.account_action not in actions: logging.error(f"Unknown action: {options.account_action}") logging.info(f"Available actions: {', '.join(actions.keys())}") return # Execute the requested action result = actions[options.account_action]() # Set exit code based on result if result is False: import sys sys.exit(1)
Entry point for the 'account' command. This function creates the Account object and dispatches to the appropriate method based on the specified action. Args: options: Command line options
entry
python
ly4k/Certipy
certipy/commands/account.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/account.py
MIT
def __init__(self, tcp_shell: Any, domain_dumper: Any, client: Any): """ Initialize the LDAP shell. Args: tcp_shell: Shell to use for I/O domain_dumper: Domain information provider client: LDAP client connection """ super().__init__(tcp_shell, domain_dumper, client) self.use_rawinput = True self.shell = tcp_shell self.prompt = "\n# " self.tid = None self.intro = "Type help for list of commands" self.loggedIn = True self.last_output = None self.completion = [] self.client = client self.domain_dumper = domain_dumper
Initialize the LDAP shell. Args: tcp_shell: Shell to use for I/O domain_dumper: Domain information provider client: LDAP client connection
__init__
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
def truncate_key(value: bytes, keysize: int) -> bytes: """ Truncate a key to the specified size using SHA1 hashing. Args: value: Input key material keysize: Desired key size in bytes Returns: Truncated key of exactly keysize bytes """ output = b"" current_num = 0 while len(output) < keysize: current_digest = hash_digest(bytes([current_num]) + value, hashes.SHA1) if len(output) + len(current_digest) > keysize: output += current_digest[: keysize - len(output)] break output += current_digest current_num += 1 return output
Truncate a key to the specified size using SHA1 hashing. Args: value: Input key material keysize: Desired key size in bytes Returns: Truncated key of exactly keysize bytes
truncate_key
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
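A sketch of the same truncation written against hashlib directly (assumption: the hash_digest helper above computes a plain SHA1 digest):

import hashlib

def truncate_key_sketch(value: bytes, keysize: int) -> bytes:
    output = b""
    current_num = 0
    while len(output) < keysize:
        digest = hashlib.sha1(bytes([current_num]) + value).digest()
        output += digest[: keysize - len(output)]  # never overshoot keysize
        current_num += 1
    return output

assert len(truncate_key_sketch(b"shared-secret", 32)) == 32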
def __init__( self, target: Target, pfx: Optional[str] = None, username: Optional[str] = None, domain: Optional[str] = None, password: Optional[str] = None, cert: Optional[x509.Certificate] = None, key: Optional[PrivateKeyTypes] = None, no_save: bool = False, no_hash: bool = False, print: bool = False, kirbi: bool = False, ldap_shell: bool = False, **kwargs, # type: ignore ): """ Initialize authentication parameters. Args: target: Target information (domain, DC IP, etc.) pfx: Path to PFX/P12 certificate file username: Username to authenticate as domain: Domain to authenticate to password: Password for PFX file cert: Pre-loaded certificate object key: Pre-loaded private key object no_save: Don't save credential cache to disk no_hash: Don't extract NT hash print: Print ticket information kirbi: Save credential cache in Kirbi format ldap_shell: Launch interactive LDAP shell after authentication ldap_port: LDAP port (default: 389 for ldap, 636 for ldaps) ldap_scheme: LDAP scheme (ldap or ldaps) ldap_user_dn: LDAP user distinguished name user_dn: User distinguished name **kwargs: Additional parameters """ self.target = target self.username = username self.domain = domain self.pfx = pfx self.password = password self.cert = cert self.key = key self.no_save = no_save self.no_hash = no_hash self.print = print self.kirbi = kirbi self.ldap_shell = ldap_shell self.kwargs = kwargs # These will be populated during authentication self.nt_hash: Optional[str] = None self.lm_hash: Optional[str] = None self.ccache_name: Optional[str] = None # Load certificate and key from PFX if provided if self.pfx is not None: pfx_password = None if self.password: pfx_password = self.password.encode() try: with open(self.pfx, "rb") as f: pfx_data = f.read() self.key, self.cert = load_pfx(pfx_data, pfx_password) except Exception as e: logging.error(f"Failed to load PFX file: {e}") raise
Initialize authentication parameters. Args: target: Target information (domain, DC IP, etc.) pfx: Path to PFX/P12 certificate file username: Username to authenticate as domain: Domain to authenticate to password: Password for PFX file cert: Pre-loaded certificate object key: Pre-loaded private key object no_save: Don't save credential cache to disk no_hash: Don't extract NT hash print: Print ticket information kirbi: Save credential cache in Kirbi format ldap_shell: Launch interactive LDAP shell after authentication ldap_port: LDAP port (default: 389 for ldap, 636 for ldaps) ldap_scheme: LDAP scheme (ldap or ldaps) ldap_user_dn: LDAP user distinguished name user_dn: User distinguished name **kwargs: Additional parameters
__init__
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
def authenticate(
    self,
    username: Optional[str] = None,
    domain: Optional[str] = None,
    is_key_credential: bool = False,
) -> Union[str, bool, None]:
    """
    Authenticate using a certificate.

    This is the main entry point for authentication. It will determine
    whether to use LDAP or Kerberos authentication based on configuration.

    Args:
        username: Username to authenticate as
        domain: Domain to authenticate to
        is_key_credential: Whether we're using a key credential

    Returns:
        NT hash if extracted, True if successful, False if failed, None if error
    """
    if not self.cert:
        raise ValueError("Certificate is not specified and no PFX was provided")

    # Print authentication information present in the certificate
    print_certificate_authentication_information(self.cert)

    # Resolve username and domain from target if not provided
    if not username:
        username = self.username or self.target.username
    if not domain:
        domain = self.domain or self.target.domain

    # Use LDAP authentication if requested
    if self.ldap_shell:
        return self.ldap_authentication(domain)

    # Extract identity information from certificate if needed
    id_type = None
    identity = None
    object_sid = None
    cert_username = None
    cert_domain = None

    # Skip certificate parsing for key credentials
    if not is_key_credential:
        # Extract identity information from certificate
        identities = get_identities_from_certificate(self.cert)

        # Get the object SID from the certificate if available
        object_sid = get_object_sid_from_certificate(self.cert)

        # No identities found in the certificate
        if not identities:
            logging.warning("Could not find identity in the provided certificate")
        # Single identity found - use it directly
        elif len(identities) == 1:
            id_type, identity = identities[0]
            cert_username, cert_domain = cert_id_to_parts([(id_type, identity)])
        # Multiple identities found - handle based on input parameters
        else:
            logging.info("Found multiple identities in certificate")

            # Case 1: If username is provided, try to find a matching identity
            if username:
                matching_ids = []
                for idx, (id_t, id_val) in enumerate(identities):
                    u, d = cert_id_to_parts([(id_t, id_val)])
                    if u and (
                        u.lower() == username.lower()
                        or u.lower() + "$" == username.lower()
                    ):
                        matching_ids.append((idx, id_t, id_val, u, d))

                # Found exactly one match for the username
                if len(matching_ids) == 1:
                    idx, id_type, identity, cert_username, cert_domain = (
                        matching_ids[0]
                    )
                    logging.info(f"Using identity: {id_type}: {identity}")
                # Found multiple matches - prompt user to select one
                elif len(matching_ids) > 1:
                    logging.info(
                        f"Found multiple identities for username '{username}'"
                    )
                    logging.info("Please select one:")
                    for i, (idx, id_t, id_val, u, d) in enumerate(matching_ids):
                        print(f"    [{i}] {id_t}: {id_val!r} ({u}@{d})")
                    while True:
                        try:
                            choice = int(input("> "))
                            if 0 <= choice < len(matching_ids):
                                (
                                    idx,
                                    id_type,
                                    identity,
                                    cert_username,
                                    cert_domain,
                                ) = matching_ids[choice]
                                break
                            logging.warning("Invalid index")
                        except ValueError:
                            logging.warning("Invalid input, enter a number")
                # No matches found - prompt user to select from all identities
                else:
                    logging.warning(f"No identities match username '{username}'")
                    logging.info("Please select an identity:")
                    for i, (id_t, id_val) in enumerate(identities):
                        u, d = cert_id_to_parts([(id_t, id_val)])
                        print(
                            f"    [{i}] {id_t}: {id_val!r} ({u or 'unknown'}@{d or 'unknown'})"
                        )
                    while True:
                        try:
                            idx = int(input("> "))
                            if 0 <= idx < len(identities):
                                id_type, identity = identities[idx]
                                cert_username, cert_domain = cert_id_to_parts(
                                    [(id_type, identity)]
                                )
                                break
                            logging.warning("Invalid index")
                        except ValueError:
                            logging.warning("Invalid input, enter a number")
            # Case 2: No username provided - prompt user to select an identity
            else:
                logging.info("Please select an identity:")
                for i, (id_t, id_val) in enumerate(identities):
                    u, d = cert_id_to_parts([(id_t, id_val)])
                    print(
                        f"    [{i}] {id_t}: {id_val!r} ({u or 'unknown'}@{d or 'unknown'})"
                    )
                while True:
                    try:
                        idx = int(input("> "))
                        if 0 <= idx < len(identities):
                            id_type, identity = identities[idx]
                            cert_username, cert_domain = cert_id_to_parts(
                                [(id_type, identity)]
                            )
                            break
                        logging.warning("Invalid index")
                    except ValueError:
                        logging.warning("Invalid input, enter a number")

    # Resolve username and domain
    if not username:
        username = cert_username
    if not domain:
        domain = cert_domain

    # Check for mismatches between certificate and provided identity
    if (
        self._check_identity_mismatches(
            username, domain, cert_username, cert_domain
        )
        is False
    ):
        return False

    # Ensure we have both username and domain
    if not all([username, domain]) and not is_key_credential:
        logging.error(
            "Username or domain is not specified, and identity "
            "information was not found in the certificate"
        )
        return False

    if not username or not domain:
        logging.error(f"Username or domain is invalid: {username}@{domain}")
        return False

    # Normalize domain and username
    domain = domain.lower()
    username = username.lower()
    upn = f"{username}@{domain}"

    # Resolve target IP if needed
    if self.target and self.target.resolver and self.target.target_ip is None:
        self.target.target_ip = self.target.resolver.resolve(domain)

    logging.info(f"Using principal: {upn!r}")

    # Perform Kerberos authentication
    return self.kerberos_authentication(
        username,
        domain,
        is_key_credential,
        id_type,
        identity,
        object_sid,
        upn,
    )
Authenticate using a certificate. This is the main entry point for authentication. It will determine whether to use LDAP or Kerberos authentication based on configuration. Args: username: Username to authenticate as domain: Domain to authenticate to is_key_credential: Whether we're using a key credential Returns: NT hash if extracted, True if successful, False if failed, None if error
authenticate
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
def _check_identity_mismatches( self, username: Optional[str], domain: Optional[str], cert_username: Optional[str], cert_domain: Optional[str], ) -> Optional[bool]: """ Check for mismatches between provided identity and certificate identity. Args: username: Provided username domain: Provided domain cert_username: Username from certificate cert_domain: Domain from certificate Returns: None if checks passed, False if should abort """ # Check username mismatch (accounting for computer accounts with $) if ( cert_username and username and cert_username.lower() != username.lower() and cert_username.lower() + "$" != username.lower() ): logging.warning( f"The provided username does not match the identity " f"found in the certificate: {username!r} - {cert_username!r}" ) res = input("Do you want to continue? (Y/n): ") if res.strip().lower() == "n": return False # Check domain mismatch (accounting for subdomains) if ( cert_domain and domain and domain.lower() != cert_domain.lower() and not cert_domain.lower().startswith(domain.lower().rstrip(".") + ".") ): logging.warning( f"The provided domain does not match the identity " f"found in the certificate: {domain!r} - {cert_domain!r}" ) res = input("Do you want to continue? (Y/n): ") if res.strip().lower() == "n": return False return None
Check for mismatches between provided identity and certificate identity. Args: username: Provided username domain: Provided domain cert_username: Username from certificate cert_domain: Domain from certificate Returns: None if checks passed, False if should abort
_check_identity_mismatches
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
def ldap_authentication(self, domain: Optional[str] = None) -> bool: """ Authenticate to LDAP using a certificate. Args: domain: Domain to authenticate to Returns: True if successful, False otherwise """ if self.key is None: raise ValueError("Private key is not specified and no PFX was provided") if self.cert is None: raise ValueError("Certificate is not specified and no PFX was provided") ldap_conn = LDAPConnection(self.target, (self.cert, self.key)) try: ldap_conn.schannel_connect() except Exception as e: logging.error(f"Failed to connect to LDAP server: {e}") handle_error() return False if ldap_conn.default_path is None: logging.error("Failed to retrieve default naming context") return False domain_dumper = DummyDomainDumper(ldap_conn.default_path) ldap_shell = LdapShell(sys, domain_dumper, ldap_conn.ldap_conn) try: ldap_shell.cmdloop() except KeyboardInterrupt: print("Bye!\n") return True
Authenticate to LDAP using a certificate. Args: domain: Domain to authenticate to Returns: True if successful, False otherwise
ldap_authentication
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
def kerberos_authentication(
    self,
    username: str,
    domain: str,
    is_key_credential: bool = False,
    id_type: Optional[str] = None,
    identity: Optional[str] = None,
    object_sid: Optional[str] = None,
    upn: Optional[str] = None,
) -> Union[str, bool, None]:
    """
    Authenticate to Kerberos using PKINIT with a certificate.

    Args:
        username: Username to authenticate as
        domain: Domain to authenticate to
        is_key_credential: Whether we're using a key credential
        id_type: Type of identity in certificate
        identity: Identity value from certificate
        object_sid: SID from certificate
        upn: User Principal Name

    Returns:
        NT hash if extracted, True if successful, False otherwise
    """
    if self.key is None:
        raise ValueError("Private key is not specified and no PFX was provided")
    if self.cert is None:
        raise ValueError("Certificate is not specified and no PFX was provided")

    if not isinstance(self.key, rsa.RSAPrivateKey):
        raise ValueError(
            "Currently only RSA private keys are supported. Try using -ldap-shell instead"
        )

    # Create AS-REQ for PKINIT
    as_req, diffie = build_pkinit_as_req(username, domain, self.key, self.cert)

    # Resolve target IP if needed
    if self.target and self.target.resolver and self.target.target_ip is None:
        self.target.target_ip = self.target.resolver.resolve(domain)

    logging.info("Trying to get TGT...")
    logging.debug(f"Sending AS-REQ to KDC {domain} ({self.target.target_ip})")

    try:
        # Send Kerberos AS-REQ
        tgt = sendReceive(as_req, domain, self.target.target_ip)
    except KerberosError as e:
        # Handle Kerberos errors with helpful messages
        if e.getErrorCode() not in KRB5_ERROR_MESSAGES:
            logging.error(f"Got unknown Kerberos error: {e.getErrorCode():#x}")
            return False

        if "KDC_ERR_CLIENT_NAME_MISMATCH" in str(e) and not is_key_credential:
            logging.error(
                f"Name mismatch between certificate and user {username!r}"
            )
            if id_type is not None:
                logging.error(
                    f"Verify that the username {username!r} matches the certificate {id_type}: {identity}"
                )
        elif "KDC_ERR_WRONG_REALM" in str(e) and not is_key_credential:
            logging.error(f"Wrong domain name specified {domain!r}")
            if id_type is not None:
                logging.error(
                    f"Verify that the domain {domain!r} matches the certificate {id_type}: {identity}"
                )
        elif "KDC_ERR_CERTIFICATE_MISMATCH" in str(e) and not is_key_credential:
            logging.error(
                f"Object SID mismatch between certificate and user {username!r}"
            )
            if object_sid is not None:
                logging.error(
                    f"Verify that user {username!r} has object SID {object_sid!r}"
                )
        elif "KDC_ERR_INCONSISTENT_KEY_PURPOSE" in str(e):
            logging.error("Certificate is not valid for client authentication")
            logging.error(
                "Check the certificate template and ensure it has the correct EKU(s)"
            )
            logging.error(
                "If you recently changed the certificate template, wait a few minutes for the change to propagate"
            )
        else:
            logging.error(f"Got error while trying to request TGT: {e}")
            handle_error()
            logging.error("See the wiki for more information")
        return False

    logging.info("Got TGT")

    # Process AS-REP
    as_rep = decoder.decode(tgt, asn1Spec=AS_REP())[0]

    # Extract PA-PK-AS-REP
    for pa in as_rep["padata"]:
        if pa["padata-type"] == 17:  # PA-PK-AS-REP
            pk_as_rep = PaPkAsRep.load(bytes(pa["padata-value"])).native
            break
    else:
        logging.error("PA_PK_AS_REP was not found in AS_REP")
        return False

    # Process Diffie-Hellman key exchange data
    ci = cms.ContentInfo.load(pk_as_rep["dhSignedData"]).native
    sd = ci["content"]
    key_info = sd["encap_content_info"]

    if key_info["content_type"] != "1.3.6.1.5.2.3.2":
        logging.error("Unexpected value for key info content type")
        return False

    # Get public key from KDC
    auth_data = KDCDHKeyInfo.load(key_info["content"]).native
    pub_key = int.from_bytes(
        core.BitString(auth_data["subjectPublicKey"]).dump()[7:],
        "big",
        signed=False,
    )

    # Complete Diffie-Hellman exchange
    shared_key = diffie.exchange(pub_key)
    server_nonce = pk_as_rep["serverDHNonce"]
    full_key = shared_key + diffie.dh_nonce + server_nonce

    # Derive encryption key
    etype = as_rep["enc-part"]["etype"]
    cipher = _enctype_table[etype]
    if etype == EncType.AES256:
        t_key = truncate_key(full_key, 32)
    elif etype == EncType.AES128:
        t_key = truncate_key(full_key, 16)
    else:
        logging.error("Unexpected encryption type in AS_REP")
        return False

    # Decrypt AS-REP
    key = Key(cipher.enctype, t_key)
    enc_data = as_rep["enc-part"]["cipher"]
    dec_data = cipher.decrypt(key, 3, enc_data)
    enc_as_rep_part = decoder.decode(dec_data, asn1Spec=EncASRepPart())[0]

    # Extract session key
    cipher = _enctype_table[int(enc_as_rep_part["key"]["keytype"])]
    session_key = Key(cipher.enctype, bytes(enc_as_rep_part["key"]["keyvalue"]))

    # Create credential cache
    ccache = CCache()
    ccache.fromTGT(tgt, key, None)
    krb_cred = ccache.toKRBCRED()

    # Print ticket if requested
    if self.print:
        logging.info("Ticket:")
        print(base64.b64encode(krb_cred).decode())

    # Save ticket to file if requested
    if not self.no_save:
        if self.kirbi:
            kirbi_name = f"{username.rstrip('$')}.kirbi"
            logging.info(f"Saving Kirbi file to {kirbi_name!r}")
            saved_path = try_to_save_file(ccache.toKRBCRED(), kirbi_name)
            logging.info(f"Wrote Kirbi file to {saved_path!r}")
        else:
            self.ccache_name = f"{username.rstrip('$')}.ccache"
            logging.info(f"Saving credential cache to {self.ccache_name!r}")
            saved_path = try_to_save_file(ccache.getData(), self.ccache_name)
            logging.info(f"Wrote credential cache to {saved_path!r}")

    # Extract NT hash if requested
    if not self.no_hash:
        logging.info(f"Trying to retrieve NT hash for {username!r}")

        try:
            # Create AP-REQ for User-to-User (U2U) authentication
            ap_req = AP_REQ()
            ap_req["pvno"] = 5
            ap_req["msg-type"] = e2i(constants.ApplicationTagNumbers.AP_REQ)
            ap_req["ap-options"] = constants.encodeFlags([])

            # Use received ticket
            ticket = Ticket()
            ticket = ticket.from_asn1(as_rep["ticket"])
            seq_set(ap_req, "ticket", ticket.to_asn1)

            # Create authenticator for AP-REQ
            authenticator = Authenticator()
            authenticator["authenticator-vno"] = 5
            authenticator["crealm"] = bytes(as_rep["crealm"])

            client_name = Principal()
            client_name = client_name.from_asn1(as_rep, "crealm", "cname")
            seq_set(authenticator, "cname", client_name.components_to_asn1)

            # Set time in authenticator
            now = datetime.datetime.now(datetime.timezone.utc)
            authenticator["cusec"] = now.microsecond
            authenticator["ctime"] = KerberosTime.to_asn1(now)

            # Encrypt authenticator with session key
            encoded_authenticator = encoder.encode(authenticator)
            encrypted_encoded_authenticator = cipher.encrypt(
                session_key, 7, encoded_authenticator, None
            )

            ap_req["authenticator"] = noValue
            ap_req["authenticator"]["etype"] = cipher.enctype
            ap_req["authenticator"]["cipher"] = encrypted_encoded_authenticator

            encoded_ap_req = encoder.encode(ap_req)

            # Create TGS-REQ with U2U flag
            tgs_req = TGS_REQ()
            tgs_req["pvno"] = 5
            tgs_req["msg-type"] = e2i(constants.ApplicationTagNumbers.TGS_REQ)

            # Add AP-REQ as PA data
            tgs_req["padata"] = noValue
            tgs_req["padata"][0] = noValue
            tgs_req["padata"][0]["padata-type"] = e2i(
                constants.PreAuthenticationDataTypes.PA_TGS_REQ
            )
            tgs_req["padata"][0]["padata-value"] = encoded_ap_req

            req_body = seq_set(tgs_req, "req-body")

            # Set KDC options for U2U
            opts = [
                e2i(constants.KDCOptions.forwardable),
                e2i(constants.KDCOptions.renewable),
                e2i(constants.KDCOptions.canonicalize),
                e2i(constants.KDCOptions.enc_tkt_in_skey),  # This enables U2U
                e2i(constants.KDCOptions.forwardable),
                e2i(constants.KDCOptions.renewable_ok),
            ]
            req_body["kdc-options"] = constants.encodeFlags(opts)

            # Request a ticket to self (U2U)
            server_name = Principal(
                username, type=e2i(constants.PrincipalNameType.NT_UNKNOWN)
            )
            seq_set(req_body, "sname", server_name.components_to_asn1)
            req_body["realm"] = str(as_rep["crealm"])

            # Set validity period
            now = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
                days=1
            )
            req_body["till"] = KerberosTime.to_asn1(now)
            req_body["nonce"] = getrandbits(31)

            # Request supported encryption types
            seq_set_iter(
                req_body,
                "etype",
                (int(cipher.enctype), e2i(constants.EncryptionTypes.rc4_hmac)),
            )

            # Include our own ticket
            ticket_asn1 = ticket.to_asn1(TicketAsn1())
            seq_set_iter(req_body, "additional-tickets", (ticket_asn1,))

            # Send TGS-REQ
            message = encoder.encode(tgs_req)
            tgs = sendReceive(message, domain, self.target.target_ip)
            tgs = decoder.decode(tgs, asn1Spec=TGS_REP())[0]

            # Decrypt ticket from TGS-REP
            ciphertext = tgs["ticket"]["enc-part"]["cipher"]
            new_cipher = _enctype_table[int(tgs["ticket"]["enc-part"]["etype"])]
            plaintext = new_cipher.decrypt(session_key, 2, ciphertext)

            # Create special key using the t_key
            special_key = Key(18, t_key)

            # Extract PAC from ticket
            data = plaintext
            enc_ticket_part = decoder.decode(data, asn1Spec=EncTicketPart())[0]
            ad_if_relevant = decoder.decode(
                enc_ticket_part["authorization-data"][0]["ad-data"],
                asn1Spec=AD_IF_RELEVANT(),
            )[0]
            pac_type = PACTYPE(ad_if_relevant[0]["ad-data"].asOctets())
            buff = pac_type["Buffers"]

            # Default hash values
            nt_hash = None
            lm_hash = "aad3b435b51404eeaad3b435b51404ee"

            # Look for credential info in PAC
            for _ in range(pac_type["cBuffers"]):
                info_buffer = PAC_INFO_BUFFER(buff)
                data = pac_type["Buffers"][info_buffer["Offset"] - 8 :][
                    : info_buffer["cbBufferSize"]
                ]

                # PAC_CREDENTIAL_INFO contains the hashes
                if info_buffer["ulType"] == 2:  # PAC_CREDENTIAL_INFO
                    cred_info = PAC_CREDENTIAL_INFO(data)
                    new_cipher = _enctype_table[cred_info["EncryptionType"]]

                    # Decrypt the credentials with the special key
                    out = new_cipher.decrypt(
                        special_key, 16, cred_info["SerializedData"]
                    )

                    # Parse credential data
                    type1 = TypeSerialization1(out)
                    new_data = out[len(type1) + 4 :]
                    pcc = PAC_CREDENTIAL_DATA(new_data)

                    # Extract NTLM hashes
                    for cred in pcc["Credentials"]:
                        cred_structs = NTLM_SUPPLEMENTAL_CREDENTIAL(
                            b"".join(cred["Credentials"])
                        )
                        if any(cred_structs["LmPassword"]):
                            lm_hash = cred_structs["LmPassword"].hex()
                        nt_hash = cred_structs["NtPassword"].hex()
                        break
                    break

                # Move to next buffer
                buff = buff[len(info_buffer) :]
            else:
                logging.error("Could not find credentials in PAC")
                return False

            # Store hashes in object
            self.lm_hash = lm_hash
            self.nt_hash = nt_hash

            # Display hash information
            if not is_key_credential:
                logging.info(f"Got hash for {upn!r}: {lm_hash}:{nt_hash}")

            # Return the NT hash
            return nt_hash
        except Exception as e:
            logging.error(f"Failed to extract NT hash: {e}")
            handle_error()
            return False

    # Authentication succeeded
    return True
Authenticate to Kerberos using PKINIT with a certificate. Args: username: Username to authenticate as domain: Domain to authenticate to is_key_credential: Whether we're using a key credential id_type: Type of identity in certificate identity: Identity value from certificate object_sid: SID from certificate upn: User Principal Name Returns: NT hash if extracted, True if successful, False otherwise
kerberos_authentication
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
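A sketch of the PKINIT key derivation performed in the method above (hypothetical byte values; reuses truncate_key from earlier in this file): the DH shared secret is concatenated with both nonces and truncated down to the AES key size.

shared_key = b"\x01" * 128     # hypothetical DH shared secret (diffie.exchange(pub_key))
client_nonce = b"\x02" * 32    # diffie.dh_nonce in the code above
server_nonce = b"\x03" * 32    # pk_as_rep["serverDHNonce"]
full_key = shared_key + client_nonce + server_nonce
aes256_key = truncate_key(full_key, 32)   # 32 bytes for AES256, 16 for AES128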
def entry(options: argparse.Namespace) -> None: """ Entry point for the 'auth' command. Args: options: Command-line arguments """ # Ensure we don't try to use password authentication options.no_pass = True # Create target from options target = Target.from_options(options, dc_as_target=True, require_username=False) # Create authenticator and perform authentication try: authenticate = Authenticate(target=target, **vars(options)) result = authenticate.authenticate() if result is False: sys.exit(1) except Exception as e: logging.error(f"Authentication failed: {e}") handle_error() sys.exit(1)
Entry point for the 'auth' command. Args: options: Command-line arguments
entry
python
ly4k/Certipy
certipy/commands/auth.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/auth.py
MIT
def request(self, req: Any, *args, **kwargs): # type: ignore """ Send a request to the CA service. Args: req: Request object *args: Additional arguments **kwargs: Additional keyword arguments Returns: Response from the CA service Raises: DCERPCException: If the RPC call fails """ req["ORPCthis"] = self.get_cinstance().get_ORPCthis() # type: ignore req["ORPCthis"]["flags"] = 0 self.connect(self._iid) dce = self.get_dce_rpc() try: resp = dce.request(req, self.get_iPid(), *args, **kwargs) except Exception as e: if str(e).find("RPC_E_DISCONNECTED") >= 0: msg = str(e) + "\n" msg += ( "DCOM keep-alive pinging it might not be working as expected. You " "can't be idle for more than 14 minutes!\n" ) msg += "You should exit the app and start again\n" raise DCERPCException(msg) else: raise return resp
Send a request to the CA service. Args: req: Request object *args: Additional arguments **kwargs: Additional keyword arguments Returns: Response from the CA service Raises: DCERPCException: If the RPC call fails
request
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def __init__( self, target: Target, ca: Optional[str] = None, template: Optional[str] = None, officer: Optional[str] = None, request_id: Optional[int] = None, connection: Optional[LDAPConnection] = None, scheme: str = "ldaps", dynamic: bool = False, config: Optional[str] = None, timeout: int = 5, **kwargs, # type: ignore ): """ Initialize CA management object. Args: target: Target information (hostname, credentials, etc.) ca: CA name template: Certificate template name officer: Officer username request_id: Certificate request ID connection: Existing LDAP connection to reuse scheme: LDAP scheme (ldap or ldaps) dc_host: Domain controller hostname dynamic: Use dynamic port allocation config: CA configuration string timeout: Connection timeout in seconds **kwargs: Additional arguments """ self.target = target self.request_id = request_id self.ca = ca self.officer = officer self.template = template self.scheme = scheme self.dynamic = dynamic self.config = config self.timeout = timeout self.kwargs = kwargs # Initialize connection objects self._connection: Optional[LDAPConnection] = connection self._cert_admin: Optional[ICertAdminD] = None self._cert_admin2: Optional[ICertAdminD2] = None self._cert_request2: Optional[ICertRequestD2] = None self._rrp_dce = None
Initialize CA management object. Args: target: Target information (hostname, credentials, etc.) ca: CA name template: Certificate template name officer: Officer username request_id: Certificate request ID connection: Existing LDAP connection to reuse scheme: LDAP scheme (ldap or ldaps) dc_host: Domain controller hostname dynamic: Use dynamic port allocation config: CA configuration string timeout: Connection timeout in seconds **kwargs: Additional arguments
__init__
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def connection(self) -> LDAPConnection: """ Get or create an LDAP connection to the domain. Returns: Active LDAP connection Raises: ValueError: If target resolution fails """ if self._connection: return self._connection target = copy.copy(self.target) if target.do_kerberos: if self.target.dc_host is None: raise Exception( "Kerberos auth requires DNS name of the target DC. Use -dc-host." ) target.remote_name = self.target.dc_host target.target_ip = target.dc_ip self._connection = LDAPConnection(target) self._connection.connect() return self._connection
Get or create an LDAP connection to the domain. Returns: Active LDAP connection Raises: ValueError: If target resolution fails
connection
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def cert_admin(self) -> ICertAdminD: """ Get or create an ICertAdminD interface. Returns: ICertAdminD interface """ if self._cert_admin is not None: return self._cert_admin dcom = get_dcom_connection(self.target) interface = dcom.CoCreateInstanceEx(CLSID_ICertAdminD, IID_ICertAdminD) interface.get_cinstance().set_auth_level(RPC_C_AUTHN_LEVEL_PKT_PRIVACY) # type: ignore self._cert_admin = ICertAdminD(interface) return self._cert_admin
Get or create an ICertAdminD interface. Returns: ICertAdminD interface
cert_admin
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def cert_admin2(self) -> ICertAdminD2: """ Get or create an ICertAdminD2 interface. Returns: ICertAdminD2 interface """ if self._cert_admin2 is not None: return self._cert_admin2 dcom = get_dcom_connection(self.target) interface = dcom.CoCreateInstanceEx(CLSID_ICertAdminD, IID_ICertAdminD2) interface.get_cinstance().set_auth_level(RPC_C_AUTHN_LEVEL_PKT_PRIVACY) # type: ignore self._cert_admin2 = ICertAdminD2(interface) return self._cert_admin2
Get or create an ICertAdminD2 interface. Returns: ICertAdminD2 interface
cert_admin2
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def cert_request2(self) -> ICertRequestD2: """ Get or create an ICertRequestD2 interface. Returns: ICertRequestD2 interface """ if self._cert_request2 is not None: return self._cert_request2 dcom = get_dcom_connection(self.target) interface = dcom.CoCreateInstanceEx(CLSID_CCertRequestD, IID_ICertRequestD2) interface.get_cinstance().set_auth_level(RPC_C_AUTHN_LEVEL_PKT_PRIVACY) # type: ignore self._cert_request2 = ICertRequestD2(interface) return self._cert_request2
Get or create an ICertRequestD2 interface. Returns: ICertRequestD2 interface
cert_request2
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def rrp_dce(self): """ Get or create a connection to the remote registry service. Returns: RRP DCE/RPC connection or None if connection fails """ if self._rrp_dce is not None: return self._rrp_dce dce = get_dce_rpc_from_string_binding( "ncacn_np:445[\\pipe\\winreg]", self.target, timeout=self.target.timeout ) # Try to connect up to 3 times (registry service might need to start) for _ in range(3): try: dce.connect() _ = dce.bind(rrp.MSRPC_UUID_RRP) logging.debug( f"Connected to remote registry at {self.target.remote_name!r} ({self.target.target_ip!r})" ) break except Exception as e: if "STATUS_PIPE_NOT_AVAILABLE" in str(e): logging.warning( "Failed to connect to remote registry. Service should be starting now. Trying again..." ) time.sleep(1) else: logging.error(f"Failed to connect to remote registry: {e}") handle_error() return None else: logging.warning("Failed to connect to remote registry after 3 attempts") return None self._rrp_dce = dce return self._rrp_dce
Get or create a connection to the remote registry service. Returns: RRP DCE/RPC connection or None if connection fails
rrp_dce
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def get_exchange_certificate(self) -> x509.Certificate: """ Get the CA exchange certificate. Returns: CA exchange certificate Raises: Exception: If the certificate retrieval fails """ request = ICertRequestD2GetCAProperty() request["pwszAuthority"] = checkNullString(self.ca) request["PropId"] = 0x0000000F # Exchange certificate property ID request["PropIndex"] = 0 request["PropType"] = 0x00000003 # Binary data type resp = self.cert_request2.request(request) # Convert the certificate blob to an x509 certificate exchange_cert = der_to_cert(b"".join(resp["pctbPropertyValue"]["pb"])) return exchange_cert
Get the CA exchange certificate. Returns: CA exchange certificate Raises: Exception: If the certificate retrieval fails
get_exchange_certificate
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
def get_config_rrp(self) -> "CAConfiguration":
    """
    Get CA configuration via the Remote Registry Protocol.
    Used as a fallback when CSRA fails.

    This method navigates the Windows registry structure to extract CA
    configuration settings including policy modules, edit flags, request
    disposition, disabled extensions, interface flags, and security
    descriptors.

    Returns:
        CAConfiguration object containing CA configuration settings

    Raises:
        ValueError: If critical registry values have unexpected types
    """
    # Open local machine registry hive
    hklm = rrp.hOpenLocalMachine(self.rrp_dce)
    h_root_key = hklm["phKey"]

    # First retrieve active policy module information
    policy_key_path = (
        f"SYSTEM\\CurrentControlSet\\Services\\CertSvc\\Configuration\\{self.ca}\\"
        "PolicyModules"
    )
    policy_key = rrp.hBaseRegOpenKey(self.rrp_dce, h_root_key, policy_key_path)

    # Get active policy module name
    _, active_policy = rrp.hBaseRegQueryValue(
        self.rrp_dce, policy_key["phkResult"], "Active"
    )
    if not isinstance(active_policy, str):
        logging.warning(
            f"Expected a string for active policy, got {type(active_policy)!r}"
        )
        logging.warning("Falling back to default policy")
        active_policy = "CertificateAuthority_MicrosoftDefault.Policy"

    active_policy = active_policy.strip("\x00")

    # Open policy module configuration
    policy_key_path = (
        f"SYSTEM\\CurrentControlSet\\Services\\CertSvc\\Configuration\\{self.ca}\\"
        f"PolicyModules\\{active_policy}"
    )
    policy_key = rrp.hBaseRegOpenKey(self.rrp_dce, h_root_key, policy_key_path)

    # Retrieve edit flags (controls certificate request behavior)
    _, edit_flags = rrp.hBaseRegQueryValue(
        self.rrp_dce, policy_key["phkResult"], "EditFlags"
    )
    if not isinstance(edit_flags, int):
        logging.warning(f"Expected an int for edit flags, got {type(edit_flags)!r}")
        logging.warning("Falling back to default edit flags")
        edit_flags = 0x00000000

    # Retrieve request disposition (auto-enrollment settings)
    _, request_disposition = rrp.hBaseRegQueryValue(
        self.rrp_dce, policy_key["phkResult"], "RequestDisposition"
    )
    if not isinstance(request_disposition, int):
        logging.warning(
            f"Expected an int for request disposition, got {type(request_disposition)!r}"
        )
        logging.warning("Falling back to default request disposition")
        request_disposition = 0x00000000

    # Retrieve disabled extensions
    _, disable_extension_list = rrp.hBaseRegQueryValue(
        self.rrp_dce, policy_key["phkResult"], "DisableExtensionList"
    )
    if not isinstance(disable_extension_list, str):
        logging.warning(
            f"Expected a string for disable extension list, got {type(disable_extension_list)!r}"
        )
        logging.warning("Falling back to default disable extension list")
        disable_extension_list = ""

    # Process null-terminated string list into Python list
    disable_extension_list = [
        item
        for item in disable_extension_list.strip("\x00").split("\x00")
        if item
    ]

    # Now get general CA configuration settings
    configuration_key_path = (
        f"SYSTEM\\CurrentControlSet\\Services\\CertSvc\\Configuration\\{self.ca}"
    )
    configuration_key = rrp.hBaseRegOpenKey(
        self.rrp_dce, h_root_key, configuration_key_path
    )

    # Retrieve interface flags (controls CA interface behavior)
    _, interface_flags = rrp.hBaseRegQueryValue(
        self.rrp_dce, configuration_key["phkResult"], "InterfaceFlags"
    )
    if not isinstance(interface_flags, int):
        logging.warning(
            f"Expected an int for interface flags, got {type(interface_flags)!r}"
        )
        logging.warning("Falling back to default interface flags")
        interface_flags = 0x00000000

    # Retrieve security descriptor (controls access permissions)
    _, security_descriptor = rrp.hBaseRegQueryValue(
        self.rrp_dce, configuration_key["phkResult"], "Security"
    )
    if not isinstance(security_descriptor, bytes):
        raise ValueError(
            f"Expected a bytes object for security descriptor, got {type(security_descriptor)!r}"
        )

    # Parse the binary security descriptor
    security_descriptor = CASecurity(security_descriptor)

    # Return a complete configuration object
    return CAConfiguration(
        active_policy,
        edit_flags,
        disable_extension_list,
        request_disposition,
        interface_flags,
        security_descriptor,
    )
Get CA configuration via the Remote Registry Protocol. Used as a fallback when CSRA fails. This method navigates the Windows registry structure to extract CA configuration settings including policy modules, edit flags, request disposition, disabled extensions, interface flags, and security descriptors. Returns: CAConfiguration object containing CA configuration settings Raises: ValueError: If critical registry values have unexpected types
get_config_rrp
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
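For readers who want to see the registry access pattern used by get_config_rrp in isolation, here is a minimal standalone sketch built on impacket's rrp module over the \pipe\winreg named pipe. The host, CA name, and credentials are placeholder assumptions, and this simplified connection path is not necessarily how Certipy itself establishes self.rrp_dce.

from impacket.dcerpc.v5 import rrp, transport

# Placeholders: substitute a real CA host, CA common name, and credentials.
rpctransport = transport.DCERPCTransportFactory(r"ncacn_np:10.0.0.5[\pipe\winreg]")
rpctransport.set_credentials("user", "Passw0rd!", "CORP")

dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(rrp.MSRPC_UUID_RRP)

# Same three-step pattern as in get_config_rrp:
# open HKLM, open a subkey, query a value.
hklm = rrp.hOpenLocalMachine(dce)
key = rrp.hBaseRegOpenKey(
    dce,
    hklm["phKey"],
    "SYSTEM\\CurrentControlSet\\Services\\CertSvc\\Configuration\\CORP-CA\\PolicyModules",
)
_, active_policy = rrp.hBaseRegQueryValue(dce, key["phkResult"], "Active")
print(active_policy.strip("\x00"))  # e.g. CertificateAuthority_MicrosoftDefault.Policy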
def get_config(self) -> Optional["CAConfiguration"]:
    """
    Get CA configuration using the Remote Registry Protocol (RRP).

    This method attempts to retrieve the CA configuration using RRP
    and handles any exceptions that might occur during the process.

    Returns:
        CAConfiguration object containing configuration settings or None
        if retrieval fails
    """
    try:
        logging.info(f"Retrieving CA configuration for {self.ca!r} via RRP")
        result = self.get_config_rrp()
        logging.info(f"Successfully retrieved CA configuration for {self.ca!r}")
        return result
    except Exception as e:
        logging.warning(
            f"Failed to get CA configuration for {self.ca!r} via RRP: {e}"
        )
        handle_error(True)

    logging.warning(f"Could not retrieve configuration for {self.ca!r}")
    return None
Get CA configuration using the Remote Registry Protocol (RRP). This method attempts to retrieve the CA configuration using RRP and handles any exceptions that might occur during the process. Returns: CAConfiguration object containing configuration settings or None if retrieval fails
get_config
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
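A hedged usage sketch for the wrapper above. The CA object's constructor is not part of this excerpt, so `ca` is assumed to be an already-initialized instance, and the attribute names mirror the constructor arguments of CAConfiguration shown earlier rather than being confirmed by this file. EDITF_ATTRIBUTESUBJECTALTNAME2 (0x00040000) is a documented Microsoft flag value and the EditFlags bit commonly inspected when auditing for the ESC6 misconfiguration.

EDITF_ATTRIBUTESUBJECTALTNAME2 = 0x00040000  # documented Microsoft constant

# `ca` is assumed to be an already-constructed CA command object.
config = ca.get_config()
if config is None:
    print("CA configuration unavailable (RRP failed or access was denied)")
elif config.edit_flags & EDITF_ATTRIBUTESUBJECTALTNAME2:
    print("CA accepts requester-supplied SANs (ESC6 indicator)")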
def issue(self) -> bool:
    """
    Issue (approve) a pending certificate request.

    Returns:
        True if successful, False otherwise
    """
    if self.request_id is None:
        logging.error(
            "A request ID (-request-id) is required in order to issue a pending or failed certificate request"
        )
        return False

    request = ICertAdminDResubmitRequest()
    request["pwszAuthority"] = checkNullString(self.ca)
    request["pdwRequestId"] = int(self.request_id)
    request["pwszExtensionName"] = checkNullString("\x00")  # No extension

    try:
        resp = self.cert_admin.request(request)
    except DCERPCSessionError as e:
        if "E_ACCESSDENIED" in str(e):
            logging.error(
                "Access denied: Insufficient permissions to issue certificate"
            )
            return False

        logging.error(
            f"Failed to issue certificate request ID {self.request_id}: {e}"
        )
        handle_error()
        return False

    error_code = resp["pdwDisposition"]
    if error_code == 3:  # Success
        logging.info(
            f"Successfully issued certificate request ID {self.request_id}"
        )
        return True
    else:
        error_msg = translate_error_code(error_code)
        logging.error(f"Failed to issue certificate: {error_msg}")
        return False
Issue (approve) a pending certificate request. Returns: True if successful, False otherwise
issue
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
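Context for the error_code == 3 check in issue(): ResubmitRequest reports the request's resulting disposition, and 3 (CR_DISP_ISSUED) is the only value the method treats as success. The constants below are documented Microsoft disposition codes, reproduced here for reference rather than taken from this file.

# Certificate request disposition codes (documented Microsoft constants).
CR_DISP_INCOMPLETE = 0          # request is incomplete
CR_DISP_ERROR = 1               # request failed
CR_DISP_DENIED = 2              # request was denied
CR_DISP_ISSUED = 3              # certificate was issued  <-- success case above
CR_DISP_ISSUED_OUT_OF_BAND = 4  # certificate issued separately
CR_DISP_UNDER_SUBMISSION = 5    # request is pending manager approval
CR_DISP_REVOKED = 6             # certificate has been revoked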
def deny(self) -> bool:
    """
    Deny a pending certificate request.

    Returns:
        True if successful, False otherwise
    """
    if self.request_id is None:
        logging.error(
            "A request ID (-request-id) is required in order to deny a pending certificate request"
        )
        return False

    request = ICertAdminDDenyRequest()
    request["pwszAuthority"] = checkNullString(self.ca)
    request["pdwRequestId"] = int(self.request_id)

    try:
        resp = self.cert_admin.request(request)
    except DCERPCSessionError as e:
        if "E_ACCESSDENIED" in str(e):
            logging.error(
                "Access denied: Insufficient permissions to deny certificate request"
            )
            return False

        logging.error(
            f"Failed to deny certificate request ID {self.request_id}: {e}"
        )
        handle_error()
        return False

    error_code = resp["ErrorCode"]
    if error_code == 0:  # Success
        logging.info(
            f"Successfully denied certificate request ID {self.request_id}"
        )
        return True
    else:
        error_msg = translate_error_code(error_code)
        logging.error(f"Failed to deny certificate request: {error_msg}")
        return False
Deny a pending certificate request. Returns: True if successful, False otherwise
deny
python
ly4k/Certipy
certipy/commands/ca.py
https://github.com/ly4k/Certipy/blob/master/certipy/commands/ca.py
MIT
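A short usage sketch tying issue() and deny() together. It assumes `ca` is an initialized CA command object whose cert_admin binding is already established and whose request_id can be set directly; the request ID is a placeholder value. Note how the two RPCs report success differently in the code above: ResubmitRequest is judged by its disposition (3 = issued), while DenyRequest is judged by its plain return code (0 = success).

# Placeholder request ID; in the CLI this arrives via -request-id.
ca.request_id = 42

if ca.issue():  # approve the pending request
    print("Request 42 approved")

# To reject the same pending request instead:
# ca.deny()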