Upload 3 files
Browse files
main1.py
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from setup import *
|
3 |
+
import pandas as pd
|
4 |
+
from openpyxl import Workbook
|
5 |
+
from openpyxl.utils.dataframe import dataframe_to_rows
|
6 |
+
from openpyxl.styles import Font
|
7 |
+
from feasibility_agent import feasibility_agent_func
|
8 |
+
from short import graph
|
9 |
+
|
10 |
+
|
11 |
+
# # Function to create Excel file
|
12 |
+
# def create_excel(df):
|
13 |
+
# # Create a new Excel workbook and select the active sheet
|
14 |
+
# wb = Workbook()
|
15 |
+
# ws = wb.active
|
16 |
+
# ws.title = "Use Cases"
|
17 |
+
|
18 |
+
# # Define and write headers to the Excel sheet
|
19 |
+
# headers = ['Use Case', 'Description', 'URLs']
|
20 |
+
# ws.append(headers)
|
21 |
+
|
22 |
+
# # Write data rows
|
23 |
+
# for _, row in df.iterrows():
|
24 |
+
# try:
|
25 |
+
# use_case = row['use_case']
|
26 |
+
# description = row['description']
|
27 |
+
# urls = row['urls_list']
|
28 |
+
|
29 |
+
# ws.append([use_case, description, None]) # Add use case and description
|
30 |
+
# if urls:
|
31 |
+
# for url_index, url in enumerate(urls):
|
32 |
+
# cell = ws.cell(row=ws.max_row, column=3) # URLs go into the third column
|
33 |
+
# cell.value = url
|
34 |
+
# cell.hyperlink = url
|
35 |
+
# cell.font = Font(color="0000FF", underline="single")
|
36 |
+
|
37 |
+
# # Add a new row for additional URLs
|
38 |
+
# if url_index < len(urls) - 1:
|
39 |
+
# ws.append([None, None, None])
|
40 |
+
# except KeyError as e:
|
41 |
+
# print(f"Missing key in DataFrame row: {e}")
|
42 |
+
# except Exception as e:
|
43 |
+
# print(f"Unexpected error while processing row: {e}")
|
44 |
+
|
45 |
+
# excel_file_path = "GenAI_use_cases_feasibility.xlsx"
|
46 |
+
# wb.save(excel_file_path)
|
47 |
+
# return excel_file_path
|
48 |
+
|
49 |
+
|
50 |
+
# # Function to handle the report and create the DataFrame
|
51 |
+
# def pd_creation(report):
|
52 |
+
# # Assuming feasibility_agent_func returns a dictionary
|
53 |
+
# pd_dict = feasibility_agent_func(report)
|
54 |
+
|
55 |
+
# # Check for expected keys in pd_dict before proceeding
|
56 |
+
# required_columns = ['use_case', 'description', 'urls_list']
|
57 |
+
# print("-----Dict------->", pd_dict)
|
58 |
+
# # if not all(col in pd_dict for col in required_columns):
|
59 |
+
# # raise ValueError(f"Missing one or more expected columns: {required_columns}")
|
60 |
+
|
61 |
+
# # Create the DataFrame from the dictionary
|
62 |
+
# df = pd.DataFrame(pd_dict)
|
63 |
+
|
64 |
+
# # Convert the dataframe to the format expected by Gradio (list of lists)
|
65 |
+
# data = df.values.tolist() # This creates a list of lists from the dataframe
|
66 |
+
|
67 |
+
# # Create the Excel file and return its path
|
68 |
+
# excel_file_path = create_excel(df) # Create the Excel file and get its path
|
69 |
+
|
70 |
+
# return data, excel_file_path # Return the formatted data and the Excel file path
|
71 |
+
|
72 |
+
# Main function that handles the user query and generates the report
|
73 |
+
def main(user_input):
    """Generate a GenAI use-case report for the user's query.

    Parameters
    ----------
    user_input : str
        The user's query (a company name or an industry).

    Returns
    -------
    tuple[str, str]
        The report text (markdown) and the path of the saved ``.md`` file,
        matching the two Gradio outputs (markdown view, file download).
    """
    topic = user_input
    # max_analysts is forwarded to the research graph; 5 is the default depth.
    report = graph(topic=topic, max_analysts=5)
    print(report)
    # pd_creation(report) (the Excel feasibility export) is currently disabled.

    # Persist the report so Gradio can offer it as a downloadable file.
    report_file_path = "generated_report.md"
    # BUG FIX: explicit encoding — the default locale codec (e.g. cp1252 on
    # Windows) can raise UnicodeEncodeError on LLM output containing emoji
    # or non-Latin characters.
    with open(report_file_path, "w", encoding="utf-8") as f:
        f.write(report)

    return report, report_file_path
|
85 |
+
|
86 |
+
# Example queries
|
87 |
+
# Sample prompts shown beneath the input box to guide first-time users.
examples = [
    "How is the retail industry leveraging AI and ML?",
    "AI applications in automotive manufacturing",
]
|
91 |
+
|
92 |
+
# Creating the Gradio interface
|
93 |
+
# Gradio UI: query box -> report markdown + downloadable .md file.
with gr.Blocks(theme=gr.themes.Soft(font=gr.themes.GoogleFont('Open Sans'))) as demo:
    # Header section.
    # BUG FIX: the closing tag was written as a second "<center>"; use "</center>".
    gr.HTML("<center><h1>UseCaseGenie - Discover GenAI Use cases for your company and Industry! 🤖🧑🍳.</h1></center>")
    # BUG FIX: step 3 closed its bold tag with "<b>" instead of "</b>".
    gr.Markdown("""#### This GenAI Assistant 🤖 helps you discover and explore Generative AI use cases for your company and industry.
    You can download the generated use case report as a <b>Markdown file</b> to gain insights and explore relevant GenAI applications.
    ### <b>Steps:</b>
    1. <b>Enter your query</b> regarding any company or industry.
    2. <b>Click on the 'Submit' button</b> and wait for the GenAI assistant to generate the report.
    3. <b>Download the generated report</b>
    4. Explore the GenAI use cases""")
    gr.Markdown("**Note:** The app demo may occasionally show errors due to rate limits of the underlying language model (LLM). If you encounter an error, please try again later. If the problem persists please raise issue. Thank you for your understanding!")

    # Input for the user query.
    with gr.Row():
        user_input = gr.Textbox(label="Enter your Query", placeholder='Type_here...')

    # Examples to help users with inputs.
    with gr.Row():
        gr.Examples(examples=examples, inputs=user_input)

    # Buttons for submitting and clearing.
    with gr.Row():
        submit_button = gr.Button("Submit")
        clear_btn = gr.ClearButton([user_input], value='Clear')

    # File download slot for the generated markdown report.
    with gr.Row():
        download_report_button = gr.File(label="Usecases Report")
        # download_excel_button = gr.File(label="Feasibility Excel File")  # Excel export disabled.

    # Display the report in Markdown format.
    with gr.Row():
        report_output = gr.Markdown()

    # main() returns (report_text, file_path) matching the two outputs.
    submit_button.click(main, inputs=[user_input], outputs=[report_output, download_report_button])

# Run the interface.
demo.launch()
|
134 |
+
|
setup.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import getpass
|
3 |
+
from dotenv import load_dotenv
|
4 |
+
from langchain_groq import ChatGroq
|
5 |
+
from langchain_cohere import ChatCohere
|
6 |
+
from langchain_community.tools import TavilySearchResults
|
7 |
+
|
8 |
+
load_dotenv()
|
9 |
+
|
10 |
+
def _set_env(var: str):
|
11 |
+
if not os.environ.get(var):
|
12 |
+
os.environ[var] = getpass.getpass(f"Enter {var}: ")
|
13 |
+
|
14 |
+
|
15 |
+
# Set up required environment variables (prompts only when missing).
# _set_env("GROQ_API_KEY")
_set_env("TAVILY_API_KEY")
|
18 |
+
|
19 |
+
|
20 |
+
|
21 |
+
# LLM clients.
# SECURITY FIX: Groq API keys were hard-coded in source (and committed);
# they are now read from the environment, which load_dotenv() above
# populates from a local .env file. Any previously committed keys must be
# rotated — they should be considered compromised.
llm1 = ChatGroq(
    temperature=0.5,
    model_name="llama-3.1-8b-instant",
    api_key=os.environ.get("GROQ_API_KEY"),
)
llm2 = ChatGroq(
    temperature=0.7,
    model_name="qwen-qwq-32b",
    api_key=os.environ.get("GROQ_API_KEY"),
)
# llm3 = ChatCohere(model='command-a-03-2025', cohere_api_key=os.environ.get("COHERE_API_KEY"))
|
25 |
+
|
26 |
+
# search tool
|
27 |
+
tavily_search = TavilySearchResults(
|
28 |
+
max_results=2,
|
29 |
+
search_depth="advanced",
|
30 |
+
include_answer=True,
|
31 |
+
include_raw_content=True,
|
32 |
+
include_images=True,
|
33 |
+
)
|
short.py
ADDED
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pydantic import BaseModel,Field
|
2 |
+
from typing import TypedDict, Annotated
|
3 |
+
from langgraph.graph import MessagesState,StateGraph, START, END
|
4 |
+
from langchain_community.tools import TavilySearchResults
|
5 |
+
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
|
6 |
+
from langchain_community.document_loaders import WebBaseLoader
|
7 |
+
from langgraph.checkpoint.memory import MemorySaver
|
8 |
+
|
9 |
+
|
10 |
+
import operator
|
11 |
+
from setup import *
|
12 |
+
|
13 |
+
class GeneratorState(MessagesState):
    """Graph state: conversation messages plus research bookkeeping."""
    context: Annotated[list, operator.add]  # retrieved/summarized docs, accumulated across nodes
    max_usecase: int                        # number of use cases to generate
    topic: str                              # the user's input query
|
17 |
+
|
18 |
+
|
19 |
+
class SearchQuery(BaseModel):
    """Structured-output schema for the query-generation LLM call."""
    search_query: str = Field(description='Search query for web-search')
|
21 |
+
|
22 |
+
|
23 |
+
# Narrower search tool (single result per query).
# NOTE(review): appears unused — search_web below uses `tavily_search` from
# setup instead; confirm before removing.
keyword_search = TavilySearchResults(
    max_results=1,
    search_depth="advanced",
    include_answer=True,
    include_raw_content=True,
    include_images=True,
)
|
29 |
+
|
30 |
+
|
31 |
+
|
32 |
+
def search_web(state: GeneratorState):
    """Search the web for the state's topic, scrape the hits, and summarize them.

    Pipeline: LLM turns the topic into a search query -> Tavily search ->
    WebBaseLoader scrapes the result pages -> LLM condenses them to ~500 words.

    Returns a partial state update: ``{'context': [summary_message]}`` (the
    ``context`` channel accumulates via ``operator.add``).
    """
    topic = state['topic']
    structured_llm = llm2.with_structured_output(SearchQuery)

    search_instructions = """You are an AI assistant specialized in generating effective internet search queries. Your goal is to create **precise, keyword-rich search queries** that retrieve the best results for **AI use cases in specific industries** using Tavily Search.

## **Instructions:**
- Extract the **industry name** from the user’s query.
- Generate a **focused search query** that retrieves **practical AI use cases** in that industry.
- Include keywords like **"applications," "use cases," "impact," or "case studies"** to refine the search.
- Prioritize sources like **research papers, industry reports, and authoritative tech sites**.
- Use **Google-style operators (e.g., `site:`) to focus on trusted sources** if applicable.

---
## **Example:**
User Input: `"GenAI in healthcare"`
Generated Query:
"Generative AI use cases in healthcare applications and impact"

Generate search query for the below:
{topic}
"""

    search_prompt = search_instructions.format(topic=topic)
    search_query = structured_llm.invoke(search_prompt)
    exclude_domains = ["vktr.com"]
    search_docs = tavily_search.invoke(search_query.search_query, exclude_domains=exclude_domains)

    # Scrape the pages behind the search hits.
    page_url = [doc['url'] for doc in search_docs]
    loader = WebBaseLoader(
        web_paths=page_url,
        bs_get_text_kwargs={"separator": "|", "strip": True},
        raise_for_status=True,
    )
    docs = loader.load()
    # BUG FIX: the original single-quoted f-string nested unescaped single
    # quotes (doc.metadata['source']) — a SyntaxError on Python < 3.12.
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document href="{doc.metadata["source"]}"/>\n{doc.page_content}\n</Document>'
            for doc in docs
        ])

    summarization_prompt = '''You are an advanced summarization assistant. Your task is to generate a 500-word summary based on the provided context.
    Maintain key information while removing redundancy, preserving critical details, and ensuring readability. Follow these guidelines:
    Focus on Key Points: Extract essential facts, insights, and takeaways.
    Maintain Clarity & Coherence: Ensure logical flow and readability.
    Preserve Critical Data: Retain names, dates, figures, and important references.
    Adjust Length as Needed: Summarize concisely while covering all vital aspects.
    Format the summary professionally, adapting tone and detail to match the context.
    context : {formatted_search_docs}
    '''
    # BUG FIX: the prompt template was sent verbatim — the
    # {formatted_search_docs} placeholder was never substituted, so the LLM
    # never received the scraped context.
    summarized_docs = llm2.invoke([SystemMessage(
        content=summarization_prompt.format(formatted_search_docs=formatted_search_docs))])

    return {'context': [summarized_docs]}
|
85 |
+
|
86 |
+
|
87 |
+
|
88 |
+
def generate_usecases(state: GeneratorState):
    """Consolidate the summarized web context into the final use-case report.

    Returns a partial state update appending one AI message to ``messages``.
    """
    topic = state['topic']
    context = state['context']
    generation_prompt = '''
    You are a highly skilled technical writer. Your task is to consolidate insights from analyst memos into a structured report based on the given context and topic. Ensure the report includes a **brief introduction**, at least five AI use cases, and a **short conclusion**. Follow this format:
    topic : {topic}
    context:
    {context}

    # Focus Title: [Provided Title]

    ## Introduction:
    Provide a concise overview of the report's purpose and relevance.

    ## Use Case 1: [Descriptive Title]
    **Objective/Use Case:** Summarize the goal in one or two sentences.
    **AI Application:** Describe the AI technologies used.
    **Cross-Functional Benefit:**
    - **[Department]:** [Benefit]
    - **[Department]:** [Benefit]

    ## Use Case 2: [Descriptive Title]
    (Repeat format)

    ## Conclusion:
    Summarize key takeaways and potential future implications.

    Ensure clarity, relevance, and no duplicate citations in the **Sources** section. Extract insights accurately from the **context** provided.'''

    # Fill the template, then ask the report-writing model for the answer.
    prompt = generation_prompt.format(topic=topic, context=context)
    report_message = llm1.invoke([SystemMessage(content=prompt)])

    return {'messages': report_message}
|
121 |
+
|
122 |
+
|
123 |
+
|
124 |
+
def graph(topic, max_analysts):
    """Build and run the two-node research graph; return the report text.

    Parameters
    ----------
    topic : str
        The user's query (company or industry).
    max_analysts : int
        Number of use cases to request (stored as ``max_usecase`` in state).

    Returns
    -------
    str
        The generated markdown report.
    """
    graph_builder = StateGraph(GeneratorState)

    graph_builder.add_node('search_web', search_web)
    graph_builder.add_node('usecase_generation', generate_usecases)

    graph_builder.add_edge(START, 'search_web')
    graph_builder.add_edge('search_web', 'usecase_generation')
    graph_builder.add_edge('usecase_generation', END)

    memory = MemorySaver()
    compiled = graph_builder.compile(checkpointer=memory)
    config = {"configurable": {"thread_id": "1"}}
    # BUG FIX: the state schema declares 'max_usecase', not 'max_analysts';
    # writing an undeclared channel is rejected (or silently dropped) by
    # langgraph, so the requested count never reached the state.
    compiled.invoke({"topic": topic,
                     "max_usecase": max_analysts},
                    config)

    final_state = compiled.get_state(config)
    # The generation node appends a single AI message; take the last message
    # so this stays correct even if earlier nodes ever add messages too.
    report = final_state.values['messages'][-1].content

    return report
|