xiaosuhu1986 committed on
Commit f29a3a5 · 0 Parent(s)
.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ api.py
2
+ testtext_tmp
__pycache__/tools.cpython-312.pyc ADDED
Binary file (1.65 kB).
 
agent.ipynb ADDED
@@ -0,0 +1,192 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stderr",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "/home/frank-elite/miniconda3/envs/paintrekbot/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
13
+ " from .autonotebook import tqdm as notebook_tqdm\n"
14
+ ]
15
+ }
16
+ ],
17
+ "source": [
18
+ "import os\n",
19
+ "\n",
20
+ "from typing import Annotated, Literal\n",
21
+ "from typing_extensions import TypedDict\n",
22
+ "from langgraph.prebuilt import ToolNode\n",
23
+ "from langchain_core.messages import HumanMessage\n",
24
+ "from langgraph.graph import StateGraph, MessagesState, START, END\n",
25
+ "from langgraph.checkpoint.memory import MemorySaver\n",
26
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
27
+ "from tools import get_job, get_resume"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": 2,
33
+ "metadata": {},
34
+ "outputs": [],
35
+ "source": [
36
+ "GOOGLE_API_KEY=\"AIzaSyA8eIxHBqeBWEP1g3t8bpvLxNaH5Lquemo\"\n",
37
+ "os.environ[\"GOOGLE_API_KEY\"] = GOOGLE_API_KEY\n"
38
+ ]
39
+ },
40
+ {
41
+ "cell_type": "code",
42
+ "execution_count": 3,
43
+ "metadata": {},
44
+ "outputs": [],
45
+ "source": [
46
+ "tools = [get_job, get_resume]\n",
47
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-1.5-flash-latest\").bind_tools(tools)"
48
+ ]
49
+ },
50
+ {
51
+ "cell_type": "code",
52
+ "execution_count": 4,
53
+ "metadata": {},
54
+ "outputs": [],
55
+ "source": [
56
+ "def expert(state: MessagesState):\n",
57
+ " system_message = \"\"\"\n",
58
+ " You are a resume expert. You are tasked with improving the user resume based on a job description.\n",
59
+ " You can access the resume and job data using the provided tools.\n",
60
+ "\n",
61
+ " You must NEVER provide information that the user does not have.\n",
62
+ " These include, skills or experiences that are not in the resume. Do not make things up.\n",
63
+ " \"\"\"\n",
64
+ " messages = state[\"messages\"]\n",
65
+ " response = llm.invoke([system_message] + messages)\n",
66
+ " return {\"messages\": [response]}\n",
67
+ "\n",
68
+ "tool_node = ToolNode(tools)"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": 5,
74
+ "metadata": {},
75
+ "outputs": [],
76
+ "source": [
77
+ "def should_continue(state: MessagesState) -> Literal[\"tools\", END]:\n",
78
+ " messages = state['messages']\n",
79
+ " last_message = messages[-1]\n",
80
+ " if last_message.tool_calls:\n",
81
+ " return \"tools\"\n",
82
+ " return END"
83
+ ]
84
+ },
85
+ {
86
+ "cell_type": "code",
87
+ "execution_count": 6,
88
+ "metadata": {},
89
+ "outputs": [
90
+ {
91
+ "data": {
92
+ "text/plain": [
93
+ "<langgraph.graph.state.StateGraph at 0x70171ba751c0>"
94
+ ]
95
+ },
96
+ "execution_count": 6,
97
+ "metadata": {},
98
+ "output_type": "execute_result"
99
+ }
100
+ ],
101
+ "source": [
102
+ "graph = StateGraph(MessagesState)\n",
103
+ "\n",
104
+ "graph.add_node(\"expert\", expert)\n",
105
+ "graph.add_node(\"tools\", tool_node)\n",
106
+ "\n",
107
+ "graph.add_edge(START, \"expert\")\n",
108
+ "graph.add_conditional_edges(\"expert\", should_continue)\n",
109
+ "graph.add_edge(\"tools\", \"expert\")"
110
+ ]
111
+ },
112
+ {
113
+ "cell_type": "code",
114
+ "execution_count": 7,
115
+ "metadata": {},
116
+ "outputs": [],
117
+ "source": [
118
+ "checkpointer = MemorySaver()\n",
119
+ "\n",
120
+ "app = graph.compile(checkpointer=checkpointer)"
121
+ ]
122
+ },
123
+ {
124
+ "cell_type": "code",
125
+ "execution_count": 8,
126
+ "metadata": {},
127
+ "outputs": [
128
+ {
129
+ "name": "stdout",
130
+ "output_type": "stream",
131
+ "text": [
132
+ "I can access and process information from a resume and a job description using the `get_resume()` and `get_job()` functions. Based on the content of both, I can identify areas where the resume could be improved to better match the job description. However, I will only use information explicitly present in the provided resume and job description. I cannot add skills or experiences that are not already listed in the resume.\n",
133
+ "Based on the job title \"Software Engineer\" and the skills listed in the resume (\"Software Architecture\", \"System Optimization\", \"Team Mentorship\", \"Project Management\", \"API Development\", \"Continuous Integration/Continuous Deployment\", \"Bilingual\"), I can offer the following suggestions for improving the resume:\n",
134
+ "\n",
135
+ "* **Highlight relevant skills:** The resume should emphasize skills directly relevant to the \"Software Engineer\" role. For example, the \"API Development\" and \"Continuous Integration/Continuous Deployment\" skills should be prominently featured, perhaps with examples of projects where these skills were used.\n",
136
+ "\n",
137
+ "* **Quantify achievements:** Whenever possible, quantify accomplishments. Instead of simply listing \"Project Management,\" describe specific projects, the size of the team managed, and the positive outcomes achieved. Similarly, quantify successes in system optimization or software architecture.\n",
138
+ "\n",
139
+ "* **Tailor to the job description:** If the job description provides more detail (which it currently doesn't), further adjustments can be made to align the resume even more closely. For instance, if the job description emphasizes a specific programming language or framework, ensure that expertise in that area is highlighted.\n",
140
+ "\n",
141
+ "* **Consider adding a summary:** A brief summary at the beginning of the resume could highlight the most relevant skills and experience, immediately grabbing the reader's attention.\n",
142
+ "\n",
143
+ "I cannot make specific changes to the resume's content without knowing more about the specific projects and experiences. The suggestions above focus on improving the presentation and emphasizing the existing information to better match the job description.\n",
144
+ "Exiting...\n"
145
+ ]
146
+ }
147
+ ],
148
+ "source": [
149
+ "while True:\n",
150
+ " user_input = input(\">> \")\n",
151
+ " if user_input.lower() in [\"quit\", \"exit\"]:\n",
152
+ " print(\"Exiting...\")\n",
153
+ " break\n",
154
+ "\n",
155
+ " response = app.invoke(\n",
156
+ " {\"messages\": [HumanMessage(content=user_input)]},\n",
157
+ " config={\"configurable\": {\"thread_id\": 1}}\n",
158
+ " )\n",
159
+ "\n",
160
+ " print(response[\"messages\"][-1].content)"
161
+ ]
162
+ },
163
+ {
164
+ "cell_type": "code",
165
+ "execution_count": null,
166
+ "metadata": {},
167
+ "outputs": [],
168
+ "source": []
169
+ }
170
+ ],
171
+ "metadata": {
172
+ "kernelspec": {
173
+ "display_name": "paintrekbot",
174
+ "language": "python",
175
+ "name": "python3"
176
+ },
177
+ "language_info": {
178
+ "codemirror_mode": {
179
+ "name": "ipython",
180
+ "version": 3
181
+ },
182
+ "file_extension": ".py",
183
+ "mimetype": "text/x-python",
184
+ "name": "python",
185
+ "nbconvert_exporter": "python",
186
+ "pygments_lexer": "ipython3",
187
+ "version": "3.12.8"
188
+ }
189
+ },
190
+ "nbformat": 4,
191
+ "nbformat_minor": 2
192
+ }
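Note on agent.ipynb: the notebook wires a two-node LangGraph loop, an `expert` LLM node bound to the `get_job`/`get_resume` tools plus a `ToolNode`, with `should_continue` routing back to the tools while the model keeps emitting tool calls. Two small improvements worth making are loading the Google API key from the environment instead of hardcoding it in a cell, and passing the system prompt as a `SystemMessage` rather than a bare string. A minimal script-form sketch under those assumptions (same model name, `tools.py` importable, `GOOGLE_API_KEY` exported in the environment):

```python
# Sketch only: assumes tools.get_job / tools.get_resume exist and GOOGLE_API_KEY
# is set in the environment (ChatGoogleGenerativeAI picks it up automatically).
from typing import Literal

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import ToolNode

from tools import get_job, get_resume

tools = [get_job, get_resume]
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest").bind_tools(tools)

SYSTEM_PROMPT = SystemMessage(content=(
    "You are a resume expert. Improve the user's resume based on a job description, "
    "using only information returned by the provided tools. Never invent skills or experience."
))

def expert(state: MessagesState):
    # Prepend the system prompt on every turn so the model keeps its instructions.
    response = llm.invoke([SYSTEM_PROMPT] + state["messages"])
    return {"messages": [response]}

def should_continue(state: MessagesState) -> Literal["tools", END]:
    # Route to the tool node while the last AI message still requests tool calls.
    if state["messages"][-1].tool_calls:
        return "tools"
    return END

graph = StateGraph(MessagesState)
graph.add_node("expert", expert)
graph.add_node("tools", ToolNode(tools))
graph.add_edge(START, "expert")
graph.add_conditional_edges("expert", should_continue)
graph.add_edge("tools", "expert")

app = graph.compile(checkpointer=MemorySaver())

if __name__ == "__main__":
    config = {"configurable": {"thread_id": "1"}}
    while True:
        user_input = input(">> ")
        if user_input.lower() in ("quit", "exit"):
            break
        result = app.invoke({"messages": [HumanMessage(content=user_input)]}, config=config)
        print(result["messages"][-1].content)
```

The `MemorySaver` checkpointer plus a fixed `thread_id` is what carries the conversation (and accumulated tool results) across successive `invoke` calls.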
example_agent_chatgpt.ipynb ADDED
@@ -0,0 +1,233 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 21,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os\n",
10
+ "from typing import Dict, List, Optional, TypedDict, Union\n",
11
+ "from langgraph.graph import Graph\n",
12
+ "from langchain_core.messages import HumanMessage, AIMessage\n",
13
+ "from pydantic import BaseModel, Field\n",
14
+ "import json\n",
15
+ "from typing import Annotated\n",
16
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n"
17
+ ]
18
+ },
19
+ {
20
+ "cell_type": "code",
21
+ "execution_count": 16,
22
+ "metadata": {},
23
+ "outputs": [],
24
+ "source": [
25
+ "GOOGLE_API_KEY=\"AIzaSyA8eIxHBqeBWEP1g3t8bpvLxNaH5Lquemo\"\n",
26
+ "os.environ[\"GOOGLE_API_KEY\"] = GOOGLE_API_KEY"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "code",
31
+ "execution_count": 17,
32
+ "metadata": {},
33
+ "outputs": [],
34
+ "source": [
35
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-1.5-flash-latest\")"
36
+ ]
37
+ },
38
+ {
39
+ "cell_type": "code",
40
+ "execution_count": 22,
41
+ "metadata": {},
42
+ "outputs": [],
43
+ "source": [
44
+ "# Define our data model\n",
45
+ "class Data(BaseModel):\n",
46
+ " name: str = Field(description=\"name\")\n",
47
+ " age: int = Field(description=\"age\")\n",
48
+ " hobby: Optional[List[str]] = Field(description=\"A list of hobbies.\")\n",
49
+ "\n",
50
+ "# Define state for the graph\n",
51
+ "class GraphState(TypedDict):\n",
52
+ " messages: List[Union[HumanMessage, AIMessage]]\n",
53
+ " collected_data: Dict\n",
54
+ " current_field: Optional[str]"
55
+ ]
56
+ },
57
+ {
58
+ "cell_type": "code",
59
+ "execution_count": 26,
60
+ "metadata": {},
61
+ "outputs": [],
62
+ "source": [
63
+ "# Function to analyze messages and extract information\n",
64
+ "def extract_info(state: GraphState) -> Dict:\n",
65
+ " # Get the last user message\n",
66
+ " last_message = state[\"messages\"][-2].content if len(state[\"messages\"]) > 1 else \"\"\n",
67
+ " \n",
68
+ " # Prepare prompt for information extraction\n",
69
+ " extraction_prompt = f\"\"\"\n",
70
+ " Extract the following information from the user's message: '{last_message}'\n",
71
+ " If the information is present, format it as JSON matching these fields:\n",
72
+ " - name: person's name\n",
73
+ " - age: person's age (as integer)\n",
74
+ " - hobby: list of hobbies\n",
75
+ " \n",
76
+ " Only include fields that are clearly mentioned. Return 'null' for missing fields.\n",
77
+ " \"\"\"\n",
78
+ " \n",
79
+ " # Extract information using LLM\n",
80
+ " response = llm.invoke([HumanMessage(content=extraction_prompt)])\n",
81
+ " try:\n",
82
+ " extracted_info = json.loads(response.content)\n",
83
+ " # Update collected data with any new information\n",
84
+ " for key, value in extracted_info.items():\n",
85
+ " if value is not None:\n",
86
+ " state[\"collected_data\"][key] = value\n",
87
+ " except:\n",
88
+ " pass\n",
89
+ " \n",
90
+ " return {\"messages\": state[\"messages\"], \"collected_data\": state[\"collected_data\"], \"current_field\": state[\"current_field\"]}\n",
91
+ "\n",
92
+ "# Function to determine next field\n",
93
+ "def determine_next_field(state: Dict) -> Dict:\n",
94
+ " required_fields = {\"name\", \"age\"}\n",
95
+ " collected_fields = set(state[\"collected_data\"].keys())\n",
96
+ " missing_fields = required_fields - collected_fields\n",
97
+ " \n",
98
+ " if missing_fields:\n",
99
+ " state[\"current_field\"] = next(iter(missing_fields))\n",
100
+ " else:\n",
101
+ " state[\"current_field\"] = \"hobby\" if \"hobby\" not in state[\"collected_data\"] else None\n",
102
+ " \n",
103
+ " return state\n",
104
+ "\n",
105
+ "# Function to generate response\n",
106
+ "def generate_response(state: Dict) -> Dict:\n",
107
+ " if state[\"current_field\"] is None:\n",
108
+ " # All information collected\n",
109
+ " data = Data(**state[\"collected_data\"])\n",
110
+ " response = f\"Thank you! I've collected all the information:\\n{data.model_dump_json(indent=2)}\"\n",
111
+ " else:\n",
112
+ " # Ask for specific field\n",
113
+ " field_descriptions = {\n",
114
+ " \"name\": \"your name\",\n",
115
+ " \"age\": \"your age\",\n",
116
+ " \"hobby\": \"any hobbies you have\"\n",
117
+ " }\n",
118
+ " response = f\"Could you please tell me {field_descriptions[state['current_field']]}?\"\n",
119
+ " \n",
120
+ " state[\"messages\"].append(AIMessage(content=response))\n",
121
+ " return state\n",
122
+ "\n",
123
+ "# Create the graph\n",
124
+ "def create_chat_graph() -> Graph:\n",
125
+ " workflow = Graph()\n",
126
+ " \n",
127
+ " # Define the graph edges\n",
128
+ " workflow.add_node(\"extract_info\", extract_info)\n",
129
+ " workflow.add_node(\"determine_next_field\", determine_next_field)\n",
130
+ " workflow.add_node(\"generate_response\", generate_response)\n",
131
+ " \n",
132
+ " workflow.add_edge(\"extract_info\", \"determine_next_field\")\n",
133
+ " workflow.add_edge(\"determine_next_field\", \"generate_response\")\n",
134
+ " \n",
135
+ " # Set the entry point\n",
136
+ " workflow.set_entry_point(\"extract_info\")\n",
137
+ " workflow.set_finish_point(\"generate_response\")\n",
138
+ " \n",
139
+ " return workflow"
140
+ ]
141
+ },
142
+ {
143
+ "cell_type": "code",
144
+ "execution_count": 27,
145
+ "metadata": {},
146
+ "outputs": [
147
+ {
148
+ "name": "stdout",
149
+ "output_type": "stream",
150
+ "text": [
151
+ "Assistant: Hi! I'd like to collect some information from you.\n"
152
+ ]
153
+ },
154
+ {
155
+ "ename": "KeyError",
156
+ "evalue": "'generate_response'",
157
+ "output_type": "error",
158
+ "traceback": [
159
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
160
+ "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
161
+ "Cell \u001b[0;32mIn[27], line 34\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[38;5;28;01mbreak\u001b[39;00m\n\u001b[1;32m 33\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;18m__name__\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m__main__\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m---> 34\u001b[0m run_chat()\n",
162
+ "Cell \u001b[0;32mIn[27], line 27\u001b[0m, in \u001b[0;36mrun_chat\u001b[0;34m()\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[38;5;66;03m# Run the graph and update state\u001b[39;00m\n\u001b[1;32m 26\u001b[0m new_state \u001b[38;5;241m=\u001b[39m app\u001b[38;5;241m.\u001b[39minvoke(state)\n\u001b[0;32m---> 27\u001b[0m state \u001b[38;5;241m=\u001b[39m new_state[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgenerate_response\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;66;03m# Get the final state from the last node\u001b[39;00m\n\u001b[1;32m 29\u001b[0m \u001b[38;5;66;03m# Check if all information is collected\u001b[39;00m\n\u001b[1;32m 30\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m state[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcurrent_field\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(state[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcollected_data\u001b[39m\u001b[38;5;124m\"\u001b[39m]) \u001b[38;5;241m>\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m2\u001b[39m: \u001b[38;5;66;03m# At least name and age collected\u001b[39;00m\n",
163
+ "\u001b[0;31mKeyError\u001b[0m: 'generate_response'"
164
+ ]
165
+ }
166
+ ],
167
+ "source": [
168
+ "# Example usage\n",
169
+ "def run_chat():\n",
170
+ " graph = create_chat_graph()\n",
171
+ " app = graph.compile()\n",
172
+ " \n",
173
+ " # Initialize state\n",
174
+ " state = {\n",
175
+ " \"messages\": [AIMessage(content=\"Hi! I'd like to collect some information from you.\")],\n",
176
+ " \"collected_data\": {},\n",
177
+ " \"current_field\": None\n",
178
+ " }\n",
179
+ " \n",
180
+ " while True:\n",
181
+ " # Print last message\n",
182
+ " print(\"Assistant:\", state[\"messages\"][-1].content)\n",
183
+ " \n",
184
+ " # Get user input\n",
185
+ " user_input = input(\"User: \")\n",
186
+ " if user_input.lower() in ['quit', 'exit']:\n",
187
+ " break\n",
188
+ " \n",
189
+ " # Update state with user message\n",
190
+ " state[\"messages\"].append(HumanMessage(content=user_input))\n",
191
+ " \n",
192
+ " # Run the graph and update state\n",
193
+ " new_state = app.invoke(state)\n",
194
+ " state = new_state[\"generate_response\"] # Get the final state from the last node\n",
195
+ " \n",
196
+ " # Check if all information is collected\n",
197
+ " if state[\"current_field\"] is None and len(state[\"collected_data\"]) >= 2: # At least name and age collected\n",
198
+ " break\n",
199
+ "\n",
200
+ "if __name__ == \"__main__\":\n",
201
+ " run_chat()"
202
+ ]
203
+ },
204
+ {
205
+ "cell_type": "code",
206
+ "execution_count": null,
207
+ "metadata": {},
208
+ "outputs": [],
209
+ "source": []
210
+ }
211
+ ],
212
+ "metadata": {
213
+ "kernelspec": {
214
+ "display_name": "paintrekbot",
215
+ "language": "python",
216
+ "name": "python3"
217
+ },
218
+ "language_info": {
219
+ "codemirror_mode": {
220
+ "name": "ipython",
221
+ "version": 3
222
+ },
223
+ "file_extension": ".py",
224
+ "mimetype": "text/x-python",
225
+ "name": "python",
226
+ "nbconvert_exporter": "python",
227
+ "pygments_lexer": "ipython3",
228
+ "version": "3.12.8"
229
+ }
230
+ },
231
+ "nbformat": 4,
232
+ "nbformat_minor": 2
233
+ }
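Note on example_agent_chatgpt.ipynb: the `KeyError: 'generate_response'` in the last cell appears to come from indexing the invoke result by node name. With a plain `langgraph.graph.Graph`, `compile().invoke(state)` already returns the output of the finish-point node (`generate_response`), so the returned dict has the state keys (`messages`, `collected_data`, `current_field`), not node names. A hedged fix sketch for just that loop, reusing the notebook's `create_chat_graph` (also note that `extract_info` reads `messages[-2]`; after the user's message is appended, the newest user text is at `messages[-1]`):

```python
from langchain_core.messages import AIMessage, HumanMessage

def run_chat():
    # create_chat_graph() is the builder defined earlier in this notebook.
    app = create_chat_graph().compile()

    state = {
        "messages": [AIMessage(content="Hi! I'd like to collect some information from you.")],
        "collected_data": {},
        "current_field": None,
    }

    while True:
        print("Assistant:", state["messages"][-1].content)

        user_input = input("User: ")
        if user_input.lower() in ("quit", "exit"):
            break

        state["messages"].append(HumanMessage(content=user_input))

        # invoke() returns the finish node's output directly; no node-name lookup needed.
        state = app.invoke(state)

        # Stop once the required fields (name and age) have been collected.
        if state["current_field"] is None and len(state["collected_data"]) >= 2:
            print("Assistant:", state["messages"][-1].content)
            break
```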
modules/__pycache__/job.cpython-312.pyc ADDED
Binary file (2.31 kB).
 
modules/__pycache__/resume.cpython-312.pyc ADDED
Binary file (4.58 kB).
 
modules/job.py ADDED
@@ -0,0 +1,45 @@
1
+ from typing import List, Optional
2
+ from pydantic import BaseModel, Field
3
+
4
+ class Job(BaseModel):
5
+ title: str = Field(description="Job title or position.")
6
+ company: str = Field(description="The company name.")
7
+ location: Optional[str] = Field(description="Location of the job.")
8
+ salary: Optional[str] = Field(description="Salary range for the job.")
9
+ description: str = Field(description="Detailed job description.")
10
+ responsibilities: List[str] = Field(description="List of job responsibilities.")
11
+ benefits: Optional[List[str]] = Field(description="List of job benefits.")
12
+ employment_type: Optional[str] = Field(description="Type of employment (e.g., full-time, part-time).")
13
+ posted_date: Optional[str] = Field(description="Date when the job was posted.")
14
+
15
+ @classmethod
16
+ def mock(cls):
17
+ return cls(
18
+ title='Software Engineer',
19
+ company='Tech Corp',
20
+ location='San Francisco, CA',
21
+ salary='$100,000 - $120,000',
22
+ description='We are looking for a skilled Software Engineer to join our team.',
23
+ requirements=[
24
+ 'Bachelor\'s degree in Computer Science or related field',
25
+ '3+ years of experience in software development',
26
+ 'Proficiency in Python and JavaScript',
27
+ 'Experience with Django and React',
28
+ 'Strong problem-solving skills'
29
+ ],
30
+ responsibilities=[
31
+ 'Develop and maintain web applications',
32
+ 'Collaborate with cross-functional teams',
33
+ 'Write clean, scalable, and efficient code',
34
+ 'Participate in code reviews',
35
+ 'Troubleshoot and debug applications'
36
+ ],
37
+ benefits=[
38
+ 'Health insurance',
39
+ '401(k) matching',
40
+ 'Paid time off',
41
+ 'Flexible working hours'
42
+ ],
43
+ employment_type='Full-time',
44
+ posted_date='2024-10-01'
45
+ )
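Note on modules/job.py: `Job.mock()` passes a `requirements=[...]` keyword, but the `Job` model declares no such field, so pydantic (whose default config is `extra="ignore"`) drops the list silently and the tools never see it. A small demonstration of the silent drop, plus the field declaration that would preserve it (hedged sketch, not part of the commit):

```python
from typing import List, Optional
from pydantic import BaseModel, Field

class JobSketch(BaseModel):
    """Cut-down stand-in for modules.job.Job, showing only the relevant fields."""
    title: str = Field(description="Job title or position.")
    description: str = Field(description="Detailed job description.")
    # The missing declaration: without it, requirements=[...] is silently ignored.
    requirements: Optional[List[str]] = Field(
        default=None, description="List of job requirements."
    )

job = JobSketch(
    title="Software Engineer",
    description="We are looking for a skilled Software Engineer.",
    requirements=["3+ years of experience in software development"],
)
print(job.requirements)  # survives only because the field is declared
```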
modules/resume.py ADDED
@@ -0,0 +1,95 @@
1
+ from typing import List, Optional
2
+ from pydantic import BaseModel, Field, validator
3
+
4
+ class WorkExperience(BaseModel):
5
+ job_title: str = Field(description="Job title or position.")
6
+ company: str = Field(description="The company name.")
7
+ experience: int = Field(description="Years of experience in the job.")
8
+ responsibilities: List[str] = Field(description="List of responsibilities in the job.")
9
+
10
+ class Education(BaseModel):
11
+ degree: str = Field(description="Degree obtained.")
12
+ school: str = Field(description="The university name.")
13
+ major: str = Field(description="Major subject.")
14
+ year: Optional[int] = Field(description="Year of graduation.")
15
+
16
+ @validator('year', pre=True, always=True)
17
+ def set_year(cls, v):
18
+ if v is None:
19
+ return 0
20
+ return v
21
+
22
+ class Resume(BaseModel):
23
+ """Structured resume data."""
24
+
25
+ name: str = Field(description="Name of the person")
26
+ professional_summary: str = Field(description="Professional summary of the person.")
27
+ work_experience: List[WorkExperience] = Field(description="List of work experiences held by the person.")
28
+ education: List[Education] = Field(description="List of educational qualifications of the person.")
29
+ skills: List[str] = Field(description="List of skills relevant to the jobs.")
30
+
31
+ @classmethod
32
+ def mock(cls):
33
+ return cls(
34
+ name='Jeff',
35
+ professional_summary='Innovative software engineer with 8+ years of experience in the tech industry. Senior Developer at Company X, Freelance Software Architect, and Junior Developer at Company Y. Proficient in developing scalable applications, optimizing system performance, and leading cross-functional teams. Fluent in English and Spanish.',
36
+ work_experience=[
37
+ WorkExperience(
38
+ job_title='Senior Developer',
39
+ company='Company X',
40
+ experience=5,
41
+ responsibilities=[
42
+ 'Led the development of scalable web applications',
43
+ 'Optimized system performance and reduced server costs',
44
+ 'Mentored junior developers and conducted code reviews',
45
+ 'Collaborated with product managers to define project requirements',
46
+ 'Implemented CI/CD pipelines to streamline deployments',
47
+ 'Developed RESTful APIs for mobile and web applications',
48
+ 'Ensured application security and compliance with industry standards'
49
+ ]
50
+ ),
51
+ WorkExperience(
52
+ job_title='Freelance Software Architect',
53
+ company='Independent Consultant',
54
+ experience=2,
55
+ responsibilities=[
56
+ 'Designed software architecture for various clients',
57
+ 'Provided technical consultancy and project management',
58
+ 'Developed custom software solutions to meet client needs',
59
+ 'Conducted system analysis and performance tuning',
60
+ 'Integrated third-party services and APIs',
61
+ 'Created technical documentation and user manuals'
62
+ ]
63
+ ),
64
+ WorkExperience(
65
+ job_title='Junior Developer',
66
+ company='Company Y',
67
+ experience=1,
68
+ responsibilities=[
69
+ 'Assisted in the development of web applications',
70
+ 'Performed bug fixes and code maintenance',
71
+ 'Collaborated with senior developers on project tasks',
72
+ 'Participated in daily stand-ups and sprint planning',
73
+ 'Wrote unit tests to ensure code quality',
74
+ 'Contributed to open-source projects'
75
+ ]
76
+ )
77
+ ],
78
+ education=[
79
+ Education(
80
+ degree='B.Sc. Computer Science',
81
+ school='X University',
82
+ major='Computer Science',
83
+ year=1999
84
+ )
85
+ ],
86
+ skills=[
87
+ 'Software Architecture',
88
+ 'System Optimization',
89
+ 'Team Mentorship',
90
+ 'Project Management',
91
+ 'API Development',
92
+ 'Continuous Integration/Continuous Deployment',
93
+ 'Bilingual'
94
+ ]
95
+ )
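Note on modules/resume.py: `@validator('year', pre=True, always=True)` is the pydantic v1 API; under the pydantic 2.x pinned in requirements.txt it still runs via the deprecation shim but emits warnings. A v2-idiom sketch of the same `Education` behaviour (field names copied from the module; the intent is assumed to be "treat a None year as 0"):

```python
from typing import Optional
from pydantic import BaseModel, Field, field_validator

class Education(BaseModel):
    degree: str = Field(description="Degree obtained.")
    school: str = Field(description="The university name.")
    major: str = Field(description="Major subject.")
    year: Optional[int] = Field(description="Year of graduation.")

    @field_validator("year", mode="before")
    @classmethod
    def default_year(cls, v):
        # Same behaviour as the v1 validator: a None year becomes 0.
        return 0 if v is None else v

print(Education(degree="B.Sc.", school="X University",
                major="Computer Science", year=None).year)  # -> 0
```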
paintrek-chat-v1.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,117 @@
1
+ aiohappyeyeballs==2.4.4
2
+ aiohttp==3.11.11
3
+ aiosignal==1.3.2
4
+ annotated-types==0.7.0
5
+ anyio==4.8.0
6
+ asttokens @ file:///opt/conda/conda-bld/asttokens_1646925590279/work
7
+ attrs==25.1.0
8
+ Bottleneck @ file:///croot/bottleneck_1731058641041/work
9
+ Brotli @ file:///croot/brotli-split_1736182456865/work
10
+ cachetools==5.5.1
11
+ certifi==2024.12.14
12
+ charset-normalizer==3.4.1
13
+ comm @ file:///croot/comm_1709322850197/work
14
+ contourpy @ file:///croot/contourpy_1732540045555/work
15
+ cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
16
+ debugpy @ file:///croot/debugpy_1736267418885/work
17
+ decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
18
+ distro==1.9.0
19
+ executing @ file:///opt/conda/conda-bld/executing_1646925071911/work
20
+ filetype==1.2.0
21
+ fonttools @ file:///croot/fonttools_1737039080035/work
22
+ frozenlist==1.5.0
23
+ google-ai-generativelanguage==0.6.15
24
+ google-api-core==2.24.1
25
+ google-api-python-client==2.160.0
26
+ google-auth==2.38.0
27
+ google-auth-httplib2==0.2.0
28
+ google-generativeai==0.8.4
29
+ googleapis-common-protos==1.66.0
30
+ greenlet==3.1.1
31
+ grpcio==1.70.0
32
+ grpcio-status==1.70.0
33
+ h11==0.14.0
34
+ httpcore==1.0.7
35
+ httplib2==0.22.0
36
+ httpx==0.28.1
37
+ idna==3.10
38
+ ipykernel @ file:///croot/ipykernel_1737660677549/work
39
+ ipython @ file:///croot/ipython_1734548052611/work
40
+ jedi @ file:///croot/jedi_1733987392413/work
41
+ jiter==0.8.2
42
+ jsonpatch==1.33
43
+ jsonpointer==3.0.0
44
+ jupyter_client @ file:///croot/jupyter_client_1737570961872/work
45
+ jupyter_core @ file:///croot/jupyter_core_1718818295206/work
46
+ kiwisolver @ file:///croot/kiwisolver_1737039087198/work
47
+ langchain==0.3.17
48
+ langchain-core==0.3.33
49
+ langchain-google-genai==2.0.9
50
+ langchain-ollama==0.2.3
51
+ langchain-openai==0.3.3
52
+ langchain-text-splitters==0.3.5
53
+ langgraph==0.2.68
54
+ langgraph-checkpoint==2.0.10
55
+ langgraph-sdk==0.1.51
56
+ langsmith==0.3.3
57
+ matplotlib==3.10.0
58
+ matplotlib-inline @ file:///work/perseverance-python-buildout/croot/matplotlib-inline_1698864771271/work
59
+ mkl-service==2.4.0
60
+ mkl_fft @ file:///io/mkl313/mkl_fft_1730824109137/work
61
+ mkl_random @ file:///io/mkl313/mkl_random_1730823916628/work
62
+ msgpack==1.1.0
63
+ multidict==6.1.0
64
+ nest-asyncio @ file:///croot/nest-asyncio_1708532673751/work
65
+ numexpr @ file:///croot/numexpr_1730215937391/work
66
+ numpy @ file:///croot/numpy_and_numpy_base_1708638617955/work/dist/numpy-1.26.4-cp312-cp312-linux_x86_64.whl#sha256=1d700f51d8b4fa684d858c9e3b56b1656bc5c82b6b79ff08d4e3b491c430059f
67
+ ollama==0.4.7
68
+ openai==1.60.2
69
+ orjson==3.10.15
70
+ packaging @ file:///croot/packaging_1734472117206/work
71
+ pandas @ file:///croot/pandas_1732735089971/work/dist/pandas-2.2.3-cp312-cp312-linux_x86_64.whl#sha256=57b66702d418720ec8483f7c4ec7c08d41815316ad7ce09d5b7bbc34eefcfdfd
72
+ parso @ file:///croot/parso_1733963305961/work
73
+ pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
74
+ pillow @ file:///croot/pillow_1738010226202/work
75
+ platformdirs @ file:///work/perseverance-python-buildout/croot/platformdirs_1701732573265/work
76
+ prompt-toolkit @ file:///croot/prompt-toolkit_1704404351921/work
77
+ propcache==0.2.1
78
+ proto-plus==1.26.0
79
+ protobuf==5.29.3
80
+ psutil @ file:///croot/psutil_1736367091698/work
81
+ ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
82
+ pure-eval @ file:///opt/conda/conda-bld/pure_eval_1646925070566/work
83
+ pyasn1==0.6.1
84
+ pyasn1_modules==0.4.1
85
+ pydantic==2.10.6
86
+ pydantic_core==2.27.2
87
+ Pygments @ file:///work/perseverance-python-buildout/croot/pygments_1698846270603/work
88
+ pyparsing==3.2.1
89
+ python-dateutil @ file:///croot/python-dateutil_1716495738603/work
90
+ python-dotenv==1.0.1
91
+ pytz @ file:///croot/pytz_1713974312559/work
92
+ PyYAML==6.0.2
93
+ pyzmq @ file:///croot/pyzmq_1734687138743/work
94
+ regex==2024.11.6
95
+ requests==2.32.3
96
+ requests-toolbelt==1.0.0
97
+ rsa==4.9
98
+ seaborn @ file:///croot/seaborn_1718302919398/work
99
+ setuptools==75.8.0
100
+ six @ file:///tmp/build/80754af9/six_1644875935023/work
101
+ sniffio==1.3.1
102
+ SQLAlchemy==2.0.37
103
+ stack-data @ file:///opt/conda/conda-bld/stack_data_1646927590127/work
104
+ tenacity==9.0.0
105
+ tiktoken==0.8.0
106
+ tornado @ file:///croot/tornado_1733960490606/work
107
+ tqdm==4.67.1
108
+ traitlets @ file:///croot/traitlets_1718227057033/work
109
+ typing_extensions==4.12.2
110
+ tzdata @ file:///croot/python-tzdata_1690578112552/work
111
+ unicodedata2 @ file:///croot/unicodedata2_1736541023050/work
112
+ uritemplate==4.1.1
113
+ urllib3==2.3.0
114
+ wcwidth @ file:///Users/ktietz/demo/mc3/conda-bld/wcwidth_1629357192024/work
115
+ wheel==0.45.1
116
+ yarl==1.18.3
117
+ zstandard==0.23.0
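Note on requirements.txt: the file is a raw `pip freeze` from a conda environment, so many entries pin local build artifacts (`package @ file:///croot/...`) that will not resolve on another machine. A small helper sketch that filters those entries out so the rest can be re-pinned by hand (file name assumed; adjust as needed):

```python
from pathlib import Path

def portable_requirements(path: str = "requirements.txt") -> list[str]:
    """Drop '<pkg> @ file:///...' lines, which only resolve on the original machine."""
    lines = Path(path).read_text().splitlines()
    return [line for line in lines if "@ file:///" not in line]

if __name__ == "__main__":
    print("\n".join(portable_requirements()))
```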
tools.py ADDED
@@ -0,0 +1,31 @@
1
+ from typing import Optional, Literal
2
+ from langchain_core.tools import tool
3
+
4
+ from modules.job import Job
5
+ from modules.resume import Resume
6
+
7
+ def process_job() -> Job:
8
+ """Process job data."""
9
+ job = Job.mock()
10
+ return job
11
+
12
+ def process_resume() -> Resume:
13
+ """Process resume data."""
14
+ resume = Resume.mock()
15
+ return resume
16
+
17
+ @tool
18
+ def get_job(field: Optional[Literal['title', 'company', 'location', 'salary', 'description', 'responsibilities', 'benefits', 'employment_type', 'posted_date']] = None) -> str:
19
+ """Get job data."""
20
+ job = process_job()
21
+ if field:
22
+ return getattr(job, field)
23
+ return job.dict()
24
+
25
+ @tool
26
+ def get_resume(field: Optional[Literal['name', 'professional_summary', 'work_experience', 'education', 'skills']] = None) -> str:
27
+ """Get resume data."""
28
+ resume = process_resume()
29
+ if field:
30
+ return getattr(resume, field)
31
+ return resume.dict()
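Note on tools.py: both tools are annotated `-> str` but actually return either a single field value or, when `field` is omitted, a whole model dump, and `.dict()` is deprecated under the pinned pydantic 2.x in favour of `.model_dump()`. A sketch with those two details adjusted (the `process_*` helpers are folded into the tools for brevity; `Job.mock()`/`Resume.mock()` are the constructors above):

```python
from typing import Any, Literal, Optional

from langchain_core.tools import tool

from modules.job import Job
from modules.resume import Resume


@tool
def get_job(field: Optional[Literal[
    "title", "company", "location", "salary", "description",
    "responsibilities", "benefits", "employment_type", "posted_date",
]] = None) -> Any:
    """Get job data, optionally restricted to a single field."""
    job = Job.mock()
    if field:
        return getattr(job, field)
    return job.model_dump()  # pydantic v2 replacement for .dict()


@tool
def get_resume(field: Optional[Literal[
    "name", "professional_summary", "work_experience", "education", "skills",
]] = None) -> Any:
    """Get resume data, optionally restricted to a single field."""
    resume = Resume.mock()
    if field:
        return getattr(resume, field)
    return resume.model_dump()
```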