from smolagents.tools import Tool
import requests
from typing import List, Dict
import os
from dotenv import load_dotenv
from bs4 import BeautifulSoup

load_dotenv()

def scrape_indeed(position: str, location: str) -> List[Dict]:
    """
    Scrapes job postings from Indeed.
    """
    url = f"https://www.indeed.com/jobs?q=Odoo+{position}&l={location}"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}
    try:
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        jobs = []
        for div in soup.find_all('div', class_='jobsearch-SerpJobCard'):
            title_element = div.find('a', title=True)
            title = title_element.text if title_element else 'N/A'
            company_element = div.find('span', class_='company')
            company = company_element.text.strip() if company_element else 'N/A'
            location_element = div.find('div', class_='location')
            location = location_element.text if location_element else 'N/A'
            link_element = div.find('a', href=True)
            link = 'https://www.indeed.com' + link_element['href'] if link_element else 'N/A'
            jobs.append({
                "Title": title,
                "Company": company,
                "Location": location,
                "Link": link,
                "Source": "Indeed"
            })
        return jobs
    except requests.exceptions.RequestException as e:
        print(f"Indeed scraping failed: {e}")
        return []
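# NOTE: the CSS selectors above target Indeed's classic search-result markup
# ('jobsearch-SerpJobCard', 'company', 'location'); Indeed changes its markup
# and applies anti-bot measures, so an empty list here is expected whenever the
# page structure no longer matches.
#
# Illustrative standalone use (hypothetical query values, not run at import time):
#
#     jobs = scrape_indeed("Developer", "Berlin")
#     for job in jobs[:3]:
#         print(job["Title"], "-", job["Company"])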

class LinkedInJobSearchTool(Tool):
    name = "linkedin_job_search"
    description = "Searches for job postings on LinkedIn and Indeed based on job title, location, and work mode (remote, hybrid, in-office) for Odoo profiles."
    
    inputs = {
        "position": {"type": "string", "description": "Job title (e.g., Data Scientist)"},
        "location": {"type": "string", "description": "City or country (e.g., Germany)"},
        "work_mode": {"type": "string", "description": "remote, hybrid, in-office"}
    }
    
    output_type = "array"

    def forward(self, position: str, location: str, work_mode: str) -> List[Dict]:
        """
        Fetches job listings from LinkedIn and Indeed and returns structured JSON.
        """
        BRAVE_API_KEY = os.getenv("BRAVE_API_KEY")
        if not BRAVE_API_KEY:
            return [{"Error": "Brave API key not found in .env file."}]
        
        linkedin_results = []
        indeed_results = []

        # LinkedIn Job Search
        base_url = "https://api.brave.com/v1/jobs"
        params = {
            "q": f"Odoo {position} {work_mode} jobs",
            "location": location,
            "api_key": BRAVE_API_KEY
        }

        try:
            response = requests.get(base_url, params=params, timeout=10)
            response.raise_for_status()
            data = response.json()
            linkedin_jobs = data.get("jobs", [])
            
            if linkedin_jobs:
                for job in linkedin_jobs:
                    linkedin_results.append({
                        "Title": job['title'],
                        "Company": job.get('company', 'N/A'),
                        "Location": job.get('location', 'N/A'),
                        "Posted": job.get('posted_date', 'N/A'),
                        "Link": job.get('url', 'N/A'),
                        "Source": "LinkedIn"
                    })
        except requests.exceptions.RequestException as e:
            linkedin_results = [{"Error": f"LinkedIn Error: {str(e)}"}]

        # Indeed Job Search
        indeed_results = scrape_indeed(position, location)

        # Combine results, prioritizing LinkedIn
        combined_results = linkedin_results + indeed_results

        # Format the results
        formatted_results = ""
        if combined_results:
            for job in combined_results:
                formatted_results += f"Title: {job.get('Title', 'N/A')}\n"
                formatted_results += f"Company: {job.get('Company', 'N/A')}\n"
                formatted_results += f"Location: {job.get('Location', 'N/A')}\n"
                formatted_results += f"Posted: {job.get('Posted', 'N/A')}\n"
                formatted_results += f"Link: {job.get('Link', 'N/A')}\n"
                formatted_results += f"Source: {job.get('Source', 'N/A')}\n"
                formatted_results += "---\n"
        else:
            formatted_results = "No jobs found. Try different keywords."

        return [{"Results": formatted_results}]