import datetime
import os

import pytz
import requests
import yaml
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool

from Gradio_UI import GradioUI
from tools.final_answer import FinalAnswerTool
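
# NOTE: summarize_text below reads the HF_TOKEN environment variable to call the
# Hugging Face Inference API. Set it as a Space secret (or export it locally) before launching.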

@tool
def summarize_text(text: str, max_length: int = 130) -> str:
    """Summarize text using the BART model hosted on Hugging Face.
    Args:
        text: The original text to summarize.
        max_length: Maximum length of the generated summary (default: 130).
    """
    try:
        API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
        headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
        data = {
            "inputs": text,
            "parameters": {"max_length": max_length},
        }
        # Call the hosted inference endpoint; raise_for_status surfaces HTTP errors.
        response = requests.post(API_URL, headers=headers, json=data)
        response.raise_for_status()
        summary = response.json()[0]['summary_text']
        return f"Summary: {summary}"
    except Exception as e:
        return f"Summarization error: {str(e)}"

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the timezone name and format the current local time.
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

final_answer = FinalAnswerTool()

# Model served via the Hugging Face Inference API.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

# Remote text-to-image tool from the Hub, plus the prompt templates used by the agent.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        DuckDuckGoSearchTool(),
        image_generation_tool,
        summarize_text,
        get_current_time_in_timezone,  # make sure this tool is registered here
    ],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()
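
# To run outside a Space:  python app.py  (this expects prompts.yaml, tools/final_answer.py,
# and Gradio_UI.py next to this script, plus the Space's Python dependencies installed).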