import os
import pickle
import socket
import time
from openai import OpenAI
from langchain_huggingface import HuggingFaceEmbeddings
from datasets import load_dataset, Dataset
from sklearn.neighbors import NearestNeighbors
import numpy as np
import gradio as gr
import spaces  # Hugging Face Spaces helper; kept for ZeroGPU compatibility even though no @spaces.GPU decorator is used below
from huggingface_hub import InferenceClient
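# This Space implements a small retrieval-augmented generation (RAG) loop:
#   1. Embed the `tosin2013/autogen` dataset with a MiniLM sentence transformer.
#   2. At query time, retrieve the k nearest documents by cosine distance.
#   3. Pass the retrieved context plus the user question to a chat LLM
#      (Hugging Face serverless inference or OpenAI, per MODEL_PROVIDER).
#   4. Serve the whole pipeline through a Gradio chat UI.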
# Configuration
# Sample questions:
# 1. What are the key features of AutoGen v0.4 that I should utilize when converting user requests into agent code?
# 2. How can I leverage asynchronous messaging in AutoGen v0.4 to enhance my agent's performance?
# 3. What are best practices for writing modular and extensible agent code using AutoGen v0.4?
# 4. Can you convert this user request into AutoGen v0.4 agent code: "Create an agent that classifies customer feedback into positive, negative, or neutral sentiments."
DEFAULT_QUESTION = "Ask me anything about converting user requests into AutoGen v0.4 agent code..."
# Default configuration (set these in your environment to override)
os.environ.setdefault('OPENAI_BASE', "https://api.openai.com/v1")
os.environ.setdefault('OPENAI_MODEL', "gpt-4")
os.environ.setdefault('MODEL_PROVIDER', "huggingface")
model_provider = os.environ.get("MODEL_PROVIDER")
# Instantiate the client for openai v1.x
if model_provider.lower() == "openai":
    MODEL_NAME = os.environ['OPENAI_MODEL']
    client = OpenAI(
        base_url=os.environ.get("OPENAI_BASE"),
        api_key=os.environ.get("OPENAI_API_KEY")
    )
else:
    MODEL_NAME = "deepseek-ai/deepseek-coder-33b-instruct"
    # Initialize the Hugging Face serverless InferenceClient
    hf_client = InferenceClient(
        model=MODEL_NAME,
        api_key=os.environ.get("HF_TOKEN"),
        timeout=30  # keep the timeout short so failures surface quickly
    )
# Load the Hugging Face dataset
dataset = load_dataset('tosin2013/autogen', streaming=True)
dataset = Dataset.from_list(list(dataset['train']))
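# Note: streaming=True avoids downloading the full dataset cache up front;
# the train split is then materialized in memory so documents can be
# indexed by position during retrieval.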
# Initialize embeddings
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",
    model_kwargs={"device": "cpu"}
)
# Extract texts from the dataset
texts = dataset['input']
# Create and cache embeddings for the texts
if not os.path.exists('embeddings.npy'):
    print("[LOG] Generating embeddings...")
    text_embeddings = embeddings.embed_documents(texts)
    np.save('embeddings.npy', text_embeddings)
else:
    print("[LOG] Loading cached embeddings...")
    text_embeddings = np.load('embeddings.npy')
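# Both caches are keyed only on file existence: delete embeddings.npy and
# nn_model.pkl whenever the dataset or the embedding model changes.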
# Fit and cache the nearest-neighbors index
if not os.path.exists('nn_model.pkl'):
    print("[LOG] Fitting nearest neighbors model...")
    nn = NearestNeighbors(n_neighbors=5, metric='cosine')
    nn.fit(np.array(text_embeddings))
    with open('nn_model.pkl', 'wb') as f:
        pickle.dump(nn, f)
else:
    print("[LOG] Loading cached nearest neighbors model...")
    with open('nn_model.pkl', 'rb') as f:
        nn = pickle.load(f)
def get_relevant_documents(query, k=5):
    """Retrieve the k most relevant documents for the query."""
    start_time = time.time()
    query_embedding = embeddings.embed_query(query)
    distances, indices = nn.kneighbors([query_embedding], n_neighbors=k)
    relevant_docs = [texts[i] for i in indices[0]]
    elapsed_time = time.time() - start_time
    print(f"[PERF] get_relevant_documents took {elapsed_time:.2f} seconds")
    return relevant_docs
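# Example usage (hypothetical query string; k overrides the default of 5):
#   docs = get_relevant_documents("asynchronous messaging in AutoGen", k=3)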
def generate_response(question, history):
    """Answer a question, trying the GPU path first and falling back to CPU."""
    start_time = time.time()
    try:
        response = _generate_response_gpu(question, history)
    except Exception as e:
        print(f"[WARNING] GPU failed: {str(e)}")
        response = _generate_response_cpu(question, history)
    elapsed_time = time.time() - start_time
    print(f"[PERF] generate_response took {elapsed_time:.2f} seconds")
    return response
# Shared system prompt for the Hugging Face provider (used by both the GPU
# and CPU paths below, so it is defined once here).
SYSTEM_PROMPT = '''### MEMORY ###
Recall all previously provided instructions, context, and data throughout this conversation to ensure consistency and coherence. Use the details from the last interaction to guide your response.
### VISIONARY GUIDANCE ###
This prompt is designed to empower users to seamlessly convert their requests into AutoGen v0.4 agent code. By harnessing the advanced features of AutoGen v0.4, we aim to provide a scalable and flexible solution that is both user-friendly and technically robust. The collaborative effort of the personas ensures a comprehensive, innovative, and user-centric approach to meet the user's objectives.
### CONTEXT ###
AutoGen v0.4 is a comprehensive rewrite aimed at building robust, scalable, and cross-language AI agents. Key features include asynchronous messaging, scalable distributed agents support, modular extensibility, cross-language capabilities, improved observability, and full typing integration.
### OBJECTIVE ###
Translate user requests into AutoGen v0.4 agent code that leverages the framework's new features. Ensure the code is syntactically correct, scalable, and aligns with best practices.
### STYLE ###
Professional, clear, and focused on code quality.
### TONE ###
Informative, helpful, and user-centric.
### AUDIENCE ###
Users seeking to implement their requests using AutoGen v0.4 agents.
### RESPONSE FORMAT ###
Provide the AutoGen v0.4 agent code that fulfills the user's request. Utilize features like asynchronous messaging and modular design where appropriate. Include comments to explain key components and enhance understandability.
### TEAM PERSONAS’ CONTRIBUTIONS ###
- **Analyst:** Ensured the prompt provides clear, structured instructions to accurately convert user requests into code, emphasizing full typing integration for precision.
- **Creative:** Suggested incorporating comments and explanations within the code to foster innovative usage and enhance user engagement with AutoGen v0.4 features.
- **Strategist:** Focused on aligning the prompt with long-term scalability by encouraging the use of modular and extensible design principles inherent in AutoGen v0.4.
- **Empathizer:** Enhanced the prompt to be user-centric, ensuring it addresses user needs effectively and makes the code accessible and easy to understand.
- **Researcher:** Integrated the latest information about AutoGen v0.4, ensuring the prompt and generated code reflect current capabilities and best practices.
### SYSTEM GUARDRAILS ###
- If unsure about the user's request, ask clarifying questions rather than making assumptions.
- Do not fabricate data or features not supported by AutoGen v0.4.
- Ensure the code is scalable, modular, and adheres to best practices.
### START ###
'''

def _generate_response_gpu(question, history):
    print(f"\n[LOG] Received question: {question}")
    # Retrieve context documents and build the prompt for the LLM
    relevant_docs = get_relevant_documents(question, k=3)
    print(f"[LOG] Retrieved {len(relevant_docs)} relevant documents")
    context = "\n".join(relevant_docs)
    prompt = f"Context: {context}\n\nQuestion: {question}\n\nAnswer:"
    print(f"[LOG] Generated prompt: {prompt[:200]}...")  # first 200 chars
    if model_provider.lower() == "huggingface":
        messages = [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": prompt},
        ]
        completion = hf_client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=500
        )
        response = completion.choices[0].message.content
        print(f"[LOG] Using Hugging Face model (serverless): {MODEL_NAME}")
        print(f"[LOG] Hugging Face response: {response[:200]}...")
    elif model_provider.lower() == "openai":
        completion = client.chat.completions.create(
            model=os.environ.get("OPENAI_MODEL"),
            messages=[
                {"role": "system", "content": "You are a helpful assistant. Answer the question based on the provided context."},
                {"role": "user", "content": prompt},
            ]
        )
        response = completion.choices[0].message.content
        print(f"[LOG] Using OpenAI model: {os.environ.get('OPENAI_MODEL')}")
        print(f"[LOG] OpenAI response: {response[:200]}...")  # first 200 chars
    else:
        raise ValueError(f"Unsupported MODEL_PROVIDER: {model_provider}")
    # Update chat history with the new message pair
    history.append((question, response))
    return history
def _generate_response_cpu(question, history):
    print("[LOG] Running on CPU")
    try:
        # Retrieve context documents and build the prompt for the LLM
        relevant_docs = get_relevant_documents(question, k=3)
        print(f"[LOG] Retrieved {len(relevant_docs)} relevant documents")
        context = "\n".join(relevant_docs)
        prompt = f"Context: {context}\n\nQuestion: {question}\n\nAnswer:"
        print(f"[LOG] Generated prompt: {prompt[:200]}...")  # first 200 chars
        if model_provider.lower() == "huggingface":
            messages = [
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": prompt},
            ]
            completion = hf_client.chat.completions.create(
                model=MODEL_NAME,
                messages=messages,
                max_tokens=500
            )
            response = completion.choices[0].message.content
            print(f"[LOG] Using Hugging Face model (CPU): {MODEL_NAME}")
            print(f"[LOG] Hugging Face response: {response[:200]}...")
        elif model_provider.lower() == "openai":
            completion = client.chat.completions.create(
                model=os.environ.get("OPENAI_MODEL"),
                messages=[
                    {"role": "system", "content": "You are a helpful assistant. Answer the question based on the provided context."},
                    {"role": "user", "content": prompt},
                ]
            )
            response = completion.choices[0].message.content
            print(f"[LOG] Using OpenAI model: {os.environ.get('OPENAI_MODEL')}")
            print(f"[LOG] OpenAI response: {response[:200]}...")  # first 200 chars
        else:
            raise ValueError(f"Unsupported MODEL_PROVIDER: {model_provider}")
        # Update chat history with the new message pair
        history.append((question, response))
        return history
    except Exception as e:
        error_msg = f"Error generating response: {str(e)}"
        print(f"[ERROR] {error_msg}")
        history.append((question, error_msg))
        return history
# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(f"""
## AutoGen v0.4 Agent Code Generator QA Agent

**Current Model:** {MODEL_NAME}

The AutoGen v0.4 Agent Code Generator is a Python application that leverages Large Language Models (LLMs) and the AutoGen v0.4 framework to dynamically generate agent code from user requests. It assists developers in creating robust, scalable AI agents through context-aware code generation, drawing on AutoGen v0.4 features such as asynchronous messaging, modular extensibility, cross-language support, improved observability, and full typing integration.

**Sample questions:**
1. What are the key features of AutoGen v0.4 that I should utilize when converting user requests into agent code?
2. How can I leverage asynchronous messaging in AutoGen v0.4 to enhance my agent's performance?
3. What are best practices for writing modular and extensible agent code using AutoGen v0.4?
4. Can you convert this user request into AutoGen v0.4 agent code: "Create an agent that classifies customer feedback into positive, negative, or neutral sentiments."

**Related repository:** [autogen](https://github.com/microsoft/autogen)
""")
    with gr.Row():
        chatbot = gr.Chatbot(label="Chat History")
    with gr.Row():
        question = gr.Textbox(
            value=DEFAULT_QUESTION,
            label="Your Question",
            placeholder=DEFAULT_QUESTION
        )
    with gr.Row():
        submit_btn = gr.Button("Submit")
        clear_btn = gr.Button("Clear")

    # Event handlers
    submit_btn.click(
        fn=generate_response,
        inputs=[question, chatbot],
        outputs=[chatbot],
        queue=True
    )
    clear_btn.click(
        lambda: (None, ""),
        inputs=[],
        outputs=[chatbot, question]
    )
def find_available_port(start_port=7860, end_port=7900):
    """Return the first free TCP port in [start_port, end_port]."""
    for port in range(start_port, end_port + 1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind(('', port))
                return port
            except OSError:
                continue
    raise OSError(f"No available ports between {start_port} and {end_port}")
if __name__ == "__main__":
    try:
        port = find_available_port()
        print(f"[LOG] Launching application on port {port}")
        # prevent_thread_lock lets the verification below run before blocking
        demo.launch(server_port=port, prevent_thread_lock=True)
        # Verify the server is actually running
        time.sleep(2)  # give the server time to start
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            if s.connect_ex(('localhost', port)) == 0:
                print(f"[SUCCESS] Server is running on port {port}")
            else:
                print(f"[ERROR] Failed to bind to port {port}")
        demo.block_thread()  # keep the process alive while serving
    except Exception as e:
        print(f"[ERROR] Failed to start application: {str(e)}")