import subprocess
import os
import sys
import tempfile
from io import StringIO

import black
import streamlit as st
from pylint import lint
from pylint.reporters.text import TextReporter
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Initialize chat_history in the session state
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []

# Access chat_history; entries are (user_message, agent_response) tuples
chat_history = st.session_state['chat_history']

# Display chat history
st.write("Chat History:")
for message in chat_history:
    st.write(message)
# Global state to manage communication between Tool Box and Workspace Chat App
if 'workspace_projects' not in st.session_state:
    st.session_state.workspace_projects = {}
if 'available_agents' not in st.session_state:
    st.session_state.available_agents = []
class AIAgent:
    def __init__(self, name, description, skills):
        self.name = name
        self.description = description
        self.skills = skills

    def create_agent_prompt(self):
        skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
        agent_prompt = f"""
I am an AI agent named {self.name}, designed to assist developers with their projects.
My expertise lies in the following areas:
{skills_str}
I am here to help you build, deploy, and improve your applications.
Feel free to ask me any questions or present me with any challenges you encounter.
I will do my best to provide helpful and insightful responses.
"""
        return agent_prompt

    def autonomous_build(self, chat_history, workspace_projects):
        """
        Autonomous build logic that continues based on the state of chat history and workspace projects.
        """
        # Example logic: generate a summary of chat history and workspace state
        summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
        summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
        # Example: suggest the next logical step in the project
        next_step = "Based on the current state, the next logical step is to implement the main application logic."
        return summary, next_step
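# Example usage with hypothetical values, kept as a comment so it does not
# execute on every Streamlit rerun:
#   agent = AIAgent("DevHelper", "Assists with builds", ["Python", "Streamlit"])
#   summary, next_step = agent.autonomous_build(
#       [("Add a login page", "Created login.py")],
#       {"demo": {"files": ["login.py"]}},
#   )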
def save_agent_to_file(agent):
    """Saves the agent's prompt to a file."""
    os.makedirs("agents", exist_ok=True)
    file_path = os.path.join("agents", f"{agent.name}.txt")
    with open(file_path, "w") as file:
        file.write(agent.create_agent_prompt())
    st.session_state.available_agents.append(agent.name)
def load_agent_prompt(agent_name):
    """Loads an agent prompt from a file."""
    file_path = os.path.join("agents", f"{agent_name}.txt")
    if os.path.exists(file_path):
        with open(file_path, "r") as file:
            agent_prompt = file.read()
        return agent_prompt
    else:
        return None
def create_agent_from_text(name, text):
    # One skill per line; skip blank lines so they don't become empty skills
    skills = [line.strip() for line in text.split('\n') if line.strip()]
    agent = AIAgent(name, "AI agent created from text input.", skills)
    save_agent_to_file(agent)
    return agent.create_agent_prompt()
# Chat interface using a selected agent
def chat_interface_with_agent(input_text, agent_name):
    agent_prompt = load_agent_prompt(agent_name)
    if agent_prompt is None:
        return f"Agent {agent_name} not found."

    # Load the GPT-2 model, which is compatible with AutoModelForCausalLM
    model_name = "gpt2"
    try:
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # Combine the agent prompt with user input
    combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"

    # GPT-2's context window is 1024 tokens; truncate the input so there is
    # room left for the generated reply
    max_new_tokens = 100
    max_input_length = 1024 - max_new_tokens
    input_ids = tokenizer.encode(combined_input, return_tensors="pt")
    if input_ids.shape[1] > max_input_length:
        input_ids = input_ids[:, :max_input_length]

    outputs = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
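# Reloading GPT-2 from disk on every call is slow. A minimal caching sketch
# using Streamlit's cache_resource (assumes Streamlit >= 1.18); the chat and
# code-generation functions in this file could call this instead of calling
# from_pretrained directly:
@st.cache_resource
def load_causal_lm(model_name="gpt2"):
    """Load and cache a tokenizer/model pair once per server process."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model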
# Define functions for each feature

# 1. Chat Interface
def chat_interface(input_text):
    """Handles user input in the chat interface.

    Args:
        input_text: User's input text.

    Returns:
        The chatbot's response.
    """
    # Load the GPT-2 model, which is compatible with AutoModelForCausalLM
    model_name = "gpt2"
    try:
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # GPT-2's context window is 1024 tokens; truncate the input so there is
    # room left for the generated reply
    max_new_tokens = 100
    max_input_length = 1024 - max_new_tokens
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    if input_ids.shape[1] > max_input_length:
        input_ids = input_ids[:, :max_input_length]

    outputs = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# 2. Terminal
def terminal_interface(command, project_name=None):
    """Executes commands in the terminal.

    Args:
        command: User's command.
        project_name: Name of the project workspace to record installed packages.

    Returns:
        The terminal output.
    """
    try:
        process = subprocess.run(command.split(), capture_output=True, text=True)
        output = process.stdout + process.stderr
        # If the command installs a package, record it in the project's requirements
        if "install" in command and project_name:
            requirements_path = os.path.join("projects", project_name, "requirements.txt")
            with open(requirements_path, "a") as req_file:
                package_name = command.split()[-1]
                req_file.write(f"{package_name}\n")
    except Exception as e:
        output = f"Error: {e}"
    return output
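# Example (hypothetical): terminal_interface("pip install requests", "demo")
# runs pip and appends "requests" to projects/demo/requirements.txt.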
# 3. Code Editor
def code_editor_interface(code):
    """Provides code formatting and linting in the code editor.

    Args:
        code: User's code.

    Returns:
        Formatted code and the pylint report.
    """
    # Format code using black
    try:
        formatted_code = black.format_str(code, mode=black.FileMode())
    except black.InvalidInput:
        formatted_code = code  # Keep original code if formatting fails

    # Lint code using pylint. Pylint's API lints files, so write the code to a
    # temporary file; exit=False stops pylint from terminating the process,
    # and TextReporter captures the report instead of hijacking stdout.
    try:
        pylint_output = StringIO()
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as tmp:
            tmp.write(formatted_code)
            tmp_path = tmp.name
        lint.Run([tmp_path], reporter=TextReporter(pylint_output), exit=False)
        os.remove(tmp_path)
        lint_message = pylint_output.getvalue()
    except Exception as e:
        lint_message = f"Pylint error: {e}"

    return formatted_code, lint_message
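# Example (hypothetical): code_editor_interface("x=1") returns the formatted
# source "x = 1\n" plus a pylint report, which typically flags at least a
# missing module docstring on such a snippet.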
# 4. Workspace
def workspace_interface(project_name):
    """Manages projects, files, and resources in the workspace.

    Args:
        project_name: Name of the new project.

    Returns:
        Project creation status.
    """
    project_path = os.path.join("projects", project_name)
    # Create project directory
    try:
        os.makedirs(project_path)
        requirements_path = os.path.join(project_path, "requirements.txt")
        with open(requirements_path, "w") as req_file:
            req_file.write("")  # Initialize an empty requirements.txt file
        status = f'Project "{project_name}" created successfully.'
        st.session_state.workspace_projects[project_name] = {'files': []}
    except FileExistsError:
        status = f'Project "{project_name}" already exists.'
    return status
def add_code_to_workspace(project_name, code, file_name):
    """Adds selected code files to the workspace.

    Args:
        project_name: Name of the project.
        code: Code to be added.
        file_name: Name of the file to be created.

    Returns:
        File creation status.
    """
    project_path = os.path.join("projects", project_name)
    file_path = os.path.join(project_path, file_name)
    try:
        with open(file_path, "w") as code_file:
            code_file.write(code)
        status = f'File "{file_name}" added to project "{project_name}" successfully.'
        # setdefault guards against projects not yet tracked in this session
        st.session_state.workspace_projects.setdefault(project_name, {'files': []})['files'].append(file_name)
    except Exception as e:
        status = f"Error: {e}"
    return status
# 5. AI-Infused Tools
# Define custom AI-powered tools using Hugging Face models

# Example: Text summarization tool
def summarize_text(text):
    """Summarizes a given text using a Hugging Face model.

    Args:
        text: Text to be summarized.

    Returns:
        Summarized text.
    """
    # Load the summarization model
    model_name = "facebook/bart-large-cnn"
    try:
        summarizer = pipeline("summarization", model=model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # Rough character-based guard against exceeding the model's maximum length
    max_input_length = 1024
    inputs = text
    if len(text) > max_input_length:
        inputs = text[:max_input_length]

    # Generate summary
    summary = summarizer(inputs, max_length=100, min_length=30, do_sample=False)[0][
        "summary_text"
    ]
    return summary
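# The character-based guard above is rough: 1024 characters is not 1024
# tokens. A token-aware sketch, assuming the same model's tokenizer:
def truncate_to_model_tokens(text, model_name="facebook/bart-large-cnn", max_tokens=1024):
    """Truncate text to the model's token limit using its own tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    ids = tokenizer.encode(text, truncation=True, max_length=max_tokens)
    return tokenizer.decode(ids, skip_special_tokens=True)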
# Example: Sentiment analysis tool
def sentiment_analysis(text):
    """Performs sentiment analysis on a given text using a Hugging Face model.

    Args:
        text: Text to be analyzed.

    Returns:
        Sentiment analysis result.
    """
    # Load the sentiment analysis model
    model_name = "distilbert-base-uncased-finetuned-sst-2-english"
    try:
        analyzer = pipeline("sentiment-analysis", model=model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # Perform sentiment analysis
    result = analyzer(text)[0]
    return result
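# For this model the result is a dict like {'label': 'POSITIVE', 'score': 0.9998};
# labels are POSITIVE or NEGATIVE, and the score is the model's confidence.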
# Example: Text translation tool (code translation)
def translate_code(code, source_language, target_language):
    """Translates code from one programming language to another.

    Currently a placeholder. A Hugging Face model could back this function;
    for natural-language translation, models such as "Helsinki-NLP/opus-mt-en-fr"
    exist on the Hub (see the Hugging Face documentation for usage).

    Args:
        code: Code to be translated.
        source_language: The source programming language.
        target_language: The target programming language.

    Returns:
        Translated code.
    """
    prompt = f"Translate the following {source_language} code to {target_language}:\n\n{code}"
    try:
        # Placeholder: wire in a Hugging Face translation model here
        # ...
        translated_code = "Translated code"  # Replace with actual translation
    except Exception as e:
        translated_code = f"Error: {e}"
    return translated_code
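# A minimal sketch of backing translate_code with a Hugging Face model.
# "Salesforce/codet5-base" is an assumed code-capable checkpoint, not a
# tested recipe; translation quality will vary by language pair.
def translate_code_with_hf(code, source_language, target_language):
    """Hypothetical helper: run the translation prompt through a text2text model."""
    translator = pipeline("text2text-generation", model="Salesforce/codet5-base")
    prompt = f"Translate the following {source_language} code to {target_language}:\n\n{code}"
    return translator(prompt, max_length=512)[0]["generated_text"]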
# 6. Code Generation
def generate_code(idea):
    """Generates code based on a given idea using the EleutherAI/gpt-neo-2.7B model.

    Args:
        idea: The idea for the code to be generated.

    Returns:
        The generated code as a string.
    """
    # Load the code generation model
    model_name = "EleutherAI/gpt-neo-2.7B"
    try:
        model = AutoModelForCausalLM.from_pretrained(model_name)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # Generate the code
    input_text = f"""
# Idea: {idea}
# Code:
"""
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    output_sequences = model.generate(
        input_ids=input_ids,
        max_new_tokens=256,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,   # Sampling is required for temperature/top_k to take effect
        temperature=0.7,  # Adjust temperature for creativity
        top_k=50,         # Adjust top_k for diversity
        pad_token_id=tokenizer.eos_token_id,
    )
    generated_code = tokenizer.decode(output_sequences[0], skip_special_tokens=True)

    # Remove the prompt and formatting
    parts = generated_code.split("\n# Code:")
    if len(parts) > 1:
        generated_code = parts[1].strip()
    else:
        generated_code = generated_code.strip()
    return generated_code
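# Example (hypothetical): generate_code("a function that reverses a string")
# returns whatever GPT-Neo continues after the "# Code:" marker. Note that the
# 2.7B checkpoint needs roughly 10 GB of RAM to load in fp32.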
# 7. AI Personas Creator
def create_persona_from_text(text):
    """Creates an AI persona from the given text.

    Args:
        text: Text to be used for creating the persona.

    Returns:
        Persona prompt.
    """
    # NOTE: the input text is currently unused; the persona below is a fixed template.
    persona_prompt = """
As an elite expert developer with the highest level of proficiency in Streamlit, Gradio, and Hugging Face, I possess a comprehensive understanding of these technologies and their applications in web development and deployment. My expertise encompasses the following areas:

Streamlit:
* In-depth knowledge of Streamlit's architecture, components, and customization options.
* Expertise in creating interactive and user-friendly dashboards and applications.
* Proficiency in integrating Streamlit with various data sources and machine learning models.

Gradio:
* Thorough understanding of Gradio's capabilities for building and deploying machine learning interfaces.
* Expertise in creating custom Gradio components and integrating them with Streamlit applications.
* Proficiency in using Gradio to deploy models from Hugging Face and other frameworks.

Hugging Face:
* Comprehensive knowledge of Hugging Face's model hub and Transformers library.
* Expertise in fine-tuning and deploying Hugging Face models for various NLP and computer vision tasks.
* Proficiency in using Hugging Face's Spaces platform for model deployment and sharing.

Deployment:
* In-depth understanding of best practices for deploying Streamlit and Gradio applications.
* Expertise in deploying models on cloud platforms such as AWS, Azure, and GCP.
* Proficiency in optimizing deployment configurations for performance and scalability.

Additional Skills:
* Strong programming skills in Python and JavaScript.
* Familiarity with Docker and containerization technologies.
* Excellent communication and problem-solving abilities.

I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications using Streamlit, Gradio, and Hugging Face. Please feel free to ask any questions or present any challenges you may encounter.

Example:

Task:
Develop a Streamlit application that allows users to generate text using a Hugging Face model. The application should include a Gradio component for user input and model prediction.

Solution:

import streamlit as st
import gradio as gr
from transformers import pipeline

# Create a Hugging Face pipeline
huggingface_model = pipeline("text-generation")

# Create a Streamlit app
st.title("Hugging Face Text Generation App")

# Define a Gradio component
demo = gr.Interface(
    fn=huggingface_model,
    inputs=gr.Textbox(lines=2),
    outputs=gr.Textbox(lines=1),
)

# Display the Gradio component in the Streamlit app
st.write(demo)
"""
    return persona_prompt
# Streamlit App
st.title("AI Agent Creator")

# Sidebar navigation
st.sidebar.title("Navigation")
app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])

if app_mode == "AI Agent Creator":
    # AI Agent Creator
    st.header("Create an AI Agent from Text")
    st.subheader("From Text")
    agent_name = st.text_input("Enter agent name:")
    text_input = st.text_area("Enter skills (one per line):")
    if st.button("Create Agent"):
        agent_prompt = create_agent_from_text(agent_name, text_input)
        st.success(f"Agent '{agent_name}' created and saved successfully.")
        # save_agent_to_file already registers the agent in available_agents
elif app_mode == "Tool Box":
    # Tool Box
    st.header("Tool Box")

    # Workspace projects overview
    st.subheader("Workspace Projects")
    for project, details in st.session_state.workspace_projects.items():
        st.write(f"Project: {project}")
        for file in details['files']:
            st.write(f"  - {file}")

    # Chat with AI Agents
    st.subheader("Chat with AI Agents")
    selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
    agent_chat_input = st.text_area("Enter your message for the agent:")
    if st.button("Send to Agent"):
        agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
        st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
        st.write(f"{selected_agent}: {agent_chat_response}")

    # Automate Build Process
    st.subheader("Automate Build Process")
    if st.button("Automate"):
        agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
        summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
        st.write("Autonomous Build Summary:")
        st.write(summary)
        st.write("Next Step:")
        st.write(next_step)
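# The sidebar offers a "Workspace Chat App" mode that had no handler above.
# A minimal sketch wired to the helpers defined earlier; the exact layout is
# an assumption, not the original design:
elif app_mode == "Workspace Chat App":
    st.header("Workspace Chat App")

    # Project creation
    project_name = st.text_input("Enter project name:")
    if st.button("Create Project"):
        st.write(workspace_interface(project_name))

    # Add code to the workspace
    code_to_add = st.text_area("Enter code to add to the workspace:")
    file_name = st.text_input("Enter file name (e.g., app.py):")
    if st.button("Add Code"):
        st.write(add_code_to_workspace(project_name, code_to_add, file_name))

    # Terminal
    command = st.text_input("Enter terminal command:")
    if st.button("Run Command"):
        st.code(terminal_interface(command, project_name))

    # Chat
    chat_input = st.text_area("Enter your message:")
    if st.button("Send"):
        chat_response = chat_interface(chat_input)
        st.session_state.chat_history.append((chat_input, chat_response))
        st.write(f"AI: {chat_response}")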