acecalisto3 committed on
Commit
a9ef0b6
·
verified ·
1 Parent(s): 51fec96

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -55
app.py CHANGED
@@ -24,6 +24,14 @@ if 'available_clusters' not in st.session_state:
24
  st.session_state.available_clusters = []
25
  if 'current_project' not in st.session_state:
26
  st.session_state.current_project = None
 
 
 
 
 
 
 
 
27
 
28
  # --- Agent Class ---
29
  class AIAgent:
@@ -114,25 +122,19 @@ def chat_interface_with_agent(input_text, agent_name):
114
  if agent_prompt is None:
115
  return f"Agent {agent_name} not found."
116
 
117
- # Load the GPT-2 model which is compatible with AutoModelForCausalLM
118
- model_name = "gpt2"
119
  try:
120
- model = AutoModelForCausalLM.from_pretrained(model_name)
121
- tokenizer = AutoTokenizer.from_pretrained(model_name)
122
  except EnvironmentError as e:
123
  return f"Error loading model: {e}"
124
 
125
  # Combine the agent prompt with user input
126
  combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
127
-
128
- # Truncate input text to avoid exceeding the model's maximum length
129
- max_input_length = model.config.max_length
130
- input_ids = tokenizer.encode(combined_input, return_tensors="pt")
131
- if input_ids.shape[1] > max_input_length:
132
- input_ids = input_ids[:, :max_input_length]
133
 
134
- outputs = model.generate(input_ids, max_length=max_input_length, do_sample=True)
135
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
136
  return response
137
 
138
  def chat_interface_with_cluster(input_text, cluster_name):
@@ -140,11 +142,10 @@ def chat_interface_with_cluster(input_text, cluster_name):
140
  if agent_names is None:
141
  return f"Cluster {cluster_name} not found."
142
 
143
- # Load the GPT-2 model which is compatible with AutoModelForCausalLM
144
- model_name = "gpt2"
145
  try:
146
- model = AutoModelForCausalLM.from_pretrained(model_name)
147
- tokenizer = AutoTokenizer.from_pretrained(model_name)
148
  except EnvironmentError as e:
149
  return f"Error loading model: {e}"
150
 
@@ -154,14 +155,9 @@ def chat_interface_with_cluster(input_text, cluster_name):
154
  agent_prompt = load_agent_prompt(agent_name)
155
  combined_input += f"\n{agent_name}:\n{agent_prompt}\n"
156
 
157
- # Truncate input text to avoid exceeding the model's maximum length
158
- max_input_length = model.config.max_length
159
- input_ids = tokenizer.encode(combined_input, return_tensors="pt")
160
- if input_ids.shape[1] > max_input_length:
161
- input_ids = input_ids[:, :max_input_length]
162
-
163
- outputs = model.generate(input_ids, max_length=max_input_length, do_sample=True)
164
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
165
  return response
166
 
167
  # --- Code Editor ---
@@ -251,22 +247,20 @@ def sentiment_analysis(text):
251
  return result
252
 
253
  def translate_code(code, source_language, target_language):
254
- """Translates code from one programming language to another using OpenAI Codex."""
255
- # You might want to replace this with a Hugging Face translation model
256
- # for example, "Helsinki-NLP/opus-mt-en-fr"
257
- # Refer to Hugging Face documentation for model usage.
258
- prompt = f"Translate the following {source_language} code to {target_language}:\n\n{code}"
259
  try:
260
- # Use a Hugging Face translation model instead of OpenAI Codex
261
- # ...
262
- translated_code = "Translated code" # Replace with actual translation
263
- except Exception as e:
264
- translated_code = f"Error: {e}"
 
265
  return translated_code
266
 
267
  def generate_code(idea):
268
- """Generates code based on a given idea using the EleutherAI/gpt-neo-2.7B model."""
269
- model_name = "EleutherAI/gpt-neo-2.7B"
270
  try:
271
  model = AutoModelForCausalLM.from_pretrained(model_name)
272
  tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -449,14 +443,13 @@ def deploy_locally(build_dir):
449
  st.success(f"Project deployed locally!")
450
 
451
  # --- Streamlit App ---
452
- st.title("AI Agent Creator")
453
 
454
- # --- Sidebar Navigation ---
455
- st.sidebar.title("Navigation")
456
- app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
457
 
458
  # --- AI Agent Creator ---
459
- if app_mode == "AI Agent Creator":
460
  st.header("Create an AI Agent from Text")
461
 
462
  st.subheader("From Text")
@@ -480,7 +473,7 @@ if app_mode == "AI Agent Creator":
480
  st.session_state.available_clusters.append(cluster_name)
481
 
482
  # --- Tool Box ---
483
- elif app_mode == "Tool Box":
484
  st.header("Tool Box")
485
 
486
  # --- Workspace ---
@@ -497,8 +490,12 @@ elif app_mode == "Tool Box":
497
  agent_chat_input = st.text_area("Enter your message:")
498
  if st.button("Send"):
499
  if selected_agent_or_cluster in st.session_state.available_agents:
 
 
500
  agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent_or_cluster)
501
  elif selected_agent_or_cluster in st.session_state.available_clusters:
 
 
502
  agent_chat_response = chat_interface_with_cluster(agent_chat_input, selected_agent_or_cluster)
503
  else:
504
  agent_chat_response = "Invalid selection."
@@ -508,15 +505,22 @@ elif app_mode == "Tool Box":
508
  # --- Automate Build Process ---
509
  st.subheader("Automate Build Process")
510
  if st.button("Automate"):
511
- agent = AIAgent(selected_agent_or_cluster, "", []) # Load the agent without skills for now
512
- summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
513
- st.write("Autonomous Build Summary:")
514
- st.write(summary)
515
- st.write("Next Step:")
516
- st.write(next_step)
 
 
 
 
 
 
 
517
 
518
  # --- Workspace Chat App ---
519
- elif app_mode == "Workspace Chat App":
520
  st.header("Workspace Chat App")
521
 
522
  # --- Project Selection ---
@@ -530,8 +534,12 @@ elif app_mode == "Workspace Chat App":
530
  agent_chat_input = st.text_area("Enter your message:")
531
  if st.button("Send"):
532
  if selected_agent_or_cluster in st.session_state.available_agents:
 
 
533
  agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent_or_cluster)
534
  elif selected_agent_or_cluster in st.session_state.available_clusters:
 
 
535
  agent_chat_response = chat_interface_with_cluster(agent_chat_input, selected_agent_or_cluster)
536
  else:
537
  agent_chat_response = "Invalid selection."
@@ -610,6 +618,8 @@ elif app_mode == "Workspace Chat App":
610
  hf_token = st.text_input("Enter your Hugging Face token:")
611
  repo_name = st.text_input("Enter your Hugging Face Space repository name:")
612
  if st.button("Deploy to Hugging Face Spaces"):
 
 
613
  # Implement Hugging Face Spaces deployment logic here
614
  deploy_to_huggingface(build_dir, hf_token, repo_name)
615
  elif deployment_target == "Local":
@@ -619,9 +629,7 @@ elif app_mode == "Workspace Chat App":
619
  else:
620
  st.warning("Please select a project first.")
621
 
622
- # --- Run the Streamlit App ---
623
- if __name__ == "__main__":
624
- st.set_page_config(page_title="AI Agent Creator", page_icon="🤖")
625
- st.write("This is the AI Agent Creator application.")
626
- st.write("You can create AI agents and agent clusters, and use them to chat, generate code, and more.")
627
- st.write("You can also manage your project workspace, build and deploy your projects, and use AI tools.")
 
24
  st.session_state.available_clusters = []
25
  if 'current_project' not in st.session_state:
26
  st.session_state.current_project = None
27
+ if 'current_agent' not in st.session_state:
28
+ st.session_state.current_agent = None
29
+ if 'current_cluster' not in st.session_state:
30
+ st.session_state.current_cluster = None
31
+ if 'hf_token' not in st.session_state:
32
+ st.session_state.hf_token = None
33
+ if 'repo_name' not in st.session_state:
34
+ st.session_state.repo_name = None
35
 
36
  # --- Agent Class ---
37
  class AIAgent:
 
122
  if agent_prompt is None:
123
  return f"Agent {agent_name} not found."
124
 
125
+ # Use a more powerful language model (GPT-3 or similar) for better chat experience
126
+ model_name = "text-davinci-003" # Replace with your preferred GPT-3 model
127
  try:
128
+ model = transformers_pipeline("text-generation", model=model_name)
 
129
  except EnvironmentError as e:
130
  return f"Error loading model: {e}"
131
 
132
  # Combine the agent prompt with user input
133
  combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
 
 
 
 
 
 
134
 
135
+ # Generate response
136
+ response = model(combined_input, max_length=200, temperature=0.7, top_p=0.95, do_sample=True)[0]['generated_text']
137
+ response = response.split("Agent:")[1].strip() # Extract the agent's response
138
  return response
139
 
140
  def chat_interface_with_cluster(input_text, cluster_name):
 
142
  if agent_names is None:
143
  return f"Cluster {cluster_name} not found."
144
 
145
+ # Use a more powerful language model (GPT-3 or similar) for better chat experience
146
+ model_name = "text-davinci-003" # Replace with your preferred GPT-3 model
147
  try:
148
+ model = transformers_pipeline("text-generation", model=model_name)
 
149
  except EnvironmentError as e:
150
  return f"Error loading model: {e}"
151
 
 
155
  agent_prompt = load_agent_prompt(agent_name)
156
  combined_input += f"\n{agent_name}:\n{agent_prompt}\n"
157
 
158
+ # Generate response
159
+ response = model(combined_input, max_length=200, temperature=0.7, top_p=0.95, do_sample=True)[0]['generated_text']
160
+ response = response.split("User:")[1].strip() # Extract the agent's response
 
 
 
 
 
161
  return response
162
 
163
  # --- Code Editor ---
 
247
  return result
248
 
249
def translate_code(code, source_language, target_language):
    """Translates code from one programming language to another using a Hugging Face model.

    NOTE(review): "Helsinki-NLP/opus-mt-en-fr" is a natural-language
    English->French translation model, not a code-translation model, and the
    source_language/target_language arguments are not used to select it —
    TODO: replace with a genuine code-translation model keyed on the two
    language parameters.

    Args:
        code: The source text to translate.
        source_language: Name of the language `code` is written in (currently unused).
        target_language: Name of the language to translate into (currently unused).

    Returns:
        The model's translation output, or an error string if the model
        fails to load.
    """
    model_name = "Helsinki-NLP/opus-mt-en-fr"  # Replace with your preferred translation model
    try:
        translator = pipeline("translation", model=model_name)
    except EnvironmentError as e:
        return f"Error loading model: {e}"

    # opus-mt models are trained for a fixed language pair, so the pipeline
    # call takes no target-language argument.  (The previous version passed an
    # invalid `target_lang` kwarg, which raises TypeError at call time,
    # outside the try block above.)
    translated_code = translator(code)[0]['translation_text']
    return translated_code
260
 
261
  def generate_code(idea):
262
+ """Generates code based on a given idea using a Hugging Face model."""
263
+ model_name = "bigcode/starcoder" # Replace with your preferred code generation model
264
  try:
265
  model = AutoModelForCausalLM.from_pretrained(model_name)
266
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
443
  st.success(f"Project deployed locally!")
444
 
445
  # --- Streamlit App ---
446
+ st.set_page_config(page_title="AI Agent Creator", page_icon="🤖")
447
 
448
+ # --- Tabs for Navigation ---
449
+ tabs = st.tabs(["AI Agent Creator", "Tool Box", "Workspace Chat App"])
 
450
 
451
  # --- AI Agent Creator ---
452
+ with tabs[0]:
453
  st.header("Create an AI Agent from Text")
454
 
455
  st.subheader("From Text")
 
473
  st.session_state.available_clusters.append(cluster_name)
474
 
475
  # --- Tool Box ---
476
+ with tabs[1]:
477
  st.header("Tool Box")
478
 
479
  # --- Workspace ---
 
490
  agent_chat_input = st.text_area("Enter your message:")
491
  if st.button("Send"):
492
  if selected_agent_or_cluster in st.session_state.available_agents:
493
+ st.session_state.current_agent = selected_agent_or_cluster
494
+ st.session_state.current_cluster = None
495
  agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent_or_cluster)
496
  elif selected_agent_or_cluster in st.session_state.available_clusters:
497
+ st.session_state.current_agent = None
498
+ st.session_state.current_cluster = selected_agent_or_cluster
499
  agent_chat_response = chat_interface_with_cluster(agent_chat_input, selected_agent_or_cluster)
500
  else:
501
  agent_chat_response = "Invalid selection."
 
505
  # --- Automate Build Process ---
506
  st.subheader("Automate Build Process")
507
  if st.button("Automate"):
508
+ if st.session_state.current_agent:
509
+ agent = AIAgent(st.session_state.current_agent, "", []) # Load the agent without skills for now
510
+ summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
511
+ st.write("Autonomous Build Summary:")
512
+ st.write(summary)
513
+ st.write("Next Step:")
514
+ st.write(next_step)
515
+ elif st.session_state.current_cluster:
516
+ # Implement cluster-based automation logic here
517
+ # ...
518
+ st.warning("Cluster-based automation is not yet implemented.")
519
+ else:
520
+ st.warning("Please select an agent or cluster first.")
521
 
522
  # --- Workspace Chat App ---
523
+ with tabs[2]:
524
  st.header("Workspace Chat App")
525
 
526
  # --- Project Selection ---
 
534
  agent_chat_input = st.text_area("Enter your message:")
535
  if st.button("Send"):
536
  if selected_agent_or_cluster in st.session_state.available_agents:
537
+ st.session_state.current_agent = selected_agent_or_cluster
538
+ st.session_state.current_cluster = None
539
  agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent_or_cluster)
540
  elif selected_agent_or_cluster in st.session_state.available_clusters:
541
+ st.session_state.current_agent = None
542
+ st.session_state.current_cluster = selected_agent_or_cluster
543
  agent_chat_response = chat_interface_with_cluster(agent_chat_input, selected_agent_or_cluster)
544
  else:
545
  agent_chat_response = "Invalid selection."
 
618
  hf_token = st.text_input("Enter your Hugging Face token:")
619
  repo_name = st.text_input("Enter your Hugging Face Space repository name:")
620
  if st.button("Deploy to Hugging Face Spaces"):
621
+ st.session_state.hf_token = hf_token
622
+ st.session_state.repo_name = repo_name
623
  # Implement Hugging Face Spaces deployment logic here
624
  deploy_to_huggingface(build_dir, hf_token, repo_name)
625
  elif deployment_target == "Local":
 
629
  else:
630
  st.warning("Please select a project first.")
631
 
632
+ # --- Hugging Face Space Deployment (After Building) ---
633
+ if st.session_state.hf_token and st.session_state.repo_name:
634
+ st.write("Deploying to Hugging Face Spaces...")
635
+ deploy_to_huggingface(build_dir, st.session_state.hf_token, st.session_state.repo_name)