acecalisto3 committed
Commit 7d66d34 · verified · 1 Parent(s): dd1c6bd

Update app.py

Files changed (1):
  1. app.py +180 -0

app.py CHANGED
@@ -51,6 +51,59 @@ classifier = pipeline("text-classification", model="facebook/bart-large-mnli")
  model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1", use_auth_token=os.environ.get("huggingface_token"))
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1", use_auth_token=os.environ.get("huggingface_token"))

+ import os
+ import subprocess
+ import streamlit as st
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+ from langchain_community.llms import HuggingFaceHub
+ from langchain_community.embeddings import HuggingFaceHubEmbeddings
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain_community.vectorstores import FAISS
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.chains.question_answering import load_qa_chain
+ from llama_cpp import Llama
+ # The agent classes live in the separate llama-cpp-agent package; the original
+ # "from llama_cpp.llama_cpp_agent import get_messages_formatter_type,
+ # get_context_by_model" pointed at a module that does not exist in llama_cpp.
+ # MessagesFormatterType is that library's documented formatter type.
+ from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
+ from io import StringIO
+ import tempfile
+
+ # --- Global Variables ---
+ CURRENT_PROJECT = {}  # Store project data (code, packages, etc.)
+ MODEL_OPTIONS = {
+     "CodeQwen": "Qwen/CodeQwen1.5-7B-Chat-GGUF",
+     "Codestral": "bartowski/Codestral-22B-v0.1-GGUF",
+     "AutoCoder": "bartowski/AutoCoder-GGUF",
+ }
+ MODEL_FILENAMES = {
+     "CodeQwen": "codeqwen-1_5-7b-chat-q6_k.gguf",
+     "Codestral": "Codestral-22B-v0.1-Q6_K.gguf",
+     "AutoCoder": "AutoCoder-Q6_K.gguf",
+ }
+ HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
+ PROJECT_ROOT = "projects"
+ AGENT_DIRECTORY = "agents"
+
+ # Global state to manage communication between Tool Box and Workspace Chat App
+ if 'chat_history' not in st.session_state:
+     st.session_state.chat_history = []
+ if 'terminal_history' not in st.session_state:
+     st.session_state.terminal_history = []
+ if 'workspace_projects' not in st.session_state:
+     st.session_state.workspace_projects = {}
+ if 'available_agents' not in st.session_state:
+     st.session_state.available_agents = []
+ if 'current_state' not in st.session_state:
+     st.session_state.current_state = {
+         'toolbox': {},
+         'workspace_chat': {}
+     }
+
+ # --- Load NLP Pipelines ---
+ classifier = pipeline("text-classification", model="facebook/bart-large-mnli")
+
+ # --- Load the model and tokenizer ---
+ # Note: this repeats the classifier/model/tokenizer load already performed at
+ # lines 51-52 above.
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1", use_auth_token=os.environ.get("huggingface_token"))
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1", use_auth_token=os.environ.get("huggingface_token"))
+
  # --- Utility Functions ---
  def install_and_import(package_name):
      """Installs a package using pip and imports it."""
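The MODEL_OPTIONS / MODEL_FILENAMES tables above pair each model with a GGUF weight file, but this diff never shows how they are loaded; a minimal sketch of the usual wiring with huggingface_hub and llama-cpp-python (load_gguf_model is a hypothetical helper name; hf_hub_download and Llama are those libraries' documented APIs):

    from huggingface_hub import hf_hub_download
    from llama_cpp import Llama

    def load_gguf_model(name: str) -> Llama:
        # Look up the repo and filename in the tables from app.py, download the
        # GGUF file (or reuse the local cache), and load it with llama-cpp-python.
        path = hf_hub_download(repo_id=MODEL_OPTIONS[name], filename=MODEL_FILENAMES[name])
        return Llama(model_path=path, n_ctx=4096)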
@@ -282,6 +335,133 @@ inputs = tokenizer(prompt, return_tensors="pt")
  edited_code = tokenizer.decode(output[0], skip_special_tokens=True).split("```python\n")[1].split("\n```")[0]

+             st.success(f"Code edited successfully!\n{edited_code}")
+             update_project_data("code", edited_code)
+             # Note: st.text_area returns a plain string, so assigning to
+             # code_area.value has no effect on the widget.
+             code_area.value = edited_code
+         except Exception as e:
+             st.error(f"Error editing code: {e}")
+
+ # --- Prebuilt Tools ---
+ st.markdown("## Prebuilt Tools:")
+ with st.expander("Generate Code"):
+     code_input = st.text_area("Enter your code request:", key="code_input")
+     if st.button("Generate"):
+         code_output = generate_code_tool(code_input, chat_history)
+         st.markdown(code_output)
+
+ with st.expander("Analyze Code"):
+     code_input = st.text_area("Enter your code:", key="analyze_code_input")
+     if st.button("Analyze"):
+         analysis_output = analyze_code_tool(code_input, chat_history)
+         st.markdown(analysis_output)
+
+ # --- Additional Features ---
+ # Add features like:
+ # - Code editing
+ # - Integration with external APIs
+ # - Advanced AI agents for more complex tasks
+ # - User account management
+
+ # --- AI Agent Interaction ---
+ if USER_INTENT is None:
+     add_message("System", analyze_user_intent(input_text))
+     add_message("System", "What kind of mini-app do you have in mind?")
+ elif not MINI_APPS:
+     add_message("System", "Here are some ideas:")
+     for idea in generate_mini_app_ideas(input_text):
+         add_message("System", f"- {idea}")
+     add_message("System", "Which one would you like to build?")
+ elif CURRENT_APP["name"] is None:
+     selected_app = input_text
+     app_description = next((app for app in MINI_APPS if selected_app in app), None)
+     if app_description:
+         add_message("System", f"Generating code for {app_description}...")
+         code = generate_app_code(selected_app, app_description, "CodeQwen", history)  # Use CodeQwen by default
+         add_message("System", f"```python\n{code}\n```")
+         add_message("System", "Code generated! What else can I do for you?")
+         update_project_data("code", code)
+         update_project_data("app_name", selected_app)
+         update_project_data("app_description", app_description)
+     else:
+         add_message("System", "Please choose from the provided mini-app ideas.")
+ else:
+     add_message("System", "You already have an app in progress. Do you want to start over?")
+
+ return history, dynamic_functions
+
+ # --- Prebuilt Tools ---
+ def generate_code_tool(input_text, history):
+     """Prebuilt tool for code generation."""
+     code = generate_app_code("MyTool", "A tool to do something", "CodeQwen", history)  # Use CodeQwen by default
+     return f"```python\n{code}\n```"
+
+ def analyze_code_tool(input_text, history):
+     """Prebuilt tool for code analysis."""
+     agent = get_agent("Codestral")
+     analysis = agent.chat(input_text, history)
+     return analysis
+
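get_agent is referenced here but never defined in this hunk; a plausible sketch using the llama-cpp-agent API (LlamaCppAgent, LlamaCppPythonProvider, and MessagesFormatterType are that library's documented names; the helper itself, its system prompt, and the reuse of load_gguf_model from the earlier note are assumptions, and note that the library's documented reply method is get_chat_response rather than the .chat() call used above):

    def get_agent(name: str) -> LlamaCppAgent:
        # Hypothetical wiring: wrap a loaded GGUF model in a chat agent.
        provider = LlamaCppPythonProvider(load_gguf_model(name))
        return LlamaCppAgent(
            provider,
            system_prompt="You are an expert code analyst.",
            predefined_messages_formatter_type=MessagesFormatterType.CHATML,
        )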
+ # --- Streamlit Interface ---
+ st.title("AI4ME: Your Personal AI App Workshop")
+ st.markdown("## Let's build your dream app together! 🤖")
+
+ # --- Hugging Face Token Input ---
+ huggingface_token = st.text_input("Enter your Hugging Face Token", type="password", key="huggingface_token")
+ os.environ["huggingface_token"] = huggingface_token
+
+ # --- Chat Interface ---
+ chat_history = []
+ chat_input = st.text_input("Tell me your idea...", key="chat_input")
+ if chat_input:
+     chat_history, dynamic_functions = handle_chat(chat_input, chat_history)
+     for sender, message in chat_history:
+         st.markdown(f"**{sender}:** {message}")
+
+ # --- Code Execution and Deployment ---
+ if CURRENT_APP["code"]:
+     st.markdown("## Your App Code:")
+     code_area = st.text_area("Your App Code", value=CURRENT_APP["code"], key="code_area")
+
+     st.markdown("## Deploy Your App (Coming Soon!)")
+     # Add deployment functionality here using Streamlit's deployment features.
+     # For example, you could use Streamlit's `st.button` to trigger deployment.
+
+     # --- Code Execution ---
+     st.markdown("## Run Your App:")
+     if st.button("Execute Code"):
+         try:
+             # Note: this does not actually run the code. It feeds the code to the
+             # language model and displays the generated continuation.
+             inputs = tokenizer(code_area, return_tensors="pt")
+             output = model.generate(**inputs, max_length=500, num_return_sequences=1)
+             output = tokenizer.decode(output[0], skip_special_tokens=True)
+             st.success(f"Code executed successfully!\n{output}")
+         except Exception as e:
+             st.error(f"Error executing code: {e}")
+
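If genuine execution is wanted here, a minimal sketch using the subprocess and tempfile modules already imported at the top of the file (run_python_code is a hypothetical helper; the 10-second timeout is an arbitrary choice, and this provides no sandboxing):

    import sys

    def run_python_code(code: str, timeout: int = 10) -> str:
        # Write the code to a temporary file and run it in a fresh interpreter.
        with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
            f.write(code)
            path = f.name
        result = subprocess.run([sys.executable, path],
                                capture_output=True, text=True, timeout=timeout)
        return result.stdout + result.stderr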
+     # --- Code Editing ---
+     st.markdown("## Edit Your Code:")
+     if st.button("Edit Code"):
+         try:
+             # Ask the model to rewrite the code, then pull the result out of the
+             # fenced block in its reply.
+             prompt = f"Improve the following Python code:\n```python\n{code_area}\n```"
+             inputs = tokenizer(prompt, return_tensors="pt")
+             output = model.generate(**inputs, max_length=500, num_return_sequences=1)
+             edited_code = tokenizer.decode(output[0], skip_special_tokens=True).split("```python\n")[1].split("\n```")[0]

  st.success(f"Code edited successfully!\n{edited_code}")
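Both the execution and editing handlers recover code with split("```python\n")[1], which raises IndexError whenever the model omits the fence; a more defensive sketch (extract_code_block is a hypothetical helper, not part of this commit):

    import re

    def extract_code_block(text: str, lang: str = "python") -> str:
        # Prefer a fenced block; fall back to the raw text so a missing
        # fence degrades gracefully instead of raising IndexError.
        match = re.search(rf"```{lang}\n(.*?)\n```", text, re.DOTALL)
        return match.group(1) if match else text.strip()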