Update app.py
app.py CHANGED
@@ -1,18 +1,27 @@
+import os
 import json
 import time
 from typing import Dict, List, Tuple
 
 import gradio as gr
 import streamlit as st
-import streamlit_chat
 from huggingface_hub import InferenceClient, hf_hub_url, cached_download
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+from rich import print as rprint
+from rich.panel import Panel
+from rich.progress import track
+from rich.table import Table
+import subprocess
+import threading
 import git
-from
-from
-from
-from
-from
-from
+from langchain.llms import HuggingFaceHub
+from langchain.chains import ConversationChain
+from langchain.memory import ConversationBufferMemory
+from langchain.chains.question_answering import load_qa_chain
+from langchain.text_splitter import CharacterTextSplitter
+from langchain_community.document_loaders import TextLoader
+from streamlit_ace import st_ace
+from streamlit_chat import st_chat
 
 # --- Constants ---
 MODEL_NAME = "google/flan-t5-xl"  # Consider using a more powerful model like 'google/flan-t5-xl'
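The new imports bring langchain's conversation stack in next to the raw `InferenceClient`, but the hunk does not show how those pieces are wired. A minimal sketch under assumed settings (the repo ID reuses `MODEL_NAME`, the temperature is picked arbitrarily, and `HUGGINGFACEHUB_API_TOKEN` must be set in the environment) might look like:

```python
# Minimal sketch, not the app's actual wiring: connects the langchain
# imports added above. repo_id and temperature are assumed values.
from langchain.llms import HuggingFaceHub
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

llm = HuggingFaceHub(
    repo_id="google/flan-t5-xl",        # mirrors MODEL_NAME in the diff
    model_kwargs={"temperature": 0.7},  # assumed setting
)
conversation = ConversationChain(
    llm=llm,
    memory=ConversationBufferMemory(),  # keeps the running dialogue in memory
)
reply = conversation.predict(input="Scaffold a Flask project")
```

Two of the added lines deserve a second look: `streamlit_chat` documents `message` as its entry point rather than `st_chat`, and `cached_download` is deprecated in recent `huggingface_hub` releases, so both imports may fail on a fresh environment.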
@@ -30,16 +39,6 @@ def load_model_and_tokenizer():
 
 model, tokenizer = load_model_and_tokenizer()
 
-PRETRAINED_MODEL_NAME = "distilbert-base-uncased"
-model_path = os.path.join(os.getcwd(), PRETRAINED_MODEL_NAME)
-if not os.path.exists(model_path):
-    raise FileNotFoundError("Pre-trained model weight directory {} doesn't exist".format(model_path))
-else:
-    print("Found Pre-trained Model at:", model_path)
-    tokenizer = GPT2Tokenizer.from_pretrained(model_path)
-# Download the DistilBERT tokenizer (~3 MB)
-DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased').save_pretrained('./cache/distilbert-base-uncased-local')
-
 # --- Agents ---
 agents = {
     "WEB_DEV": {
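This hunk deletes the stray DistilBERT/GPT-2 bootstrap (which referenced `GPT2Tokenizer` and `DistilBertTokenizerFast` without importing either) and leaves `load_model_and_tokenizer()` as the single load path. The function body sits outside the hunk; a plausible cached implementation is sketched below, assuming Streamlit's `st.cache_resource` and noting that T5-family checkpoints load through `AutoModelForSeq2SeqLM` rather than the `AutoModelForCausalLM` imported above:

```python
# Hypothetical body for load_model_and_tokenizer(); the diff only shows
# the call site, so the cache decorator and auto classes are assumptions.
import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_NAME = "google/flan-t5-xl"  # mirrors the constant in app.py

@st.cache_resource  # load the weights once per process, not on every rerun
def load_model_and_tokenizer():
    # T5 checkpoints are encoder-decoder models, so Seq2SeqLM is the
    # correct auto class; AutoModelForCausalLM would raise for this repo.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
    return model, tokenizer

model, tokenizer = load_model_and_tokenizer()
```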
@@ -114,15 +113,8 @@ def display_workspace_projects():
 
 def display_chat_history():
     st.subheader("Chat History")
-
-
-    if idx % 2 == 0:
-        role = "User:"
-    else:
-        role = "Assistant:"
-    html_string += f"<p>{role}</p>"
-    html_string += f"<p>{message}</p>"
-    st.markdown(html_string, unsafe_allow_html=True)
+    for message in st.session_state.chat_history:
+        st.text(message)
 
 def run_autonomous_build(selected_agents: List[str], project_name: str):
     st.info("Starting autonomous build process...")
|