acecalisto3 committed on
Commit 9c5cb7f · verified · 1 Parent(s): c3c0071

Update app.py

Files changed (1):
  app.py +78 -262
app.py CHANGED
@@ -1,21 +1,12 @@
 import streamlit as st
 import os
 import subprocess
+import random
+import string
+from huggingface_hub import cached_download, hf_hub_url
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import black
-from pylint import lint
-from io import StringIO
-import sys
-
-PROJECT_ROOT = "projects"
-
-# Global state to manage communication between Tool Box and Workspace Chat App
-if 'chat_history' not in st.session_state:
-    st.session_state.chat_history = []
-if 'terminal_history' not in st.session_state:
-    st.session_state.terminal_history = []
-if 'workspace_projects' not in st.session_state:
-    st.session_state.workspace_projects = {}
+import pylint
 
 # Define functions for each feature
 
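Note on the new imports: `cached_download` is deprecated in recent releases of `huggingface_hub` and has been removed in newer ones, so `from huggingface_hub import cached_download` can fail depending on the installed version. A minimal sketch of the same single-file download with the current `hf_hub_download` helper (the repo id and filename mirror the call made later in this commit):

```python
from huggingface_hub import hf_hub_download

# Download (and cache) one file from a model repo; returns the local file path.
config_path = hf_hub_download(repo_id="google/flan-t5-xl", filename="config.json")
print(config_path)
```
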
@@ -29,36 +20,22 @@ def chat_interface(input_text):
     Returns:
         The chatbot's response.
     """
-    # Load the GPT-2 model which is compatible with AutoModelForCausalLM
-    model_name = "gpt2"
-    try:
-        model = AutoModelForCausalLM.from_pretrained(model_name)
-        tokenizer = AutoTokenizer.from_pretrained(model_name)
-        generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
-    except EnvironmentError as e:
-        return f"Error loading model: {e}"
-
-    # Truncate input text to avoid exceeding the model's maximum length
-    max_input_length = 900
-    input_ids = tokenizer.encode(input_text, return_tensors="pt")
-    if input_ids.shape[1] > max_input_length:
-        input_ids = input_ids[:, :max_input_length]
+    # Load the appropriate language model from Hugging Face
+    model_name = 'google/flan-t5-xl'  # Choose a suitable model
+    model_url = hf_hub_url(repo_id=model_name, revision='main', filename='config.json')
+    model_path = cached_download(model_url)
+    generator = pipeline('text-generation', model=model_path)
 
     # Generate chatbot response
-    outputs = model.generate(
-        input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True
-    )
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    response = generator(input_text, max_length=50, num_return_sequences=1, do_sample=True)[0]['generated_text']
     return response
 
-
 # 2. Terminal
-def terminal_interface(command, project_name=None):
+def terminal_interface(command):
     """Executes commands in the terminal.
 
     Args:
         command: User's command.
-        project_name: Name of the project workspace to add installed packages.
 
     Returns:
         The terminal output.
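A caveat on the rewritten `chat_interface`: it downloads only `config.json` and passes that file path to `pipeline('text-generation', ...)`, which does not give the pipeline a loadable checkpoint, and `google/flan-t5-xl` is an encoder-decoder model, so the causal `text-generation` task does not match it. A hedged sketch of a working variant that simply lets `transformers` resolve the checkpoint by name and uses the `text2text-generation` task (a substitute loading path, not what the commit ships):

```python
from transformers import pipeline

# Resolve and cache the full checkpoint by repo id; flan-t5 is a seq2seq model,
# so the matching pipeline task is text2text-generation.
generator = pipeline("text2text-generation", model="google/flan-t5-xl")

def chat_interface(input_text: str) -> str:
    """Return a single generated reply for the given prompt."""
    result = generator(input_text, max_length=50, do_sample=True)
    return result[0]["generated_text"]
```
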
@@ -67,18 +44,10 @@ def terminal_interface(command, project_name=None):
     try:
         process = subprocess.run(command.split(), capture_output=True, text=True)
         output = process.stdout
-
-        # If the command is to install a package, update the workspace
-        if "install" in command and project_name:
-            requirements_path = os.path.join(PROJECT_ROOT, project_name, "requirements.txt")
-            with open(requirements_path, "a") as req_file:
-                package_name = command.split()[-1]
-                req_file.write(f"{package_name}\n")
     except Exception as e:
-        output = f"Error: {e}"
+        output = f'Error: {e}'
     return output
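The simplified `terminal_interface` above returns only `process.stdout`, so a failing command (which typically writes to stderr) comes back as an empty string. A small sketch that also surfaces stderr and the exit code; the extra reporting is an assumption about what the UI should show, not part of the commit:

```python
import subprocess

def terminal_interface(command: str) -> str:
    """Run a command and return stdout, plus stderr and the exit code on failure."""
    try:
        process = subprocess.run(command.split(), capture_output=True, text=True)
    except Exception as e:  # e.g. FileNotFoundError for an unknown executable
        return f"Error: {e}"
    output = process.stdout
    if process.returncode != 0:
        output += f"\n[exit code {process.returncode}]\n{process.stderr}"
    return output
```
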
 
-
 # 3. Code Editor
 def code_editor_interface(code):
     """Provides code completion, formatting, and linting in the code editor.
@@ -97,19 +66,14 @@ def code_editor_interface(code):
 
     # Lint code using pylint
     try:
-        pylint_output = StringIO()
-        sys.stdout = pylint_output
-        sys.stderr = pylint_output
-        lint.Run(['--from-stdin'], stdin=StringIO(formatted_code))
-        sys.stdout = sys.__stdout__
-        sys.stderr = sys.__stderr__
-        lint_message = pylint_output.getvalue()
+        pylint_output = pylint.run(formatted_code, output=None)
+        lint_results = pylint_output.linter.stats.get('global_note', 0)
+        lint_message = f"Pylint score: {lint_results:.2f}"
     except Exception as e:
         lint_message = f"Pylint error: {e}"
 
     return formatted_code, lint_message
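`pylint` does not expose a top-level `pylint.run(code, output=None)` helper, so the new linting branch above will always fall through to the `except` clause. One hedged way to lint an in-memory string is to write it to a temporary file and invoke the `pylint` command line (this assumes pylint is installed and on PATH; the helper name `lint_code` is illustrative):

```python
import os
import subprocess
import tempfile

def lint_code(formatted_code: str) -> str:
    """Write the code to a temp file, run pylint on it, and return the report text."""
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(formatted_code)
        tmp_path = tmp.name
    try:
        result = subprocess.run(["pylint", tmp_path], capture_output=True, text=True)
        return result.stdout or result.stderr
    finally:
        os.unlink(tmp_path)
```
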
 
-
 # 4. Workspace
 def workspace_interface(project_name):
     """Manages projects, files, and resources in the workspace.
@@ -120,43 +84,14 @@ def workspace_interface(project_name):
     Returns:
         Project creation status.
     """
-    project_path = os.path.join(PROJECT_ROOT, project_name)
     # Create project directory
     try:
-        os.makedirs(project_path)
-        requirements_path = os.path.join(project_path, "requirements.txt")
-        with open(requirements_path, "w") as req_file:
-            req_file.write("")  # Initialize an empty requirements.txt file
-        status = f'Project "{project_name}" created successfully.'
-        st.session_state.workspace_projects[project_name] = {'files': []}
+        os.makedirs(os.path.join('projects', project_name))
+        status = f'Project \"{project_name}\" created successfully.'
     except FileExistsError:
-        status = f'Project "{project_name}" already exists.'
-    return status
-
-def add_code_to_workspace(project_name, code, file_name):
-    """Adds selected code files to the workspace.
-
-    Args:
-        project_name: Name of the project.
-        code: Code to be added.
-        file_name: Name of the file to be created.
-
-    Returns:
-        File creation status.
-    """
-    project_path = os.path.join(PROJECT_ROOT, project_name)
-    file_path = os.path.join(project_path, file_name)
-
-    try:
-        with open(file_path, "w") as code_file:
-            code_file.write(code)
-        status = f'File "{file_name}" added to project "{project_name}" successfully.'
-        st.session_state.workspace_projects[project_name]['files'].append(file_name)
-    except Exception as e:
-        status = f"Error: {e}"
+        status = f'Project \"{project_name}\" already exists.'
     return status
 
-
 # 5. AI-Infused Tools
 
 # Define custom AI-powered tools using Hugging Face models
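With `add_code_to_workspace` and the `requirements.txt` bootstrap removed, project creation is now a bare `os.makedirs` call. A compact `pathlib` variant that also reports other OS errors instead of letting them escape (an illustrative alternative, not the committed code):

```python
from pathlib import Path

def workspace_interface(project_name: str) -> str:
    """Create projects/<project_name> and report the outcome."""
    project_path = Path("projects") / project_name
    try:
        project_path.mkdir(parents=True, exist_ok=False)
        return f'Project "{project_name}" created successfully.'
    except FileExistsError:
        return f'Project "{project_name}" already exists.'
    except OSError as e:
        return f"Error creating project: {e}"
```
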
@@ -171,72 +106,13 @@ def summarize_text(text):
     Returns:
         Summarized text.
     """
-    # Load the summarization model
-    model_name = "facebook/bart-large-cnn"
-    try:
-        summarizer = pipeline("summarization", model=model_name)
-    except EnvironmentError as e:
-        return f"Error loading model: {e}"
-
-    # Truncate input text to avoid exceeding the model's maximum length
-    max_input_length = 1024
-    inputs = text
-    if len(text) > max_input_length:
-        inputs = text[:max_input_length]
-
-    # Generate summary
-    summary = summarizer(inputs, max_length=100, min_length=30, do_sample=False)[0][
-        "summary_text"
-    ]
+    summarizer = pipeline('summarization', model='facebook/bart-large-cnn')
+    summary = summarizer(text, max_length=100, min_length=30)[0]['summary_text']
     return summary
 
-# Example: Sentiment analysis tool
-def sentiment_analysis(text):
-    """Performs sentiment analysis on a given text using a Hugging Face model.
-
-    Args:
-        text: Text to be analyzed.
-
-    Returns:
-        Sentiment analysis result.
-    """
-    # Load the sentiment analysis model
-    model_name = "distilbert-base-uncased-finetuned-sst-2-english"
-    try:
-        analyzer = pipeline("sentiment-analysis", model=model_name)
-    except EnvironmentError as e:
-        return f"Error loading model: {e}"
-
-    # Perform sentiment analysis
-    result = analyzer(text)[0]
-    return result
-
-# Example: Text translation tool
-def translate_text(text, target_language="fr"):
-    """Translates a given text to the target language using a Hugging Face model.
-
-    Args:
-        text: Text to be translated.
-        target_language: The language to translate the text to.
-
-    Returns:
-        Translated text.
-    """
-    # Load the translation model
-    model_name = f"Helsinki-NLP/opus-mt-en-{target_language}"
-    try:
-        translator = pipeline("translation", model=model_name)
-    except EnvironmentError as e:
-        return f"Error loading model: {e}"
-
-    # Translate text
-    translated_text = translator(text)[0]["translation_text"]
-    return translated_text
-
-
 # 6. Code Generation
 def generate_code(idea):
-    """Generates code based on a given idea using the EleutherAI/gpt-neo-2.7B model.
+    """Generates code based on a given idea using the bigscience/T0_3B model.
 
     Args:
         idea: The idea for the code to be generated.
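The shortened `summarize_text` drops the old input-length guard, and `facebook/bart-large-cnn` accepts at most 1024 tokens, so long inputs can raise an error at generation time. The pipeline can truncate for you; a hedged sketch (the `truncation=True` flag is the standard `transformers` option, the rest mirrors the committed call):

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

def summarize_text(text: str) -> str:
    """Summarize text, truncating inputs that exceed the model's limit."""
    result = summarizer(text, max_length=100, min_length=30, truncation=True)
    return result[0]["summary_text"]
```
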
@@ -246,12 +122,9 @@ def generate_code(idea):
     """
 
     # Load the code generation model
-    model_name = "EleutherAI/gpt-neo-2.7B"
-    try:
-        model = AutoModelForCausalLM.from_pretrained(model_name)
-        tokenizer = AutoTokenizer.from_pretrained(model_name)
-    except EnvironmentError as e:
-        return f"Error loading model: {e}"
+    model_name = 'bigscience/T0_3B'  # Choose your model
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
 
     # Generate the code
     input_text = f"""
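`bigscience/T0_3B` is a T5-style encoder-decoder checkpoint, so `AutoModelForCausalLM.from_pretrained` raises for it at runtime. A minimal sketch of loading the same checkpoint with the matching seq2seq auto-classes (only the auto-class is swapped; the generation call is illustrative):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "bigscience/T0_3B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Illustrative generation call with the seq2seq model.
inputs = tokenizer("Write a Python function that reverses a string.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
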
@@ -275,120 +148,63 @@ def generate_code(idea):
 
     return generated_code
 
-
 # Streamlit App
 st.title("CodeCraft: Your AI-Powered Development Toolkit")
 
-# Sidebar navigation
-st.sidebar.title("Navigation")
-app_mode = st.sidebar.selectbox("Choose the app mode", ["Tool Box", "Workspace Chat App"])
-
-if app_mode == "Tool Box":
-    # Tool Box
-    st.header("AI-Powered Tools")
-
-    # Chat Interface
-    st.subheader("Chat with CodeCraft")
-    chat_input = st.text_area("Enter your message:")
-    if st.button("Send"):
-        chat_response = chat_interface(chat_input)
-        st.session_state.chat_history.append((chat_input, chat_response))
-        st.write(f"CodeCraft: {chat_response}")
-
-    # Terminal Interface
-    st.subheader("Terminal")
-    terminal_input = st.text_input("Enter a command:")
-    if st.button("Run"):
-        terminal_output = terminal_interface(terminal_input)
-        st.session_state.terminal_history.append((terminal_input, terminal_output))
-        st.code(terminal_output, language="bash")
-
-    # Code Editor Interface
-    st.subheader("Code Editor")
-    code_editor = st.text_area("Write your code:", height=300)
-    if st.button("Format & Lint"):
-        formatted_code, lint_message = code_editor_interface(code_editor)
-        st.code(formatted_code, language="python")
-        st.info(lint_message)
-
-    # Text Summarization Tool
-    st.subheader("Summarize Text")
-    text_to_summarize = st.text_area("Enter text to summarize:")
-    if st.button("Summarize"):
-        summary = summarize_text(text_to_summarize)
-        st.write(f"Summary: {summary}")
-
-    # Sentiment Analysis Tool
-    st.subheader("Sentiment Analysis")
-    sentiment_text = st.text_area("Enter text for sentiment analysis:")
-    if st.button("Analyze Sentiment"):
-        sentiment = sentiment_analysis(sentiment_text)
-        st.write(f"Sentiment: {sentiment}")
-
-    # Text Translation Tool
-    st.subheader("Translate Text")
-    translation_text = st.text_area("Enter text to translate:")
-    target_language = st.text_input("Enter target language code (e.g., 'fr' for French):")
-    if st.button("Translate"):
-        translated_text = translate_text(translation_text, target_language)
-        st.write(f"Translated Text: {translated_text}")
-
-    # Code Generation
-    st.subheader("Code Generation")
-    code_idea = st.text_input("Enter your code idea:")
-    if st.button("Generate Code"):
+# Workspace Selection
+st.header("Select Workspace")
+project_name = st.selectbox("Choose a project", os.listdir('projects'))
+
+# Chat Interface
+st.header("Chat with CodeCraft")
+chat_input = st.text_area("Enter your message:")
+if st.button("Send"):
+    chat_response = chat_interface(chat_input)
+    st.write(f"CodeCraft: {chat_response}")
+
+# Terminal Interface
+st.header("Terminal")
+terminal_input = st.text_input("Enter a command:")
+if st.button("Run"):
+    terminal_output = terminal_interface(terminal_input)
+    st.code(terminal_output, language="bash")
+
+# Code Editor Interface
+st.header("Code Editor")
+code_editor = st.text_area("Write your code:", language="python", height=300)
+if st.button("Format & Lint"):
+    formatted_code, lint_message = code_editor_interface(code_editor)
+    st.code(formatted_code, language="python")
+    st.info(lint_message)
+
+# AI-Infused Tools
+st.header("AI-Powered Tools")
+text_to_summarize = st.text_area("Enter text to summarize:")
+if st.button("Summarize"):
+    summary = summarize_text(text_to_summarize)
+    st.write(f"Summary: {summary}")
+
+# Code Generation
+st.header("Code Generation")
+code_idea = st.text_input("Enter your code idea:")
+if st.button("Generate Code"):
+    try:
         generated_code = generate_code(code_idea)
         st.code(generated_code, language="python")
+    except Exception as e:
+        st.error(f"Error generating code: {e}")
 
-elif app_mode == "Workspace Chat App":
-    # Workspace Chat App
-    st.header("Workspace Chat App")
-
-    # Project Workspace Creation
-    st.subheader("Create a New Project")
-    project_name = st.text_input("Enter project name:")
-    if st.button("Create Project"):
-        workspace_status = workspace_interface(project_name)
-        st.success(workspace_status)
-
-    # Add Code to Workspace
-    st.subheader("Add Code to Workspace")
-    code_to_add = st.text_area("Enter code to add to workspace:")
-    file_name = st.text_input("Enter file name (e.g., 'app.py'):")
-    if st.button("Add Code"):
-        add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
-        st.success(add_code_status)
-
-    # Terminal Interface with Project Context
-    st.subheader("Terminal (Workspace Context)")
-    terminal_input = st.text_input("Enter a command within the workspace:")
-    if st.button("Run Command"):
-        terminal_output = terminal_interface(terminal_input, project_name)
-        st.code(terminal_output, language="bash")
-
-    # Chat Interface for Guidance
-    st.subheader("Chat with CodeCraft for Guidance")
-    chat_input = st.text_area("Enter your message for guidance:")
-    if st.button("Get Guidance"):
-        chat_response = chat_interface(chat_input)
-        st.session_state.chat_history.append((chat_input, chat_response))
-        st.write(f"CodeCraft: {chat_response}")
-
-    # Display Chat History
-    st.subheader("Chat History")
-    for user_input, response in st.session_state.chat_history:
-        st.write(f"User: {user_input}")
-        st.write(f"CodeCraft: {response}")
-
-    # Display Terminal History
-    st.subheader("Terminal History")
-    for command, output in st.session_state.terminal_history:
-        st.write(f"Command: {command}")
-        st.code(output, language="bash")
-
-    # Display Projects and Files
-    st.subheader("Workspace Projects")
-    for project, details in st.session_state.workspace_projects.items():
-        st.write(f"Project: {project}")
-        for file in details['files']:
-            st.write(f" - {file}")
+# Launch Chat App
+if st.button("Launch Chat App"):
+    # Get the current working directory
+    cwd = os.getcwd()
+
+    # Construct the command to launch the chat app
+    command = f"cd projects/{project_name} && streamlit run chat_app.py"
+
+    # Execute the command
+    try:
+        process = subprocess.run(command.split(), capture_output=True, text=True)
+        st.write(f"Chat app launched successfully!")
+    except Exception as e:
+        st.error(f"Error launching chat app: {e}")
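Two runtime issues in the new UI block are worth flagging: `st.text_area` has no `language` parameter, so the code editor line raises a `TypeError`, and the launch handler passes `cd projects/... && streamlit run chat_app.py` through `command.split()` without a shell, so `cd` and `&&` are never interpreted. A hedged sketch of the launch step using subprocess's `cwd` argument instead (it assumes a `chat_app.py` exists inside the selected project, as the committed command does):

```python
import os
import subprocess

def launch_chat_app(project_name: str) -> str:
    """Start `streamlit run chat_app.py` inside the selected project directory."""
    project_dir = os.path.join("projects", project_name)
    try:
        # Non-blocking launch; cwd= replaces the shell-only `cd ... &&` idiom.
        subprocess.Popen(["streamlit", "run", "chat_app.py"], cwd=project_dir)
        return "Chat app launched successfully!"
    except Exception as e:
        return f"Error launching chat app: {e}"
```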
 