Andre committed
Commit f8b6265 · 1 Parent(s): 5005b98
Files changed (4)
  1. app2.py +1 -8
  2. src/__init__.py +0 -0
  3. src/gradio_interface.py +2 -2
  4. src/img_gen_logic +1 -1
app2.py CHANGED
@@ -1,12 +1,5 @@
 # main.py
-import sys
-import os
-
-# Add the /src directory to the Python path
-sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
-
-# Import necessary modules
-from src.gradio_interface import demo
+from src.gradio_interface import demo  # Direct import
 
 if __name__ == "__main__":
     # Launch the Gradio app
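For reference, app2.py after this commit reduces to the sketch below. The diff truncates at the "# Launch the Gradio app" comment, so the demo.launch() call is an assumption based on that comment and standard Gradio usage.

# app2.py after this commit (sketch)
from src.gradio_interface import demo  # Direct import

if __name__ == "__main__":
    # Launch the Gradio app
    demo.launch()  # assumed: not visible in the truncated diff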
src/__init__.py DELETED
(empty file)
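Deleting the empty src/__init__.py does not break app2.py's import: on Python 3.3+ a directory without __init__.py is still importable as an implicit namespace package (PEP 420).

# src/ remains importable as a namespace package, so this still resolves:
from src.gradio_interface import demo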
src/gradio_interface.py CHANGED
@@ -1,7 +1,7 @@
 # src/gradio_interface.py
 import gradio as gr
-from src.img_gen_logic import generate_image
-from src.config import prompts, models
+from img_gen_logic import generate_image  # Direct import
+from config import prompts, models  # Direct import
 
 def generate(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
     try:
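Note that these bare imports only resolve when src/ itself is on sys.path, for example when the app is started from inside src/ or the hosting environment adds it. The diff also truncates before demo is defined; a minimal sketch of how generate might be wired into it follows, with every component and label assumed, since none of this is visible in the hunk.

# Hypothetical interface wiring -- the real definition sits below the
# truncated hunk and may differ in layout and components.
demo = gr.Interface(
    fn=generate,                                             # shown in the diff
    inputs=[
        gr.Dropdown(choices=list(prompts), label="Prompt"),  # assumed
        gr.Textbox(label="Team"),                            # assumed
        gr.Dropdown(choices=list(models), label="Model"),    # assumed
        gr.Textbox(label="Custom prompt"),                   # assumed
    ],
    outputs=gr.Image(label="Generated image"),               # assumed
)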
src/img_gen_logic CHANGED
@@ -3,7 +3,7 @@ import random
 from huggingface_hub import InferenceClient
 from PIL import Image
 from datetime import datetime
-from src.config import api_token, models, prompts
+from config import api_token, models, prompts  # Direct import
 
 def generate_image(prompt_alias, team, model_alias, custom_prompt, height=360, width=640, num_inference_steps=20, guidance_scale=2.0, seed=-1):
     # Debugging: Check if the token is available
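The body of generate_image is truncated in the diff; given the imports (random, InferenceClient, PIL) and the signature, its core presumably resembles the sketch below. Every name and call here is an assumption except the huggingface_hub API itself.

# Hypothetical core of generate_image (sketch, not the repo's actual code)
def _generate_image_sketch(prompt, model_id,
                           height=360, width=640,
                           num_inference_steps=20, guidance_scale=2.0,
                           seed=-1):
    client = InferenceClient(model=model_id, token=api_token)
    if seed == -1:
        # the -1 default suggests "pick a random seed"
        seed = random.randint(0, 2**32 - 1)
    # text_to_image returns a PIL.Image.Image; the seed kwarg is supported
    # in recent huggingface_hub releases
    return client.text_to_image(
        prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        seed=seed,
    )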