Update app.py

app.py CHANGED
@@ -14,7 +14,7 @@ from datetime import datetime
 import sqlite3
 from datetime import datetime
 
-
+token=os.getenv('HF_TOKEN')
 # Initialize the database
 def init_db(file='logs.db'):
     conn = sqlite3.connect(file)
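The only change in this hunk is the new module-level read of the Hugging Face token from the environment. A minimal sketch of the same pattern with a guard for a missing secret (the guard is an illustration, not part of this commit; it assumes `os` is already imported at the top of app.py and that HF_TOKEN is configured as a Space secret):

import os

# Read the access token from the HF_TOKEN environment variable (a Space secret).
token = os.getenv('HF_TOKEN')
if token is None:
    # Hypothetical guard: fail early with a clear message instead of passing None downstream.
    raise RuntimeError("HF_TOKEN is not set; add it as a secret in the Space settings.")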
@@ -70,7 +70,51 @@ def check_ubuse(prompt,word_list=["little girl"]):
     return prompt
 
 def enhance_prompt(prompt, model="mistralai/Mistral-7B-Instruct-v0.1", style="photo-realistic"):
 
+    from gradio_client import Client
+
+    client = Client("K00B404/HugChatWrap")
+    result = client.predict(
+        message=prompt,
+        chat_history=[],
+        system_message="""
+You are a pragmatic coding assistant specializing in Python. Your task is to strictly respond with **Python code only**, ensuring all explanations and comments are embedded within the script using **multi-line comment blocks** (`### or #`).
+
+**Response Requirements:**
+- **No external ### Explanation ### ** All descriptions, justifications, and context must be inside the script.
+- **Follow OOP principles** where applicable, improving maintainability and extensibility.
+- **Ensure compliance with PEP8 and autopep8 formatting.**
+- **Enhance and refactor the provided script**, making it a more efficient, readable, and reusable # IMPROVED PYTHON CODE #.
+- **At the end of every script, include a '### Future Features ###' comment block** outlining possible enhancements.
+
+**Example Response Format:**
+```python
+# filename.py
+# Module: Improved Script v1.0
+# Description: [Brief explanation of script functionality]
+
+# IMPROVED PYTHON CODE #
+
+### Explanation ###
+#- inside comment block.
+
+### Future Features ###
+#- Suggested improvement 1
+#- Suggested improvement 2
+```
+
+Now, improve and enhance the following script:""",
+        max_tokens=2048,
+        temperature=0.7,
+        top_p=0.9,
+        model_name= "mistralai/Mistral-Nemo-Instruct-2407",#"Qwen/Qwen2.5-Coder-3B-Instruct",
+        api_name="/respond"
+    )
+    print(result)
+
+
+
+    '''
     client = Client("K00B404/Mistral-Nemo-custom")
 
     system_prompt=f"""
@@ -86,7 +130,7 @@ def enhance_prompt(prompt, model="mistralai/Mistral-7B-Instruct-v0.1", style="ph
         max_tokens=256,
         model_id=model,# "mistralai/Mistral-Nemo-Instruct-2407",
         api_name="/predict"
-    )
+    )'''
     return result
 
 # The output value that appears in the "Response" Textbox component.
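Taken together with the previous hunk, enhance_prompt now sends the user prompt to the K00B404/HugChatWrap Space through gradio_client and returns whatever its /respond endpoint produces, while the old K00B404/Mistral-Nemo-custom call survives only inside a triple-quoted string (effectively commented out). Note that the system_message added above instructs the model to act as a Python coding assistant rather than an image-prompt enhancer. A minimal standalone sketch of the same call, assuming gradio_client is installed, the Space is running, and /respond accepts the keyword parameters shown in the diff (the shortened system message below is a placeholder, not the one from the commit):

from gradio_client import Client

def call_hugchatwrap(prompt: str) -> str:
    # Connect to the same public Space the rewritten enhance_prompt uses.
    client = Client("K00B404/HugChatWrap")
    # Keyword arguments mirror the ones added in the diff.
    return client.predict(
        message=prompt,
        chat_history=[],
        system_message="You are an image generation prompt enhancer. Reply only with the enhanced prompt.",
        max_tokens=2048,
        temperature=0.7,
        top_p=0.9,
        model_name="mistralai/Mistral-Nemo-Instruct-2407",
        api_name="/respond",
    )

print(call_hugchatwrap("a lighthouse on a cliff at dawn, 35mm lens, hd"))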
@@ -100,26 +144,6 @@ def enhance_prompt(prompt, model="mistralai/Mistral-7B-Instruct-v0.1", style="ph
 """
 
 
-def enhance_prompt_v2(prompt, model="mistralai/Mistral-Nemo-Instruct-2407", style="photo-realistic"):
-
-    client = Client("K00B404/Mistral-Nemo-custom")
-
-    system_prompt=f"""
-    You are a image generation prompt enhancer specialized in the {style} style.
-    You must respond only with the enhanced version of the users input prompt
-    Remember, image generation models can be stimulated by refering to camera 'effect' in the prompt like :4k ,award winning, super details, 35mm lens, hd
-    """
-    user_message=f"###input image generation prompt### {prompt}"
-
-    result = client.predict(
-        system_prompt=system_prompt,
-        user_message=user_message,
-        max_tokens=256,
-        model_id=model,
-        api_name="/predict"
-    )
-    return result
-
 
 def mistral_nemo_call(prompt, API_TOKEN, model="mistralai/Mistral-Nemo-Instruct-2407", style="photo-realistic"):
 
@@ -172,7 +196,7 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Ka
 
     original_prompt = prompt
     if enhance_prompt_option:
-        prompt =
+        prompt = enhance_prompt(prompt, API_TOKEN=API_TOKEN, style=enhance_prompt_style)
         print(f'\033[1mGeneration {key} enhanced prompt:\033[0m {prompt}')
     if use_mistral_nemo:
         prompt = mistral_nemo_call(prompt, API_TOKEN=API_TOKEN, style=nemo_enhance_prompt_style)
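The call site in query() now passes API_TOKEN and enhance_prompt_style keywords to enhance_prompt, but the enhance_prompt signature earlier in this diff only declares prompt, model, and style, so the call as written would raise a TypeError at runtime. One way to keep the call site unchanged is to accept the extra keyword in the signature (an illustrative sketch, not part of the commit):

def enhance_prompt(prompt, API_TOKEN=None, model="mistralai/Mistral-7B-Instruct-v0.1", style="photo-realistic"):
    # Accept API_TOKEN so the keyword call in query() does not raise TypeError;
    # the HugChatWrap request in the new body does not use it, so it can simply be ignored here.
    ...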