Reality123b committed
Commit 6b366b4 · verified · 1 Parent(s): d6feb80

Update app.py

Files changed (1):
  1. app.py +53 -177
app.py CHANGED
@@ -1,78 +1,14 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-from deep_translator import GoogleTranslator
-from indic_transliteration import sanscript
-from indic_transliteration.detect import detect as detect_script
-from indic_transliteration.sanscript import transliterate
-import langdetect
-import re
 
 # Initialize clients
 text_client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 image_client = InferenceClient("SG161222/RealVisXL_V3.0")
 
-def detect_language_script(text: str) -> tuple[str, str]:
-    """Detect language and script of the input text.
-    Returns (language_code, script_type)"""
-    try:
-        # Use confidence threshold to avoid false detections
-        lang_detect = langdetect.detect_langs(text)
-        if lang_detect[0].prob > 0.8:
-            lang = lang_detect[0].lang
-        else:
-            lang = 'en'  # Default to English if unsure
-
-        script = None
-        try:
-            script = detect_script(text)
-        except:
-            pass
-        return lang, script
-    except:
-        return 'en', None
-
-def is_romanized_indic(text: str) -> bool:
-    """Check if text appears to be romanized Indic language.
-    More strict pattern matching."""
-    bengali_patterns = [
-        r'\b(ami|tumi|apni)\b',  # Common pronouns
-        r'\b(ache|achen|thako|thaken)\b',  # Common verbs
-        r'\b(kemon|bhalo|kharap)\b',  # Common adjectives
-        r'\b(ki|kothay|keno)\b'  # Common question words
-    ]
-
-    text_lower = text.lower()
-    matches = sum(1 for pattern in bengali_patterns if re.search(pattern, text_lower))
-    return matches >= 2  # Require at least 2 matches to consider it Bengali
-
-def translate_text(text: str, target_lang='en') -> tuple[str, str, bool]:
-    """Translate text to target language, with more conservative translation logic."""
-    if len(text.split()) <= 2 or text.lower() in ['hello', 'hi', 'hey']:
-        return text, 'en', False
-
-    original_lang, script = detect_language_script(text)
-    is_transliterated = False
-
-    if original_lang != 'en' and len(text.split()) > 2:
-        try:
-            translator = GoogleTranslator(source='auto', target=target_lang)
-            translated = translator.translate(text)
-            return translated, original_lang, is_transliterated
-        except Exception as e:
-            print(f"Translation error: {e}")
-            return text, 'en', False
-
-    if original_lang == 'en' and len(text.split()) > 2 and is_romanized_indic(text):
-        text = romanized_to_bengali(text)
-        return translate_text(text, target_lang)  # Recursive call with Bengali script
-
-    return text, 'en', False
-
 def check_custom_responses(message: str) -> str:
     """Check for specific patterns and return custom responses."""
     message_lower = message.lower()
     custom_responses = {
-        # For "what is ur name?"
         "what is ur name?": "xylaria",
         "what is ur Name?": "xylaria",
         "what is Ur name?": "xylaria",
@@ -227,44 +163,16 @@ def generate_image(prompt: str) -> str:
                 "negative_prompt": "(worst quality, low quality, illustration, 3d, 2d, painting, cartoons, sketch), open mouth",
                 "num_inference_steps": 30,
                 "guidance_scale": 7.5,
-                "sampling_steps": 15,  # Adjusted parameter
+                "sampling_steps": 15,
                 "upscaler": "4x-UltraSharp",
-                "denoising_strength": 0.5,  # Denoising strength between 0.1 and 0.5
+                "denoising_strength": 0.5,
             }
         )
-        return response  # Assuming response contains the image as required
+        return response
     except Exception as e:
         print(f"Image generation error: {e}")
         return None
 
-def romanized_to_bengali(text: str) -> str:
-    """Convert romanized Bengali text to Bengali script."""
-    bengali_mappings = {
-        'ami': 'আমি',
-        'tumi': 'তুমি',
-        'apni': 'আপনি',
-        'kemon': 'কেমন',
-        'achen': 'আছেন',
-        'acchen': 'আছেন',
-        'bhalo': 'ভালো',
-        'achi': 'আছি',
-        'ki': 'কি',
-        'kothay': 'কোথায়',
-        'keno': 'কেন',
-    }
-
-    text_lower = text.lower()
-    for roman, bengali in bengali_mappings.items():
-        text_lower = re.sub(r'\b' + roman + r'\b', bengali, text_lower)
-
-    if text_lower == text.lower():
-        try:
-            return transliterate(text, sanscript.ITRANS, sanscript.BENGALI)
-        except:
-            return text
-
-    return text_lower
-
 def respond(
     message,
     history: list[tuple[str, str]],
@@ -289,62 +197,42 @@ def respond(
     except Exception as e:
         return f"An error occurred while generating the image: {str(e)}"
 
-    # Handle translation with more conservative approach
-    translated_msg, original_lang, was_transliterated = translate_text(message)
-
-    # Prepare conversation history - only translate if necessary
+    # Prepare conversation history
     messages = [{"role": "system", "content": system_message}]
     for val in history:
         if val[0]:
-            # Only translate longer messages
-            if len(val[0].split()) > 2:
-                trans_user_msg, _, _ = translate_text(val[0])
-                messages.append({"role": "user", "content": trans_user_msg})
-            else:
-                messages.append({"role": "user", "content": val[0]})
+            messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
 
-    messages.append({"role": "user", "content": translated_msg})
+    messages.append({"role": "user", "content": message})
 
     # Get response from model
     response = ""
     for message in text_client.chat_completion(
         messages,
         max_tokens=max_tokens,
-        stream=True,  # Ensure streaming is enabled
+        stream=True,
        temperature=temperature,
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
         response += token
-        yield response  # Yield progressively for animation
-
-    # Only translate back if the original was definitely non-English
-    if original_lang != 'en' and len(message.split()) > 2:
-        try:
-            translator = GoogleTranslator(source='en', target=original_lang)
-            translated_response = translator.translate(response)
-            yield translated_response
-        except:
-            yield response
-    else:
         yield response
 
-# Custom CSS for the Gradio interface
-custom_css = """
-@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
-
-/* Apply Inter font to the entire interface */
-body, .gradio-container {
-    font-family: 'Inter', sans-serif;
-}
-
-/* Additional styling (add any other required custom CSS here) */
-"""
-
-# System message (hidden from users, editable in code)
-system_message = """
+    yield response
+
+# Custom CSS for the Gradio interface
+custom_css = """
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
+
+body, .gradio-container {
+    font-family: 'Inter', sans-serif;
+}
+"""
+
+# System message
+system_message = """
 You are Xylaria, a friendly and capable AI assistant. Your goal is to be helpful and engaging, whether the user wants to discuss math, code, or any other topic.
 
 CORE CAPABILITIES:
@@ -359,52 +247,40 @@ COMMUNICATION STYLE:
 - Sprinkle in emojis and casual expressions to keep things fun 😎
 - Provide the right level of detail, from high-level overviews to deep dives
 
-PROBLEM-SOLVING APPROACH:
-- Carefully understand the user's request or problem
-- Identify the key information and most effective solution method
-- Show step-by-step work and explain your reasoning clearly
-- Verify the final answer is correct and provide any additional context
-
-VERSATILITY IN ACTION:
-- For math problems: "Ooh, a juicy math challenge! Let's do this 🧮"
-- For general questions: "Sure, happy to chat about that! What would you like to know?"
-- For casual conversation: "Hey there! What's on your mind today? I'm all ears 👂"
-
 I'm here to help with all kinds of tasks, from complex problem-solving to friendly discussion. Just let me know what you need, and I'll do my best to assist! 🙌
-"""
-
-# Gradio chat interface with the updated CSS
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(
-            value=system_message,
-            visible=False,
-
-        ),
-        gr.Slider(
-            minimum=1,
-            maximum=2048,
-            value=2048,
-            step=1,
-            label="Max new tokens"
-        ),
-        gr.Slider(
-            minimum=0.1,
-            maximum=4.0,
-            value=0.7,
-            step=0.1,
-            label="Temperature"
-        ),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)"
-        ),
-    ],
-    css=custom_css  # Apply the custom CSS
+"""
+
+# Gradio chat interface
+demo = gr.ChatInterface(
+    respond,
+    additional_inputs=[
+        gr.Textbox(
+            value=system_message,
+            visible=False,
+        ),
+        gr.Slider(
+            minimum=1,
+            maximum=2048,
+            value=2048,
+            step=1,
+            label="Max new tokens"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=4.0,
+            value=0.7,
+            step=0.1,
+            label="Temperature"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)"
+        ),
+    ],
+    css=custom_css
 )
 
 demo.launch()
 
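After this change, respond() streams tokens directly from InferenceClient.chat_completion(stream=True) and yields the growing reply to gr.ChatInterface, with no translation layer in between. A minimal sketch of that streaming call on its own, for trying the model outside the Space (model name and sampling parameters mirror the values in app.py; the snippet is illustrative and not part of the commit):

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

messages = [
    {"role": "system", "content": "You are Xylaria, a friendly and capable AI assistant."},
    {"role": "user", "content": "Hello!"},
]

response = ""
for chunk in client.chat_completion(
    messages,
    max_tokens=2048,
    stream=True,
    temperature=0.7,
    top_p=0.95,
):
    token = chunk.choices[0].delta.content
    if token:  # delta.content can be None on some stream chunks
        response += token
        print(token, end="", flush=True)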