wuhp committed on
Commit
98bb51a
·
verified ·
1 Parent(s): de4b11a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -17
app.py CHANGED
@@ -436,8 +436,7 @@ with gr.Blocks() as demo:
436
  inputs=None,
437
  outputs=gr.Textbox(label="Fine-tuning Status", interactive=False),
438
  title="⚙️ Fine-tuning (Optional)",
439
- description="This section allows you to fine-tune the custom R1 model on a small subset of the ServiceNow dataset. This step is optional but can potentially improve the model's performance on ServiceNow-related tasks. **Note:** This process may take up to 5 minutes.",
440
- # submit_button_text="🚀 Start Fine-tuning (QLoRA)" # REMOVE THIS LINE
441
  ),
442
  gr.Interface(
443
  fn=predict,
@@ -450,8 +449,7 @@ with gr.Blocks() as demo:
450
  ],
451
  outputs=gr.Textbox(label="Custom R1 Output", lines=8, interactive=False),
452
  title="✍️ Direct Generation",
453
- description="Enter a prompt to generate text directly using the custom R1 model. This is standard text generation without retrieval augmentation.",
454
- # submit_button_text="✨ Generate Text" # REMOVE THIS LINE
455
  ),
456
  gr.Interface(
457
  fn=compare_models,
@@ -467,29 +465,19 @@ with gr.Blocks() as demo:
467
  gr.Textbox(label="Official R1 Output", lines=6, interactive=False)
468
  ],
469
  title="🆚 Model Comparison",
470
- description="Enter a prompt to compare the text generation of your fine-tuned custom R1 model with the official DeepSeek-R1-Distill-Llama-8B model.",
471
- # submit_button_text="⚖️ Compare Models" # REMOVE THIS LINE
472
  ),
473
  gr.ChatInterface(
474
  fn=chat_rag,
475
  chatbot=gr.Chatbot(label="RAG Chatbot"),
476
  textbox=gr.Textbox(placeholder="Ask a question to the RAG Chatbot...", lines=2, show_label=False),
477
- inputs=[
478
- "textbox",
479
- gr.State([]), # chat_state
480
- gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature"),
481
- gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p"),
482
- gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens"),
483
- gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens")
484
- ],
485
  title="💬 RAG Chat",
486
  description="Chat with the custom R1 model, enhanced with retrieval-augmented memory. The model retrieves relevant info for informed responses.",
487
  submit_button="➡️ Send",
488
- clear_btn=None # Optional: You can add a clear button if needed
489
  )
490
  ]
491
- ).render():
492
- pass # No need for extra elements outside the tabs now
493
 
494
 
495
  demo.launch()
 
436
  inputs=None,
437
  outputs=gr.Textbox(label="Fine-tuning Status", interactive=False),
438
  title="⚙️ Fine-tuning (Optional)",
439
+ description="This section allows you to fine-tune the custom R1 model on a small subset of the ServiceNow dataset. This step is optional but can potentially improve the model's performance on ServiceNow-related tasks. **Note:** This process may take up to 5 minutes."
 
440
  ),
441
  gr.Interface(
442
  fn=predict,
 
449
  ],
450
  outputs=gr.Textbox(label="Custom R1 Output", lines=8, interactive=False),
451
  title="✍️ Direct Generation",
452
+ description="Enter a prompt to generate text directly using the custom R1 model. This is standard text generation without retrieval augmentation."
 
453
  ),
454
  gr.Interface(
455
  fn=compare_models,
 
465
  gr.Textbox(label="Official R1 Output", lines=6, interactive=False)
466
  ],
467
  title="🆚 Model Comparison",
468
+ description="Enter a prompt to compare the text generation of your fine-tuned custom R1 model with the official DeepSeek-R1-Distill-Llama-8B model."
 
469
  ),
470
  gr.ChatInterface(
471
  fn=chat_rag,
472
  chatbot=gr.Chatbot(label="RAG Chatbot"),
473
  textbox=gr.Textbox(placeholder="Ask a question to the RAG Chatbot...", lines=2, show_label=False),
 
 
 
 
 
 
 
 
474
  title="💬 RAG Chat",
475
  description="Chat with the custom R1 model, enhanced with retrieval-augmented memory. The model retrieves relevant info for informed responses.",
476
  submit_button="➡️ Send",
477
+ clear_btn=None
478
  )
479
  ]
480
+ ).render()
 
481
 
482
 
483
  demo.launch()