wuhp committed · verified
Commit a8b69bf · Parent: d5d2a94

Update app.py

Files changed (1):
  app.py (+16 −14)
app.py CHANGED
@@ -437,7 +437,7 @@ with gr.Blocks() as demo:
                 outputs=gr.Textbox(label="Fine-tuning Status", interactive=False),
                 title="⚙️ Fine-tuning (Optional)",
                 description="This section allows you to fine-tune the custom R1 model on a small subset of the ServiceNow dataset. This step is optional but can potentially improve the model's performance on ServiceNow-related tasks. **Note:** This process may take up to 5 minutes.",
-                submit_btn="🚀 Start Fine-tuning (QLoRA)" # Changed from submit_button_text to submit_btn
+                submit_btn="🚀 Start Fine-tuning (QLoRA)"
             ),
             gr.Interface(
                 fn=predict,
@@ -451,7 +451,7 @@ with gr.Blocks() as demo:
                 outputs=gr.Textbox(label="Custom R1 Output", lines=8, interactive=False),
                 title="✍️ Direct Generation",
                 description="Enter a prompt to generate text directly using the custom R1 model. This is standard text generation without retrieval augmentation.",
-                submit_btn="✨ Generate Text" # Changed from submit_button_text to submit_btn
+                submit_btn="✨ Generate Text"
             ),
             gr.Interface(
                 fn=compare_models,
@@ -468,27 +468,29 @@ with gr.Blocks() as demo:
                 ],
                 title="🆚 Model Comparison",
                 description="Enter a prompt to compare the text generation of your fine-tuned custom R1 model with the official DeepSeek-R1-Distill-Llama-8B model.",
-                submit_btn="⚖️ Compare Models" # Changed from submit_button_text to submit_btn
+                submit_btn="⚖️ Compare Models"
             ),
+            # Corrected gr.ChatInterface - inputs are NOT passed here
             gr.ChatInterface(
                 fn=chat_rag,
                 chatbot=gr.Chatbot(label="RAG Chatbot"),
                 textbox=gr.Textbox(placeholder="Ask a question to the RAG Chatbot...", lines=2, show_label=False),
-                inputs=[
-                    "textbox",
-                    gr.State([]), # chat_state
-                    gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature"),
-                    gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p"),
-                    gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens"),
-                    gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens")
-                ],
+                # inputs parameter REMOVED from gr.ChatInterface
                 title="💬 RAG Chat",
                 description="Chat with the custom R1 model, enhanced with retrieval-augmented memory. The model retrieves relevant info for informed responses.",
-                submit_button="➡️ Send",
-                clear_btn=None # Optional: You can add a clear button if needed
-            )
+                submit_button="➡️ Send", # Correct parameter name is submit_button for ChatInterface
+                clear_btn=None
+            ),
+            # Sliders defined OUTSIDE ChatInterface, but within the gr.Blocks() for the RAG Chat Tab
+            with gr.Row():
+                temperature_chat = gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature", visible=False) # Initially hidden, can be shown if needed
+                top_p_chat = gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p", visible=False) # Initially hidden, can be shown if needed
+                min_tokens_chat = gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens", visible=False) # Initially hidden, can be shown if needed
+                max_tokens_chat = gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens", visible=False) # Initially hidden, can be shown if needed
+
         ]
     ).render():
         pass # No need for extra elements outside the tabs now

+
demo.launch()
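
Note on the committed hunk: the new `with gr.Row():` block sits between the closing `),` of the `gr.ChatInterface` and the `]` that closes the list of tab interfaces, and its slider values are not wired to `chat_rag` in the lines shown. Gradio's documented way to attach such controls to a `gr.ChatInterface` is the `additional_inputs` parameter, whose values are passed to the chat function after `message` and `history`. Below is a minimal, self-contained sketch of that approach; the `chat_rag` signature and placeholder body here are assumptions for illustration, not the implementation in app.py.

import gradio as gr

# Hypothetical signature: values from additional_inputs arrive after message and history.
def chat_rag(message, history, temperature, top_p, min_new_tokens, max_new_tokens):
    # Placeholder for the app's retrieval-augmented generation call.
    return f"(demo) temperature={temperature}, top_p={top_p}, min={min_new_tokens}, max={max_new_tokens}"

rag_chat = gr.ChatInterface(
    fn=chat_rag,
    chatbot=gr.Chatbot(label="RAG Chatbot"),
    textbox=gr.Textbox(placeholder="Ask a question to the RAG Chatbot...", lines=2, show_label=False),
    title="💬 RAG Chat",
    description="Chat with the custom R1 model, enhanced with retrieval-augmented memory.",
    additional_inputs=[
        gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature"),
        gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p"),
        gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens"),
        gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens"),
    ],
)

if __name__ == "__main__":
    rag_chat.launch()

With `additional_inputs`, the sliders render in a collapsible section beneath the chat textbox, so no `gr.Row()` wrapper is needed, and `gr.ChatInterface` manages the conversation history itself, which is why the earlier `gr.State([])` is also unnecessary.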