wuhp committed · verified
Commit de4b11a · 1 Parent(s): a8b69bf

Update app.py

Files changed (1)
  1. app.py +14 -15
app.py CHANGED
@@ -437,7 +437,7 @@ with gr.Blocks() as demo:
             outputs=gr.Textbox(label="Fine-tuning Status", interactive=False),
             title="⚙️ Fine-tuning (Optional)",
             description="This section allows you to fine-tune the custom R1 model on a small subset of the ServiceNow dataset. This step is optional but can potentially improve the model's performance on ServiceNow-related tasks. **Note:** This process may take up to 5 minutes.",
-            submit_btn="🚀 Start Fine-tuning (QLoRA)"
+            # submit_button_text="🚀 Start Fine-tuning (QLoRA)" # REMOVE THIS LINE
         ),
         gr.Interface(
             fn=predict,
@@ -451,7 +451,7 @@ with gr.Blocks() as demo:
             outputs=gr.Textbox(label="Custom R1 Output", lines=8, interactive=False),
             title="✍️ Direct Generation",
             description="Enter a prompt to generate text directly using the custom R1 model. This is standard text generation without retrieval augmentation.",
-            submit_btn="✨ Generate Text"
+            # submit_button_text="✨ Generate Text" # REMOVE THIS LINE
         ),
         gr.Interface(
             fn=compare_models,
@@ -468,26 +468,25 @@ with gr.Blocks() as demo:
             ],
             title="🆚 Model Comparison",
             description="Enter a prompt to compare the text generation of your fine-tuned custom R1 model with the official DeepSeek-R1-Distill-Llama-8B model.",
-            submit_btn="⚖️ Compare Models"
+            # submit_button_text="⚖️ Compare Models" # REMOVE THIS LINE
         ),
-        # Corrected gr.ChatInterface - inputs are NOT passed here
         gr.ChatInterface(
             fn=chat_rag,
             chatbot=gr.Chatbot(label="RAG Chatbot"),
             textbox=gr.Textbox(placeholder="Ask a question to the RAG Chatbot...", lines=2, show_label=False),
-            # inputs parameter REMOVED from gr.ChatInterface
+            inputs=[
+                "textbox",
+                gr.State([]), # chat_state
+                gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature"),
+                gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p"),
+                gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens"),
+                gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens")
+            ],
             title="💬 RAG Chat",
             description="Chat with the custom R1 model, enhanced with retrieval-augmented memory. The model retrieves relevant info for informed responses.",
-            submit_button="➡️ Send", # Correct parameter name is submit_button for ChatInterface
-            clear_btn=None
-        ),
-        # Sliders defined OUTSIDE ChatInterface, but within the gr.Blocks() for the RAG Chat Tab
-        with gr.Row():
-            temperature_chat = gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature", visible=False) # Initially hidden, can be shown if needed
-            top_p_chat = gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p", visible=False) # Initially hidden, can be shown if needed
-            min_tokens_chat = gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens", visible=False) # Initially hidden, can be shown if needed
-            max_tokens_chat = gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens", visible=False) # Initially hidden, can be shown if needed
-
+            submit_button="➡️ Send",
+            clear_btn=None # Optional: You can add a clear button if needed
+        )
     ]
 ).render():
     pass # No need for extra elements outside the tabs now
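
In effect, the new version drops the unsupported submit_btn keyword from the three gr.Interface tabs and wires the generation sliders into the chat tab through gr.ChatInterface itself rather than standalone, hidden sliders. Below is a minimal, self-contained sketch of that pattern; it is an illustration under stated assumptions, not app.py itself: it targets Gradio 4.x, uses Gradio's documented additional_inputs parameter rather than the inputs= keyword shown in the diff, and replaces chat_rag with a placeholder chat_fn.

import gradio as gr

def chat_fn(message, history, temperature, top_p, min_new_tokens, max_new_tokens):
    # Placeholder chat function: a real implementation would run retrieval + generation here.
    return (f"(placeholder) got {message!r} with temperature={temperature}, "
            f"top_p={top_p}, min_new_tokens={min_new_tokens}, max_new_tokens={max_new_tokens}")

demo = gr.ChatInterface(
    fn=chat_fn,
    chatbot=gr.Chatbot(label="RAG Chatbot"),
    textbox=gr.Textbox(placeholder="Ask a question to the RAG Chatbot...", lines=2, show_label=False),
    # Components in additional_inputs are passed to chat_fn after (message, history),
    # in the order listed; slider ranges mirror the values in the diff above.
    additional_inputs=[
        gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature"),
        gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p"),
        gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens"),
        gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens"),
    ],
    title="💬 RAG Chat",
)

if __name__ == "__main__":
    demo.launch()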