Spaces:
Sleeping
Sleeping
File size: 3,530 Bytes
9385133 83a0c1c a6c95f0 44f3235 a6c95f0 83a0c1c ced35f5 83a0c1c ced35f5 83a0c1c ced35f5 83a0c1c ced35f5 a6c95f0 44f3235 2392edf a6c95f0 f8050cc a6c95f0 0fe79b2 2392edf a6c95f0 0fe79b2 a6c95f0 ca96d09 9424140 ca96d09 e6899f3 356fbbf 83a0c1c a6c95f0 83a0c1c 0fe79b2 2776f22 a6c95f0 44f3235 a6c95f0 df01613 83a0c1c f383dcc 83a0c1c f383dcc a6c95f0 83a0c1c a6c95f0 83a0c1c a6c95f0 9385133 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 |
import gradio as gr
import os
def process_input(user_input):
    """Echo the submitted text back to the user with a confirming prefix."""
    return "You entered: {}".format(user_input)
# Load predefined example prompts from plain-text files.
def get_example(example_root=None):
    """Read every ``.txt`` file in the examples directory.

    Args:
        example_root: Directory to scan for example files. Defaults to the
            ``examples`` directory located next to this module.

    Returns:
        list[str]: File contents, in sorted filename order so the display
        order is deterministic (``os.listdir`` order is arbitrary).
    """
    if example_root is None:
        example_root = os.path.join(os.path.dirname(__file__), "examples")
    examples = []
    for name in sorted(os.listdir(example_root)):
        # Require the full ".txt" extension; the bare suffix "txt" would
        # also match filenames like "notes_txt".
        if not name.endswith(".txt"):
            continue
        file_path = os.path.join(example_root, name)
        # errors="ignore" keeps a single badly-encoded file from crashing the UI.
        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            examples.append(f.read())
    return examples
# Create the header section
def create_header():
    """Render the page title inside a full-width row.

    Fix: the heading previously read ``#Content Agent`` — Markdown requires
    a space after ``#``, so it rendered as literal text instead of an <h1>.
    """
    agent_header = """
# Content Agent
"""
    with gr.Row():
        gr.Markdown("<div id='header'>" + agent_header + "</div>")
# Create the user guidance section
def create_user_guidance():
    """Render the usage instructions describing the politeness classifier."""
    guidance = """
Please enter text below to get started. The AI Agent will try to determine whether the language is polite and uses the following classification:
- `polite`
- `somewhat polite`
- `neutral`
- `impolite`
App is running `deepseek-ai/DeepSeek-R1-Distill-Qwen-32B` text generation model.
Uses Intel's Polite Guard NLP library.
Compute is GCP · Nvidia L4 · 4x GPUs · 96 GB
"""
    with gr.Row():
        gr.Markdown("<div id='user-guidance'>" + guidance + "</div>")
# Create the main content section
def create_main():
    """Build the input/output panel and wire up both submission paths.

    Returns:
        tuple: (input textbox, output textbox) so later sections can
        target the same components.
    """
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(label="Your Input", placeholder="Enter something here...")
            submit_btn = gr.Button("Submit")
            feedback_box = gr.Textbox(label="Content feedback", interactive=False, lines=10, max_lines=20)
            # Run the same handler on Enter in the textbox or a button click.
            user_input.submit(process_input, inputs=user_input, outputs=feedback_box)
            submit_btn.click(process_input, inputs=user_input, outputs=feedback_box)
    return user_input, feedback_box
# Create the examples section
def create_examples(user_input):
    """Offer preset example prompts; picking one fills the input textbox."""
    choices = get_example()
    picker = gr.Radio(choices=choices, label="Try one of these examples:")
    # Copy the selected example text straight into the user's input field.
    picker.change(fn=lambda selected: selected, inputs=picker, outputs=user_input)
# Create the footer section
def create_footer():
    """Render the closing footer row."""
    footer_markup = "<div id='footer'>Thanks for trying it out!</div>"
    with gr.Row():
        gr.Markdown(footer_markup)
# Main function to bring all sections together
def ContentAgentUI():
    """Assemble every UI section into a gr.Blocks app and launch it.

    NOTE(review): the stylesheet path is resolved against the current
    working directory, not this module's location — confirm the app is
    always launched from the project root, or styles will be missing.
    """
    stylesheet = os.path.join(os.getcwd(), "ui", "styles.css")
    with gr.Blocks(css=stylesheet) as ca_gui:
        create_header()
        create_user_guidance()
        # create_main returns the components later sections need to reference.
        user_input, _output = create_main()
        create_examples(user_input)
        create_footer()
    ca_gui.launch()
|