Spaces:
				
			
			
	
			
			
		Runtime error
		
	
	
	
			
			
	
	
	
	
		
		
		Runtime error
		
	Commit 
							
							·
						
						ec7540b
	
1
								Parent(s):
							
							d4d5003
								
Added test code for app.py
Browse files
    	
        app.py
    CHANGED
    
    | @@ -1,16 +1,30 @@ | |
| 1 | 
             
            import gradio as gr
         | 
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
| 2 |  | 
| 3 | 
            -
             | 
|  | |
| 4 |  | 
| 5 | 
            -
             | 
|  | |
|  | |
|  | |
| 6 |  | 
| 7 | 
            -
             | 
| 8 | 
            -
             | 
| 9 | 
            -
             | 
| 10 | 
            -
             | 
| 11 | 
            -
             | 
| 12 | 
            -
             | 
| 13 | 
            -
             | 
|  | |
|  | |
|  | |
|  | |
| 14 | 
             
            )
         | 
| 15 |  | 
| 16 | 
            -
             | 
|  | |
| 1 | 
             
            import gradio as gr
         | 
| 2 | 
            +
            import os
         | 
| 3 | 
            +
            import json
         | 
| 4 | 
            +
            import openai
         | 
| 5 | 
            +
            import torch
         | 
| 6 | 
            +
            from transformers import AutoTokenizer, AutoModelForCausalLM
         | 
| 7 | 
            +
            from run_llm import model_mapping, fastchat  # Import the necessary function from run_llm.py
         | 
| 8 |  | 
# Configure the OpenAI API key.
# SECURITY FIX: the key was previously hardcoded in source (and is therefore
# leaked in version control — that key must be revoked). Read it from the
# environment instead; `os` is imported at the top of the file.
openai.api_key = os.getenv("OPENAI_API_KEY")
| 11 |  | 
def generate_text(input_text, model, prompt_type):
    """Run the selected model on *input_text* and return its output.

    Thin wrapper around ``fastchat`` from ``run_llm``; *model* is a key of
    ``model_mapping`` and *prompt_type* selects the prompt template (1 or 2).
    """
    return fastchat(input_text, model, prompt_type)
| 16 |  | 
# Build the Gradio UI: free-text input, a model picker fed from the
# project's model_mapping table, and a prompt-template selector.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        # BUG FIX: gr.Textbox's first positional argument is `value`, so the
        # original gr.Textbox("input_text", ...) pre-filled the box with the
        # literal string "input_text". The label alone is what was intended.
        gr.Textbox(label="Input Text"),
        gr.Dropdown(
            list(model_mapping.keys()),
            label="Model",
        ),
        gr.Radio([1, 2], label="Prompt Type"),
    ],
    # Same fix on the output component: drop the stray positional value.
    outputs=gr.Textbox(label="Generated Text"),
)

# Launch the server only when run as a script, so importing this module
# (e.g. from tests) does not start the web app as a side effect.
if __name__ == "__main__":
    iface.launch()