import os

import gradio as gr
import openai

#from dotenv import load_dotenv

#load_dotenv()

llm_api_options = ["OpenAI API","Azure OpenAI API","Google PaLM API", "Llama 2"]
TEST_MESSAGE = "Write an introductory paragraph to explain Generative AI to the reader of this content."
openai_models = ["gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo", 
                     "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "text-davinci-003", 
                     "text-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]

azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
azure_deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME")
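# Note: AZURE_OPENAI_ENDPOINT is expected to hold only the Azure resource name
# (the Azure call below builds https://<resource>.openai.azure.com from it), and
# AZURE_OPENAI_DEPLOYMENT_NAME the name of the chat model deployment to invoke.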

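# Calls the public OpenAI chat completion endpoint and returns an (error_message, response_text)
# pair so the Gradio handler can route the two values to separate output widgets.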
def openai_text_completion(prompt: str, model: str):
    try:
        system_prompt: str = "Explain in detail to help student understand the concept."

        # System message first, then the user prompt; no assistant turn is pre-seeded.
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt}
        ]

        openai.api_key = os.getenv("OPENAI_API_KEY")
        # Reset the module-level globals in case a previous Azure call changed them.
        openai.api_type = "open_ai"
        openai.api_base = "https://api.openai.com/v1"
        openai.api_version = None

        completion = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            temperature=0.7
        )
        response = completion.choices[0].message.content
        return "", response
    except openai.error.OpenAIError as exception:
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        return f"OpenAI API error - {exception}", ""

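# Same contract as openai_text_completion, but against an Azure OpenAI deployment:
# returns an (error_message, response_text) pair for the Gradio outputs.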
def azure_openai_text_completion(prompt: str, model: str):
    try:
        system_prompt: str = "Explain in detail to help student understand the concept."

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt}
        ]

        openai.api_key = os.getenv("AZURE_OPENAI_KEY")
        openai.api_type = "azure"
        openai.api_version = "2023-05-15"
        openai.api_base = f"https://{azure_endpoint}.openai.azure.com"

        completion = openai.ChatCompletion.create(
            model=model,
            engine=azure_deployment_name,
            messages=messages,
            temperature=0.7
        )
        response = completion.choices[0].message.content
        return "", response
    except openai.error.OpenAIError as exception:
        print(f"Exception Name: {type(exception).__name__}")
        print(exception)
        return f"Azure OpenAI API error - {exception}", ""


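# Dispatches the selected provider to the matching completion function.
# Returns (info_message, response_text) for the Label and Textbox outputs.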
def test_handler(optionSelection, prompt: str = TEST_MESSAGE, model: str = "gpt-4"):
    match optionSelection:
        case "OpenAI API":
            message, response = openai_text_completion(prompt, model)
            return message, response
        case "Azure OpenAI API":
            message, response = azure_openai_text_completion(prompt, model)
            return message, response
        case "Google PaLM API":
            # Not implemented yet.
            return "", ""
        case "Llama 2":
            # Not implemented yet.
            return "", ""
        case _:
            return f"Invalid choice: {optionSelection}", ""

        

with gr.Blocks() as LLMDemoTabbedScreen:
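    # Layout: one "Text-to-Text" tab with a provider radio button, an OpenAI model
    # dropdown, a prompt textbox, a response textbox, an info label, and a "Try it" button.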
    with gr.Tab("Text-to-Text (Text Completion)"):
        llm_options = gr.Radio(llm_api_options, label="Select one", info="Which service do you want to use?", value="OpenAI API")
        with gr.Tab("Open AI"):
            openai_model = gr.Dropdown(openai_models, value="gpt-4", label="Model", info="Select one, for Natural language")

        with gr.Row():
            with gr.Column(): 
                test_string = gr.Textbox(label="Try String", value=TEST_MESSAGE, lines=2)
                test_string_response = gr.Textbox(label="Response")
                test_string_output_info = gr.Label(value="Output Info", label="Info")
                test_button = gr.Button("Try it")


    test_button.click(
        fn=test_handler,
        inputs=[llm_options, test_string, openai_model],
        outputs=[test_string_output_info, test_string_response]
    )
    
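# Launch the Gradio app (serves locally, on port 7860 by default).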
if __name__ == "__main__":
    LLMDemoTabbedScreen.launch()