File size: 5,242 Bytes
98f0104
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
import os
import websockets
import gradio as gr
import fireworks.client
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms.fireworks import Fireworks
from langchain.chat_models.fireworks import ChatFireworks

# Module-level state shared by the Gradio callbacks below.
inputs = []        # unused in this chunk — possibly leftover scaffolding
outputs = []       # unused in this chunk — possibly leftover scaffolding
used_ports = []    # unused in this chunk — possibly leftover scaffolding
server_ports = []  # ports on which a WebSocket server was started
client_ports = []  # ports to which a WebSocket client has connected

# SECURITY NOTE(review): real-looking API credentials are hardcoded here and
# committed to source. They should be loaded from the environment / a secrets
# manager and these keys rotated — TODO confirm and remove.
GOOGLE_CSE_ID = "f3882ab3b67cc4923"
GOOGLE_API_KEY = "AIzaSyBNvtKE35EAeYO-ECQlQoZO01RSHWhfIws"
FIREWORKS_API_KEY = "WZGOkHQbZULIzA6u83kyLGBKPigs1HmK9Ec8DEKmGOtu45zx"
FIREWORKS_API_KEY1 = "WZGOkHQbZULIzA6u83kyLGBKPigs1HmK9Ec8DEKmGOtu45zx"  # duplicate of FIREWORKS_API_KEY; unused here

def get_response(input):
    """Answer a question with a ReAct agent backed by a Fireworks LLM.

    Builds a fresh llama-v2-13b-chat agent with google-search and llm-math
    tools on every call, runs *input* through it, and returns both the final
    answer and the agent's reasoning trace.

    Returns:
        tuple: ``(output, intermediate_steps)`` where ``output`` is the
        agent's final answer string and ``intermediate_steps`` is the list
        of (action, observation) pairs it took along the way.
    """
    # Push credentials into the environment so the langchain tools
    # (google-search in particular) can pick them up.
    os.environ.update({
        "GOOGLE_CSE_ID": GOOGLE_CSE_ID,
        "GOOGLE_API_KEY": GOOGLE_API_KEY,
        "FIREWORKS_API_KEY": FIREWORKS_API_KEY,
    })

    model_kwargs = {"temperature": 0, "max_tokens": 4000, "top_p": 1.0}
    llm = Fireworks(
        model="accounts/fireworks/models/llama-v2-13b-chat",
        model_kwargs=model_kwargs,
    )

    toolbox = load_tools(["google-search", "llm-math"], llm=llm)
    agent = initialize_agent(
        toolbox,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
        return_intermediate_steps=True,
        handle_parsing_errors=True,
    )

    result = agent({"input": input})
    return result["output"], result["intermediate_steps"]

async def handleWebSocket(ws):
    """Serve one WebSocket connection: greet, then answer every message.

    Each incoming message is fed to the agent via ``get_response`` and the
    agent's final answer is sent back.  The loop ends when the peer closes
    the connection.
    """
    print('New connection')
    await ws.send('Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT. Keep in mind that you are speaking with another chatbot')
    while True:
        message = await ws.recv()
        print(f'Received message: {message}')
        try:
            # BUG FIX: get_response is a plain (synchronous) function that
            # returns a (answer, intermediate_steps) tuple — it must not be
            # awaited, and only the answer text can be sent over the socket.
            answer, _steps = get_response(message)
            await ws.send(answer)

        except websockets.exceptions.ConnectionClosedError as e:
            # BUG FIX: on a closed connection, stop the loop instead of
            # continuing — the next ws.recv() would raise the same error
            # uncaught.
            print(f"Connection closed: {e}")
            break

        except Exception as e:
            print(f"Error: {e}")

# Start the WebSocket server
async def start_websockets(websocketPort):
    """Start a WebSocket server on localhost:*websocketPort*.

    Stores the server in the module-level ``server`` global (so
    ``stop_websockets`` can close it), records the port, and returns a
    newline-joined summary of all server ports started so far for display
    in the UI.
    """
    global server
    server = await websockets.serve(handleWebSocket, 'localhost', websocketPort)
    server_ports.append(websocketPort)
    print(f"Starting WebSocket server on port {websocketPort}...")
    # BUG FIX: removed unreachable `await asyncio.Future()` that followed the
    # return statement (asyncio was never imported either).
    return "Used ports:\n" + '\n'.join(map(str, server_ports))

async def start_client(clientPort):
    """Connect to a local WebSocket server and answer its messages forever.

    Opens ``ws://localhost:<clientPort>``, records the port, and then for
    every message received replies with the agent's answer.  Runs until the
    connection drops (the ``async with`` closes the socket on exit).
    """
    uri = f'ws://localhost:{clientPort}'
    client_ports.append(clientPort)
    async with websockets.connect(uri) as ws:
        while True:
            # Listen for messages from the server
            input_message = await ws.recv()
            # BUG FIX: get_response is synchronous and returns a
            # (answer, intermediate_steps) tuple — don't await it, and send
            # only the answer text.
            output_message, _steps = get_response(input_message)
            await ws.send(output_message)

# Stop the WebSocket server
async def stop_websockets():
    """Gracefully shut down the WebSocket server started by start_websockets.

    Safe to call when no server was ever started: in that case it only
    prints a notice instead of raising.
    """
    global server
    # BUG FIX: `server` is never initialized at module level, so pressing
    # "Stop" before "Start" raised NameError; guard with a globals() check.
    if "server" in globals() and server:
        # Close all connections gracefully
        server.close()
        # Wait for the server to close
        await server.wait_closed()
        print("Stopping WebSocket server...")
    else:
        print("WebSocket server is not running.")

# Stop the WebSocket client
async def stop_client():
    """Close the WebSocket client connection, if one exists.

    NOTE(review): nothing in this file ever assigns a module-level ``ws``
    (start_client keeps its connection local), so previously this always
    raised NameError; guard with a globals() check.
    """
    global ws
    if "ws" in globals() and ws:
        # Close the connection with the server.
        # BUG FIX: websockets' close() is a coroutine and must be awaited,
        # otherwise the connection is never actually closed.
        await ws.close()
        print("Stopping WebSocket client...")
    else:
        print("WebSocket client is not running.")

# Gradio UI: one tab for direct Q&A with the agent, one tab for managing the
# WebSocket server/client that relay messages between agent instances.
with gr.Blocks() as demo:
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("Agents GPT", elem_id="agents_gpt", id=0):
            with gr.Column(scale=1, min_width=600):
                with gr.Row():
                    userInput = gr.Textbox(label="User Input")
                with gr.Row():
                    ask_question = gr.Button("Ask question")
                with gr.Row():
                    goalOutput = gr.Textbox(lines=15, max_lines=130, label="Goal output:")
                    steps = gr.Json(label="Intermediate Steps")

        # BUG FIX: this tab previously duplicated the first tab's title,
        # elem_id and id=0; tabs must have distinct identities.
        with gr.TabItem("Websockets", elem_id="websockets", id=1):
            with gr.Column(scale=1, min_width=600):
                with gr.Row():
                    websocketPort = gr.Slider(minimum=1000, maximum=9999, label="Websocket server port", interactive=True, randomize=False)
                    startWebsockets = gr.Button("Start WebSocket Server")
                    stopWebsockets = gr.Button("Stop WebSocket Server")
                with gr.Row():
                    port = gr.Textbox()
                with gr.Row():
                    # BUG FIX: label said "server port" (copy-paste); this
                    # slider selects the port the *client* connects to.
                    clientPort = gr.Slider(minimum=1000, maximum=9999, label="Websocket client port", interactive=True, randomize=False)
                    startClient = gr.Button("Start WebSocket client")
                    stopClient = gr.Button("Stop WebSocket client")
                with gr.Row():
                    PortInUse = gr.Textbox()
                    # Wire buttons to their handlers.
                    ask_question.click(get_response, inputs=userInput, outputs=[goalOutput, steps])
                    startWebsockets.click(start_websockets, inputs=websocketPort, outputs=port)
                    startClient.click(start_client, inputs=clientPort, outputs=None)
                    stopWebsockets.click(stop_websockets, inputs=None, outputs=port)
                    stopClient.click(stop_client, inputs=None, outputs=PortInUse)

demo.queue()
demo.launch(share=True, server_port=1112)