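"""Small agent loop built on the Hugging Face Inference API.

The agent keeps a plain-text `history`, asks Mixtral-8x7B-Instruct for its next
step, dispatches actions (MAIN, UPDATE-TASK, SEARCH, COMPLETE) to handler
functions, and streams the growing history through a Gradio interface. Prompt
templates and search helpers are provided by sibling modules.
"""
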
import os
import subprocess
import random
from huggingface_hub import InferenceClient
import gradio as gr
from safe_search import safe_search
from i_search import google
from i_search import i_search as i_s
from agent import run_agent, create_interface, generate
from utils import parse_action, parse_file_content, read_python_module_structure
# The prompt templates referenced below (PREFIX, LOG_PROMPT, LOG_RESPONSE,
# COMPRESS_HISTORY_PROMPT, TASK_PROMPT) are assumed to live in a local
# `prompts` module; adjust this import if they are defined elsewhere.
from prompts import (
    PREFIX,
    LOG_PROMPT,
    LOG_RESPONSE,
    COMPRESS_HISTORY_PROMPT,
    TASK_PROMPT,
)
from datetime import datetime

now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

VERBOSE = True
MAX_HISTORY = 100

def format_prompt_var(message, history):
    """Flatten chat history into the [INST] ... [/INST] format used by Mixtral-Instruct."""
    prompt = ""
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]\n{bot_response}\n"
    prompt += f"[INST] {message} [/INST]\n"
    return prompt
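
# Illustrative example:
#   format_prompt_var("What's next?", [("hi", "hello")])
# returns "[INST] hi [/INST]\nhello\n[INST] What's next? [/INST]\n".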

def run_gpt(prompt_template, stop_tokens, max_tokens, purpose, **prompt_kwargs):
    seed = random.randint(1, 1111111111111111)
    print(f"seed: {seed}")
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=max_tokens,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )

    # Prepend the system PREFIX (date, purpose, search hint) to the task-specific template.
    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        safe_search=safe_search,
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        print(LOG_PROMPT.format(content))

    # Stream the completion token by token and accumulate it into a single string.
    stream = client.text_generation(
        content,
        **generate_kwargs,
        stop_sequences=stop_tokens,
        stream=True,
        details=True,
        return_full_text=False,
    )
    resp = ""
    for response in stream:
        resp += response.token.text

    if VERBOSE:
        print(LOG_RESPONSE.format(resp))
    return resp
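
# Example (illustrative) call, mirroring how the helpers below use run_gpt:
#   run_gpt(TASK_PROMPT, stop_tokens=[], max_tokens=64,
#           purpose="build a TODO app", task=None, history="")
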
def compress_history(purpose, task, history, directory):
    """Summarize an overgrown history string into a single observation line."""
    resp = run_gpt(
        COMPRESS_HISTORY_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=512,
        purpose=purpose,
        task=task,
        history=history,
    )
    history = "observation: {}\n".format(resp)
    return history

def call_search(purpose, task, history, directory, action_input):
    print("CALLING SEARCH")
    try:
        if "http" in action_input:
            # Strip any angle brackets the model may have wrapped around the URL.
            action_input = action_input.strip("<>")
            response = i_s(action_input)
            print(response)
            history += "observation: search result is: {}\n".format(response)
        else:
            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
    except Exception as e:
        history += "observation: {}\n".format(e)
    # Hand control back to the MAIN handler once the result is recorded.
    return "MAIN", None, history, task

def call_set_task(purpose, task, history, directory, action_input):
    task = run_gpt(
        TASK_PROMPT,
        stop_tokens=[],
        max_tokens=64,
        purpose=purpose,
        task=task,
        history=history,
    ).strip("\n")
    history += "observation: task has been updated to: {}\n".format(task)
    return "MAIN", None, history, task

def end_fn(purpose, task, history, directory, action_input):
    task = "END"
    return "COMPLETE", "COMPLETE", history, task

# Map action names emitted by the model to their handler functions.
NAME_TO_FUNC = {
    "MAIN": call_main,
    "UPDATE-TASK": call_set_task,
    "SEARCH": call_search,
    "COMPLETE": end_fn,
}

def run_action(purpose, task, history, directory, action_name, action_input):
    print(f"action_name::{action_name}")
    try:
        if "RESPONSE" in action_name or "COMPLETE" in action_name:
            action_name = "COMPLETE"
            task = "END"
            return action_name, "COMPLETE", history, task

        # Summarize the history once it grows past MAX_HISTORY lines.
        if len(history.split("\n")) > MAX_HISTORY:
            if VERBOSE:
                print("COMPRESSING HISTORY")
            history = compress_history(purpose, task, history, directory)

        # Fall back to the MAIN handler for empty or unknown action names.
        if not action_name or action_name not in NAME_TO_FUNC:
            action_name = "MAIN"

        print("RUN: ", action_name, action_input)
        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
    except Exception:
        history += "observation: the previous command did not produce any useful output, I need to check the command's syntax, or use a different command\n"
        return "MAIN", None, history, task

def run(purpose, history):
    task = None
    directory = "./"
    if history:
        history = str(history).strip("[]")
        if not history:
            history = ""

    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None
    while True:
        print("")
        print("")
        print("---")
        print("purpose:", purpose)
        print("task:", task)
        print("---")
        print(history)
        print("---")

        action_name, action_input, history, task = run_action(
            purpose,
            task,
            history,
            directory,
            action_name,
            action_input,
        )
        yield (history)
        if task == "END":
            return (history)

# Build the Gradio interface at module level so the generator `run` can stream
# intermediate history back to the UI.
iface = gr.Interface(
    fn=run,
    inputs=["text", "text"],
    outputs="text",
    title="Interactive AI Assistant",
    description="Enter your purpose and history to interact with the AI assistant.",
)

if __name__ == "__main__":
    # Launch the Gradio interface.
    iface.launch(share=True)