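# Hugging Face Space app: a Gradio chat interface that summarizes long input
# text with Mixtral-8x7B-Instruct-v0.1 over the Hugging Face Inference API.
# Prompt templates (PREFIX, COMPRESS_DATA_PROMPT, ...) come from the agent module.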
import gradio as gr
#import urllib.request
#import requests
#import bs4
#import lxml
import os
#import subprocess
from huggingface_hub import InferenceClient, HfApi
import random
import json
import datetime
#from query import tasks
from agent import (
    PREFIX,
    COMPRESS_DATA_PROMPT,
    COMPRESS_DATA_PROMPT_SMALL,
    LOG_PROMPT,
    LOG_RESPONSE,
)

api = HfApi()
client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)
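# Split an agent reply of the form "action: NAME action_input=VALUE" into a
# (name, input) tuple; input is None when "action_input=" is absent.
# (Not called anywhere in this file; presumably used by a fuller agent loop.)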
def parse_action(string: str):
    print("PARSING:")
    print(string)
    assert string.startswith("action:")
    idx = string.find("action_input=")
    print(idx)
    if idx == -1:
        print("idx == -1")
        print(string[8:])
        return string[8:], None
    print("last return:")
    print(string[8 : idx - 1])
    print(string[idx + 13 :].strip("'").strip('"'))
    return string[8 : idx - 1], string[idx + 13 :].strip("'").strip('"')
VERBOSE = True
MAX_HISTORY = 100
MAX_DATA = 1000  # character threshold above which summarize() compresses input
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
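# Build PREFIX plus a filled prompt template, stream a completion from the
# Inference API, and return the concatenated text. Note that the stop_tokens
# argument is accepted for call-site symmetry but is not forwarded to
# client.text_generation() here.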
def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    seed,
    **prompt_kwargs,
):
    print(seed)
    timestamp = datetime.datetime.now()

    generate_kwargs = dict(
        temperature=0.9,
        max_new_tokens=max_tokens,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )

    content = PREFIX.format(
        timestamp=timestamp,
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        print(LOG_PROMPT.format(content))

    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    #formatted_prompt = format_prompt(f'{content}', history)

    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
        #yield resp
    if VERBOSE:
        print(LOG_RESPONSE.format(resp))
    return resp
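# Rolling compression: slice the input into ~MAX_DATA-sized chunks and
# summarize each one, feeding the running summary back in as `knowledge`,
# then make a final pass that compiles the report. `c` is the separator
# count computed in summarize(); the chunk arithmetic is derived from it.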
def compress_data(c, history):
    seed = random.randint(1, 1000000000)
    print(c)
    #tot=len(purpose)
    #print(tot)
    divr = int(c) / MAX_DATA
    divi = int(divr) + 1 if divr != int(divr) else int(divr)
    chunk = int(int(c) / divr)
    print(f'chunk:: {chunk}')
    print(f'divr:: {divr}')
    print(f'divi:: {divi}')
    out = []
    #out=""
    s = 0
    e = chunk
    print(f'e:: {e}')
    new_history = ""
    #task = f'Compile this data to fulfill the task: {task}, and complete the purpose: {purpose}\n'
    for z in range(divi):
        print(f's:e :: {s}:{e}')
        hist = history[s:e]
        resp = run_gpt(
            COMPRESS_DATA_PROMPT_SMALL,
            stop_tokens=["observation:", "task:", "action:", "thought:"],
            max_tokens=2048,
            seed=seed,
            purpose="Compress data",
            knowledge=new_history,
            history=hist,
        )
        new_history = resp
        print(resp)
        out.append(resp)  # collect whole summaries (was `out += resp`, which extends the list character by character)
        e = e + chunk
        s = s + chunk
    resp = run_gpt(
        COMPRESS_DATA_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=1024,
        seed=seed,
        purpose="Compile report",
        knowledge=new_history,
        history="All data has been received.",
    )
    print("final" + resp)
    history = "observation: {}\n".format(resp)
    return history
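# ChatInterface handler: inputs longer than MAX_DATA characters are compressed
# via compress_data(); shorter inputs are echoed back unchanged.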
def summarize(inp, history=None):  # second argument is the chat history passed by gr.ChatInterface (was `file`, unused)
    print(inp)
    out = str(inp)
    rl = len(out)
    print(f'rl:: {rl}')
    c = 0
    for i in str(out):
        if i == " " or i == "," or i == "\n":
            c += 1
    print(f'c:: {c}')
    if rl > MAX_DATA:
        print("compressing...")
        rawp = compress_data(c, out)
    else:
        rawp = out  # short inputs pass through (the original left `rawp` unbound on this path)
    print(rawp)
    print(f'out:: {out}')
    #history += "observation: the search results are:\n {}\n".format(out)
    task = "complete?"
    return rawp
#################################

examples = [
    "what are today's breaking news stories?",
    "find the most popular model that I can use to generate an image by providing a text prompt",
    "return the top 10 models that I can use to identify objects in images",
    "which models have the most likes from each category?",
]

app = gr.ChatInterface(
    fn=summarize,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Mixtral 46.7B Powered <br> Search",
    examples=examples,
    concurrency_limit=20,
)
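# Quick sanity check (hypothetical, left commented out): the handler can be
# exercised directly before launching the UI, e.g.
#   print(summarize("some long text " * 200, []))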
'''
with gr.Blocks() as app:
    with gr.Row():
        inp_query = gr.Textbox()
        models_dd = gr.Dropdown(choices=[m for m in return_list], interactive=True)
    with gr.Row():
        button = gr.Button()
        stop_button = gr.Button("Stop")
    text = gr.JSON()
    inp_query.change(search_models, inp_query, models_dd)
    go = button.click(test_fn, None, text)
    stop_button.click(None, None, None, cancels=[go])
'''

app.launch(server_port=7860, show_api=False)