import langchain
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.document_loaders import UnstructuredPDFLoader,UnstructuredWordDocumentLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from zipfile import ZipFile
import gradio as gr
import openpyxl
import os
import shutil
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
import tiktoken
import secrets
import time
import requests
import tempfile

from groq import Groq



tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")

# create the length function
def tiktoken_len(text):
    tokens = tokenizer.encode(
        text,
        disallowed_special=()
    )
    return len(tokens)

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=800,
    chunk_overlap=400,
    length_function=tiktoken_len,
    separators=["\n\n", "\n", " ", ""]
)

embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
foo = Document(page_content='foo is fou!',metadata={"source":'foo source'})
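# Placeholder seed document used to initialize an empty FAISS index in embed_files().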


def reset_database(ui_session_id):
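  # Delete the on-disk FAISS index directory and its zip archive for this session.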
  session_id = f"PDFAISS-{ui_session_id}"
  if 'drive' in session_id:
    print("RESET DATABASE: session_id contains 'drive' !!")
    return None

  try:
    shutil.rmtree(session_id)
  except:
    print(f'no {session_id} directory present')
  
  try:
    os.remove(f"{session_id}.zip")
  except:
    print("no {session_id}.zip present")

  return None

def is_duplicate(split_docs,db):
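  # Heuristic duplicate check: sum the nearest-neighbour distances of the first
  # (up to 3) chunks against the existing index; a near-zero total means the
  # document is most likely already embedded.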
  epsilon=0.0
  print(f"DUPLICATE: Treating: {split_docs[0].metadata['source'].split('/')[-1]}")
  for i in range(min(3,len(split_docs))):
    query = split_docs[i].page_content
    docs = db.similarity_search_with_score(query,k=1)
    _ , score = docs[0]
    epsilon += score
  print(f"DUPLICATE: epsilon: {epsilon}")
  return epsilon < 0.1

def merge_split_docs_to_db(split_docs,db,progress,progress_step=0.1):
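  # Embed the chunks in batches of 10 and merge them into the existing FAISS index,
  # skipping the document if it looks like a duplicate.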
  progress(progress_step,desc="merging docs")
  if len(split_docs)==0:
    print("MERGE to db: NO docs!!")
    return

  filename = split_docs[0].metadata['source']
  if is_duplicate(split_docs,db):
    print(f"MERGE: Document is duplicated: {filename}")
    return
  print(f"MERGE: number of split docs: {len(split_docs)}")
  batch = 10
  for i in range(0, len(split_docs), batch):
    progress(i/len(split_docs),desc=f"added {i} chunks of {len(split_docs)} chunks")
    db1 = FAISS.from_documents(split_docs[i:i+batch], embeddings)
    db.merge_from(db1)
  return db

def merge_pdf_to_db(filename,db,progress,progress_step=0.1):
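  # Load a PDF with UnstructuredPDFLoader, split it into chunks and merge them into the index.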
  progress_step+=0.05
  progress(progress_step,'unpacking pdf')
  doc = UnstructuredPDFLoader(filename).load()
  doc[0].metadata['source'] = filename.split('/')[-1]
  split_docs = text_splitter.split_documents(doc)
  progress_step+=0.3
  progress(progress_step,'pdf unpacked')
  return merge_split_docs_to_db(split_docs,db,progress,progress_step)

def merge_docx_to_db(filename,db,progress,progress_step=0.1):
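  # Load a .docx with UnstructuredWordDocumentLoader, split it into chunks and merge them into the index.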
  progress_step+=0.05
  progress(progress_step,'unpacking docx')
  doc = UnstructuredWordDocumentLoader(filename).load()
  doc[0].metadata['source'] = filename.split('/')[-1]
  split_docs = text_splitter.split_documents(doc)
  progress_step+=0.3
  progress(progress_step,'docx unpacked')
  return merge_split_docs_to_db(split_docs,db,progress,progress_step)

def merge_txt_to_db(filename,db,progress,progress_step=0.1):
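  # Read a plain-text file, split it into chunks and merge them into the index.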
  progress_step+=0.05
  progress(progress_step,'unpacking txt')
  with open(filename) as f:
      docs = text_splitter.split_text(f.read())
      split_docs = [Document(page_content=doc,metadata={'source':filename.split('/')[-1]}) for doc in docs]
  progress_step+=0.3
  progress(progress_step,'txt unpacked')
  return merge_split_docs_to_db(split_docs,db,progress,progress_step)

def unpack_zip_file(filename,db,progress):
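    # Extract a zip archive: if it already contains a FAISS index, merge it directly;
    # otherwise embed every supported file (.docx, .pdf, .txt) it contains.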
    with ZipFile(filename, 'r') as zipObj:
        contents = zipObj.namelist()
        print(f"unpack zip: contents: {contents}")
        tmp_directory = filename.split('/')[-1].split('.')[-2]
        shutil.unpack_archive(filename, tmp_directory)

        if 'index.faiss' in [item.lower() for item in contents]:
            db2 = FAISS.load_local(tmp_directory, embeddings, allow_dangerous_deserialization=True)
            db.merge_from(db2)
            return db
        
        for file in contents:
            if file.lower().endswith('.docx'):
              db = merge_docx_to_db(f"{tmp_directory}/{file}",db,progress)
            if file.lower().endswith('.pdf'):
              db = merge_pdf_to_db(f"{tmp_directory}/{file}",db,progress)
            if file.lower().endswith('.txt'):
              db = merge_txt_to_db(f"{tmp_directory}/{file}",db,progress)
        return db

def add_files_to_zip(session_id):
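    # Zip the whole session directory (FAISS index + stored files) for download.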
    zip_file_name = f"{session_id}.zip"
    with ZipFile(zip_file_name, "w") as zipObj:
        for root, dirs, files in os.walk(session_id):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                arcname = os.path.relpath(file_path, session_id)
                zipObj.write(file_path, arcname)

#### UI Functions ####

def embed_files(files,ui_session_id,progress=gr.Progress(),progress_step=0.05):
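    # Embed the uploaded files into the session's FAISS index and return the zipped index.
    # Access control is assumed to rely on a 'users' environment variable holding a
    # comma-plus-space separated list of allowed usernames (e.g. "alice, bob").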
    if ui_session_id not in os.environ['users'].split(', '):
        return "README.md", ""
    print(files)
    progress(progress_step,desc="Starting...")
    split_docs=[]
    if len(ui_session_id)==0:
      ui_session_id = secrets.token_urlsafe(16)
    session_id = f"PDFAISS-{ui_session_id}"

    try:
      db = FAISS.load_local(session_id,embeddings, allow_dangerous_deserialization=True)
    except:
      print(f"SESSION: {session_id} database does not exist, create a FAISS db")
      db =  FAISS.from_documents([foo], embeddings)
      db.save_local(session_id)
      print(f"SESSION: {session_id} database created")
    
    print("EMBEDDED, before embeddeding: ",session_id,len(db.index_to_docstore_id))
    for file_id,file in enumerate(files):
        print("ID : ", file_id, "FILE : ", file)
        file_type = file.name.split('.')[-1].lower()
        source = file.name.split('/')[-1]
        print(f"current file: {source}")
        progress(file_id/len(files),desc=f"Processing {source}")

        db2 = None
        if file_type == 'pdf':
            db2 = merge_pdf_to_db(file.name,db,progress)
        
        if file_type == 'txt':
            db2 = merge_txt_to_db(file.name,db,progress)
        
        if file_type == 'docx':
            db2 = merge_docx_to_db(file.name,db,progress)

        if file_type == 'zip':
            db2 = unpack_zip_file(file.name,db,progress)

        if db2 is not None:
            db = db2
            db.save_local(session_id)
            ### move file to store ###
            progress(progress_step, desc = 'moving file to store')
            directory_path = f"{session_id}/store/"
            if not os.path.exists(directory_path):
                os.makedirs(directory_path)
            try:
                shutil.move(file.name, directory_path)
            except:
                pass

    ### load the updated db and zip it ###
    progress(progress_step, desc = 'loading db')
    db = FAISS.load_local(session_id,embeddings, allow_dangerous_deserialization=True)
    print("EMBEDDED, after embeddeding: ",session_id,len(db.index_to_docstore_id))
    progress(progress_step, desc = 'zipping db for download')
    add_files_to_zip(session_id)
    print(f"EMBEDDED: db zipped")
    progress(progress_step, desc = 'db zipped')
    return f"{session_id}.zip", ui_session_id, ""



def add_to_db(references,ui_session_id):
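    # NOTE: store_files() is not defined in this file; it is assumed to be provided elsewhere.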
    files = store_files(references)
    return embed_files(files,ui_session_id)

def export_files(references):
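    # Also relies on the external store_files() helper (see the note in add_to_db).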
    files = store_files(references, ret_names=True)
    #paths = [file.name for file in files]
    return files
    

def display_docs(docs):
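  # Format retrieved chunks as a numbered, copy-friendly text block with their sources.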
  output_str = ''
  for i, doc in enumerate(docs):
      source = doc.metadata['source'].split('/')[-1]
      output_str += f"Ref: {i+1}\n{repr(doc.page_content)}\nSource: {source}\n\n"
  return output_str


def display_docs_modal(docs):
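  # Like display_docs, but builds a list of entries terminated by the '*§*§*' marker
  # expected by the display_infoN helpers. Not currently wired into the UI.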
  output_list = []
  for i, doc in enumerate(docs):
      source = doc.metadata['source'].split('/')[-1]
      output_str.append(f"Ref: {i+1}\n{repr(doc.page_content)}\nSource: {source}\n*§*§*\n")
  return output_list


def ask_llm(system, user_input):
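    # Single (non-streaming) chat completion against Groq; expects the GROQ_KEY environment variable.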
    messages = [
        {
            "role": "system",
            "content": system
        },
        {
            "role": "user",
            "content": user_input,
        }
    ]
    client = Groq(api_key=os.environ["GROQ_KEY"])
    chat_completion = client.chat.completions.create(
        messages=messages,
        model='mixtral-8x7b-32768',
    )
    return chat_completion.choices[0].message.content

def ask_llm_stream(system, user_input):
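    # Streaming chat completion: yields the partial response accumulated so far after each chunk.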
    llm_response = ""
    client = Groq(api_key=os.environ["GROQ_KEY"])
    if user_input is None or user_input == "":
        user_input = "What is the introduction of the document about?"
    messages = [
        {
            "role": "system",
            "content": system
        },
        {
            "role": "user",
            "content": user_input,
        }
    ]

    stream = client.chat.completions.create(
        messages=messages,
        model="mixtral-8x7b-32768",
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
        stop=None,
        stream=True,
    )

    for chunk in stream:
        if chunk.choices[0].delta.content is not None:
            llm_response += chunk.choices[0].delta.content
        yield llm_response
    

def ask_gpt(query, ui_session_id, history):
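    # RAG entry point: retrieve the 5 most similar chunks from the session index, build the
    # system prompt around them, and stream the answer, sources and updated history to the UI.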
    if ui_session_id not in os.environ['users'].split(', '):
        yield "Please Login", "", ""
        return
    session_id = f"PDFAISS-{ui_session_id}"
    try:
      db = FAISS.load_local(session_id,embeddings, allow_dangerous_deserialization=True)
      print("ASKGPT after loading",session_id,len(db.index_to_docstore_id))
    except:
      print(f"SESSION: {session_id} database does not exist")
      return f"SESSION: {session_id} database does not exist","","".

    docs = db.similarity_search(query, k=5)

    documents = "\n\n*-*-*-*-*-*\n\n".join(f"Content: {doc.page_content}\n" for doc in docs)
    system = f"# Instructions\nTake a deep breath and resonate step by step.\nYou are a helpful standard assistant. Your have only one mission and that consists in answering to the user input based on the **provided documents**. If the answer to the question that is asked by the user isn't contained in the **provided documents**, say so but **don't make up an answer**. I chose you because you can say 'I don't know' so please don't do like the other LLMs and don't define acronyms that aren\'t present in the following **PROVIDED DOCUMENTS** double check if it is present before answering. If some of the information can be useful for the user you can tell him.\nFinish your response by **ONE** follow up question that the provided documents could answer.\n\nThe documents are separated by the string \'*-*-*-*-*-*\'. Do not provide any explanations or details.\n\n# **Provided documents**: {documents}."    
    gen = ask_llm_stream(system, query)
    last_value=""
    displayable_docs = display_docs(docs)
    for last_value in gen:
        yield last_value, displayable_docs, history + f"[query]\n{query}\n[answer]\n{last_value}\n[references]\n{displayable_docs}\n\n"


def auth_user(ui_session_id):
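    # Toggle visibility of all UI components depending on whether the username is in the 'users' env var.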
    if ui_session_id in os.environ['users'].split(', '):
        return gr.Textbox(label='Username', visible=False), gr.File(file_count="multiple", file_types=[".txt", ".pdf",".zip",".docx"], visible=True), gr.Button("Reset AI Knowledge", visible=True), gr.Markdown(label='AI Answer', visible=True), gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=True), gr.Button("▶", scale=1, visible=True), gr.Textbox(label='Sources', show_copy_button=True, visible=True), gr.File(label="Zipped database", visible=True), gr.Textbox(label='History', show_copy_button=True, visible=True)
    else:
        return gr.Textbox(label='Username', visible=True), gr.File(file_count="multiple", file_types=[".txt", ".pdf",".zip",".docx"], visible=False), gr.Button("Reset AI Knowledge", visible=False), gr.Markdown(label='AI Answer', visible=False), gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=False), gr.Button("▶", scale=1, visible=False), gr.Textbox(label='Sources', show_copy_button=True, visible=False), gr.File(label="Zipped database", visible=False), gr.Textbox(label='History', show_copy_button=True, visible=False)

def display_info0(documents):
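    # display_info0..display_info4 pop up reference N from the sources textbox; entries are
    # expected to be separated by the '\n*§*§*\n' marker.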
    try:
        gr.Info(documents.split("\n*§*§*\n")[0])
    except Exception as e:
        gr.Info("No Document")

def display_info1(documents):
    try:
        gr.Info(documents.split("\n*§*§*\n")[1])
    except Exception as e:
        gr.Info("No Document")
        
def display_info2(documents):
    try:
        gr.Info(documents.split("\n*§*§*\n")[2])
    except Exception as e:
        gr.Info("No Document")
    
def display_info3(documents):
    try:
        gr.Info(documents.split("\n*§*§*\n")[3])
    except Exception as e:
        gr.Info("No Document")
    
def display_info4(documents):
    try:
        gr.Info(documents.split("\n*§*§*\n")[4])
    except Exception as e:
        gr.Info("No Document")

with gr.Blocks() as demo:
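    # Gradio UI: login textbox, file uploader, question/answer area, reference buttons,
    # and a collapsible section with the zipped database and conversation history.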
    gr.Markdown("# Enrich an LLM knowledge with your own documents 🧠🤖")
        
    with gr.Column():        
        tb_session_id = gr.Textbox(label='Username')
        docs_input = gr.File(file_count="multiple", file_types=[".txt", ".pdf",".zip",".docx"], visible=False)
        btn_reset_db = gr.Button("Reset AI Knowledge", visible=False)


    with gr.Column():
        answer_output = gr.Markdown(label='AI Answer', visible=False)
        with gr.Row():
            query_input = gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=False)
            btn_askGPT = gr.Button("▶", scale=1, visible=False)
        with gr.Row():
            btn1 = gr.Button("Ref 1")
            btn2 = gr.Button("Ref 2")
            btn3 = gr.Button("Ref 3")
            btn4 = gr.Button("Ref 4")
            btn5 = gr.Button("Ref 5")
        
        
        tb_sources = gr.Textbox(label='Sources', show_copy_button=True, visible=False)
        

    with gr.Accordion("Download your knowledge base and see your conversation history", open=False):
        db_output = gr.File(label="Zipped database", visible=False)
        tb_history = gr.Textbox(label='History', show_copy_button=True, visible=False, interactive=False)
        

    tb_session_id.submit(auth_user, inputs=tb_session_id, outputs=[tb_session_id, docs_input, btn_reset_db, answer_output, query_input, btn_askGPT, tb_sources, db_output, tb_history])

    docs_input.upload(embed_files, inputs=[docs_input,tb_session_id], outputs=[db_output,tb_session_id, query_input])
    btn_reset_db.click(reset_database,inputs=[tb_session_id],outputs=[db_output])
    btn_askGPT.click(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])
    query_input.submit(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])

    btn1.click(display_info0, inputs=tb_sources, outputs=None)
    btn2.click(display_info1, inputs=tb_sources, outputs=None)
    btn3.click(display_info2, inputs=tb_sources, outputs=None)
    btn4.click(display_info3, inputs=tb_sources, outputs=None)
    btn5.click(display_info4, inputs=tb_sources, outputs=None)
    


demo.launch(debug=False,share=False)