import spaces
import torch
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel
import gradio as gr
import os

title = """
# 👋🏻Welcome to 🙋🏻‍♂️Tonic's 🐣e5-mistral🛌🏻Embeddings """
description = """
You can use this Space to test out the current model [intfloat/e5-mistral-7b-instruct](https://huggingface.co/intfloat/e5-mistral-7b-instruct). e5-mistral has a larger context window, a different prompting/return mechanism, and generally better results than other embedding models.
You can also use 🐣e5-mistral🛌🏻 by cloning this Space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/e5?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builders'🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Hugging Face: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗 Big thanks to Yuvi Sharma and all the folks at Hugging Face for the community grant 🤗

You can use this Space in **two ways!** Either select one of the embedding tasks, or pick 'None' and optionally supply your own system prompt as the instruction 🤗
"""

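# Allocator hint: capping splittable CUDA blocks at 30 MB can reduce memory
# fragmentation on smaller GPUs (an optional tuning knob, not required for
# correctness).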
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:30'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

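# Instruction templates prepended to queries; these roughly follow the task
# descriptions used for evaluating e5-mistral-7b-instruct on MTEB retrieval
# tasks.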
tasks = {
        'ArguAna': 'Given a claim, find documents that refute the claim',
        'ClimateFEVER': 'Given a claim about climate change, retrieve documents that support or refute the claim',
        'DBPedia': 'Given a query, retrieve relevant entity descriptions from DBPedia',
        'FEVER': 'Given a claim, retrieve documents that support or refute the claim',
        'FiQA2018': 'Given a financial question, retrieve user replies that best answer the question',
        'HotpotQA': 'Given a multi-hop question, retrieve documents that can help answer the question',
        'MSMARCO': 'Given a web search query, retrieve relevant passages that answer the query',
        'NFCorpus': 'Given a question, retrieve relevant documents that best answer the question',
        'NQ': 'Given a question, retrieve Wikipedia passages that answer the question',
        'QuoraRetrieval': 'Given a question, retrieve questions that are semantically equivalent to the given question',
        'SCIDOCS': 'Given a scientific paper title, retrieve paper abstracts that are cited by the given paper',
        'SciFact': 'Given a scientific claim, retrieve documents that support or refute the claim',
        'Touche2020': 'Given a question, retrieve detailed and persuasive arguments that answer the question',
        'TRECCOVID': 'Given a query on COVID-19, retrieve documents that answer the query',
}

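# Load the tokenizer and the 7B model in float16 (roughly half the GPU memory
# of float32); device_map places the whole model on the selected device.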
tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-mistral-7b-instruct')
model = AutoModel.from_pretrained('intfloat/e5-mistral-7b-instruct', torch_dtype=torch.float16, device_map=device)

def last_token_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
    left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
    if left_padding:
        return last_hidden_states[:, -1]
    else:
        sequence_lengths = attention_mask.sum(dim=1) - 1
        batch_size = last_hidden_states.shape[0]
        return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
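
# Worked example (illustrative only, not executed): with right padding and
# attention_mask = [[1, 1, 0]], sequence_lengths is [1], so the pooled vector
# is the hidden state at position 1 -- the last real (non-pad) token.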

@spaces.GPU
def compute_embeddings(selected_task, input_text, system_prompt):
    max_length = 2042
    if selected_task == "None":
        if system_prompt:
            processed_texts = [f'Instruct: {system_prompt}\nQuery: {input_text}']
        else:
            processed_texts = [f'Query: {input_text}']
    else:
        task_description = tasks[selected_task]
        processed_texts = [f'Instruct: {task_description}\nQuery: {input_text}']


    batch_dict = tokenizer(processed_texts, max_length=max_length - 1, return_attention_mask=False, padding=False, truncation=True)
    batch_dict['input_ids'] = [input_ids + [tokenizer.eos_token_id] for input_ids in batch_dict['input_ids']]
    batch_dict = tokenizer.pad(batch_dict, padding=True, return_attention_mask=True, return_tensors='pt')
    batch_dict = {k: v.to(device) for k, v in batch_dict.items()}
    # Forward pass without building the autograd graph (inference only).
    with torch.no_grad():
        outputs = model(**batch_dict)
    embeddings = last_token_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
    embeddings = F.normalize(embeddings, p=2, dim=1)
    embeddings_list = embeddings.detach().cpu().numpy().tolist()
    return embeddings_list
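
# Quick sanity check (hedged example; uncomment to run once the model is
# loaded -- it assumes a GPU with enough memory for the fp16 7B model):
#   vecs = compute_embeddings("MSMARCO", "how long do elephants live", "")
#   print(len(vecs), len(vecs[0]))  # expect 1 x 4096 (Mistral-7B hidden size)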
    
def app_interface():
    with gr.Blocks() as demo:
        gr.Markdown(title)
        gr.Markdown(description)

        task_dropdown = gr.Dropdown(list(tasks.keys()) + ["None"], label="Select a Task (Optional)", value="None")

        # Create the components inside the Row/Column layout so they actually
        # render there; bare references to already-created components do not
        # move them into a layout.
        with gr.Row():
            with gr.Column():
                system_prompt_box = gr.Textbox(label="🤖System Prompt (Optional)")
                input_text_box = gr.Textbox(label="📖Input Text")
            with gr.Column():
                compute_button = gr.Button("Try🐣🛌🏻e5")
                output_display = gr.Textbox(label="🐣e5-mistral🛌🏻")

        compute_button.click(
            fn=compute_embeddings,
            inputs=[task_dropdown, input_text_box, system_prompt_box], 
            outputs=output_display
        )

    return demo

app_interface().launch()