import gradio as gr
import spaces
import torch
import numpy as np
from transformers import AutoTokenizer, AutoModel
from sklearn.decomposition import PCA
import plotly.graph_objects as go

model_name = "sentence-transformers/all-MiniLM-L6-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)


def get_embedding(text):
    # Run the encoder and mean-pool the token embeddings into one sentence vector.
    # The GPU is requested by the @spaces.GPU entry point that calls this helper.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512).to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state.mean(dim=1).squeeze().cpu().numpy()


def compress_to_3d(embeddings):
    # PCA yields at most min(n_samples, n_features) components, so fit it on the
    # stacked embeddings jointly and zero-pad the projection up to 3 coordinates.
    pca = PCA(n_components=min(3, len(embeddings)))
    reduced = pca.fit_transform(embeddings)
    return np.hstack([reduced, np.zeros((len(embeddings), 3 - reduced.shape[1]))])


@spaces.GPU
def compare_embeddings(text1, text2):
    emb1 = get_embedding(text1)
    emb2 = get_embedding(text2)

    # Project both embeddings into a shared 3D space for plotting.
    emb1_3d, emb2_3d = compress_to_3d(np.vstack([emb1, emb2]))

    # Draw each embedding as a vector from the origin.
    fig = go.Figure(data=[
        go.Scatter3d(x=[0, emb1_3d[0]], y=[0, emb1_3d[1]], z=[0, emb1_3d[2]], mode='lines+markers', name='Text 1'),
        go.Scatter3d(x=[0, emb2_3d[0]], y=[0, emb2_3d[1]], z=[0, emb2_3d[2]], mode='lines+markers', name='Text 2')
    ])

    fig.update_layout(scene=dict(xaxis_title='X', yaxis_title='Y', zaxis_title='Z'))

    return fig


iface = gr.Interface(
    fn=compare_embeddings,
    inputs=[
        gr.Textbox(label="Text 1"),
        gr.Textbox(label="Text 2")
    ],
    outputs=gr.Plot(),
    title="3D Embedding Comparison",
    description="Compare the embeddings of two strings visualized in 3D space."
)

iface.launch()