import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix
import streamlit as st
# multi_project_matching
def calc_matches(filtered_df, project_df, similarity_matrix, top_x):
    """Return the top_x most similar (project, filtered) pairs as two DataFrames."""
    # Ensure the matrix is in a sparse format suitable for fancy indexing
    if not isinstance(similarity_matrix, csr_matrix):
        similarity_matrix = csr_matrix(similarity_matrix)

    filtered_indices = filtered_df.index.to_list()
    project_indices = project_df.index.to_list()

    # Select the sub-matrix of similarities: rows = projects, columns = filtered entries
    match_matrix = similarity_matrix[project_indices, :][:, filtered_indices]
    dense_match_matrix = match_matrix.toarray()
    flat_matrix = dense_match_matrix.flatten()

    # Get the flat indices of the top_x values (in ascending order of similarity)
    top_indices = np.argsort(flat_matrix)[-top_x:]

    # Convert flat indices back to 2D (row, column) indices
    top_2d_indices = np.unravel_index(top_indices, dense_match_matrix.shape)

    # Extract the corresponding similarity values
    top_values = flat_matrix[top_indices]

    # Map matrix positions back to the original DataFrame indices
    top_matches = []
    org_rows = []
    org_cols = []
    for value, row, col in zip(top_values, top_2d_indices[0], top_2d_indices[1]):
        original_row_index = project_indices[row]
        original_col_index = filtered_indices[col]
        org_rows.append(original_row_index)
        org_cols.append(original_col_index)
        top_matches.append((value, original_row_index, original_col_index))

    # Build the result DataFrames, annotated with the similarity of each match
    p1_df = filtered_df.loc[org_cols].copy()
    p1_df['similarity'] = top_values
    p2_df = project_df.loc[org_rows].copy()
    p2_df['similarity'] = top_values

    print("finished calc matches")
    return p1_df, p2_df
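

# Minimal usage sketch (illustrative assumption, not part of the original module):
# it builds a small random similarity matrix and two index-aligned DataFrames,
# then requests the top 3 matches. The DataFrame contents and column names used
# here are hypothetical placeholders.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    sim = rng.random((5, 5))  # rows correspond to projects, columns to filtered candidates
    projects = pd.DataFrame({"name": [f"project_{i}" for i in range(5)]})
    filtered = pd.DataFrame({"name": [f"candidate_{i}" for i in range(5)]})

    p1, p2 = calc_matches(filtered, projects, sim, top_x=3)
    print(p1[["name", "similarity"]])
    print(p2[["name", "similarity"]])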