Update app.py
app.py CHANGED
@@ -14,15 +14,15 @@ def main():
     file_path = "merged-averaged-model_timings_2.1.0_12.1_NVIDIA_A10G_False.csv"
     # Columns to display
     columns_to_display = [
-        "model_name", "pretrained", "image_time", "text_time",
-        "
+        "model_name", "pretrained", "avg_score", "image_time", "text_time",
+        "image_shape", "text_shape",
         "output shape",
         "params (M)", "FLOPs (B)"
     ] # Specify the columns you want to display
 
     # Add header and description
     st.header("CLIP benchmarks - retrieval and inference")
-    st.write("CLIP benchmarks for inference and retrieval performance. Image size, context length and output dimensions are also
+    st.write("CLIP benchmarks for inference and retrieval performance. Image size, context length and output dimensions are also included. Retrieval performance comes from https://github.com/mlfoundations/open_clip/blob/main/docs/openclip_retrieval_results.csv.\n A10G, CUDA 12.1, Torch 2.1.0")
 
     # Call the display_csv function with the hardcoded file path and selected columns
     display_csv(file_path, columns_to_display)
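For context, a minimal sketch of what the display_csv helper called at the end of this hunk might look like. Its real definition lives elsewhere in app.py and is not part of this diff, so the pandas-based loading and the column filtering below are assumptions, not the app's actual implementation.

import pandas as pd
import streamlit as st

def display_csv(file_path: str, columns_to_display: list[str]) -> None:
    # Assumed behavior: load the benchmark CSV and render only the requested columns.
    df = pd.read_csv(file_path)
    # Keep only the requested columns that actually exist in the file,
    # so adding a column name (e.g. "avg_score") before the CSV has it does not crash the app.
    available = [c for c in columns_to_display if c in df.columns]
    st.dataframe(df[available])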