File size: 4,540 Bytes
748cc87
 
 
 
 
d79174d
748cc87
 
 
 
 
 
 
 
54b89ad
 
 
 
 
748cc87
 
 
 
 
54b89ad
748cc87
 
 
 
 
 
 
 
 
54b89ad
 
 
 
 
 
748cc87
 
 
 
 
 
 
 
54b89ad
748cc87
54b89ad
 
 
 
748cc87
 
 
 
 
 
 
 
 
 
54b89ad
 
 
 
 
 
748cc87
 
 
 
 
 
 
 
54b89ad
748cc87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
from pages.Functions.Dashboard_functions import plot_style_simple, plot_style_combined, print_results_tabs, pre_assessment_visualisation
# Show the project logo at the top of the sidebar on this page.
side_image = Image.open('Graphics/IL_Logo.png')
st.sidebar.image(side_image)

@st.cache
def convert_df_to_csv(df):
  """Serialise the assessment results to UTF-8 encoded CSV bytes.

  Only the four columns needed for a later re-upload / model comparison are
  exported. Cached so the conversion is not recomputed on every Streamlit
  rerun (the download button re-renders on each interaction).
  """
  export_columns = ['File_name', 'Prompt_no', 'Task', 'Score']
  return df.loc[:, export_columns].to_csv().encode('utf-8')

# Collects the results DataFrame of every assessment type that has data;
# the gallery section below offers these as selectable sources.
assessment_result_frames = {}


st.title('Assessment Summary')
st.header('Manual assessment')

# The whole section is wrapped in try/except KeyError: if 'eval_df' is not in
# session state yet (no assessment started), fall back to the placeholder view.
try:
  # Only render the summary once at least one manual evaluation is completed.
  if sum(st.session_state['eval_df']['manual_eval_completed'])>0:
    # Display file uploader
    manual_file_upload = st.file_uploader("Upload .csv with saved manual assessment for model comparison")

    # Create dataset for manual summary plots
    # NOTE(review): this binds a reference (not a copy), so adding the
    # 'Score' column below mutates st.session_state['eval_df'] in place —
    # presumably intentional so 'Score' persists across reruns; confirm.
    manual_eval_df = st.session_state['eval_df']
    # Map the Yes/No answers to booleans; unanswered rows become NaN.
    manual_eval_df['Score'] = manual_eval_df['manual_eval_task_score'].map({'Yes':True, 'No':False})
    # Keep only rows that were both selected for and completed manual eval.
    manual_results_df = manual_eval_df.loc[
      (manual_eval_df['manual_eval']==True)&
      (manual_eval_df['manual_eval_completed']==True)]

    assessment_result_frames['Manual assessment'] = manual_results_df

    # Add plots / tables to page
    # EAFP: pd.read_csv raises ValueError when no file has been uploaded
    # (manual_file_upload is None), in which case we plot without comparison.
    try:
      manual_file_upload_df = pd.read_csv(manual_file_upload).copy()
      print_results_tabs(file_upload=manual_file_upload, results_df=manual_results_df, file_upload_df=manual_file_upload_df)
    except ValueError:
      print_results_tabs(file_upload=manual_file_upload, results_df=manual_results_df)

    # Offer the filtered results for download so they can be re-uploaded
    # later as the comparison file.
    st.download_button(
      label="Download manual assessment data",
      data=convert_df_to_csv(manual_results_df),
      file_name='manual_assessment.csv',
      mime='text/csv',
    )
  else:
    # No completed evaluations yet: show the pre-assessment placeholder.
    pre_assessment_visualisation(type_str='manual')
except KeyError:
  # 'eval_df' missing from session state: assessment never started.
  pre_assessment_visualisation(type_str='manual')



st.write(' ')
st.header('Automated assessment')
# Mirrors the manual section above; KeyError from a missing 'auto_eval_df'
# session-state entry routes to the placeholder view.
try:
  # Create dataset for automated summary plots
  auto_eval_df = st.session_state['auto_eval_df']
  assessment_result_frames['Automated assessment'] = auto_eval_df

  # Display file uploader
  auto_file_upload = st.file_uploader("Upload .csv with saved automated assessment for model comparison")  

  # Add plots / tables to page
  # EAFP: pd.read_csv raises ValueError while no comparison file is uploaded.
  try:
    auto_file_upload_df = pd.read_csv(auto_file_upload).copy()
    print_results_tabs(file_upload=auto_file_upload, results_df=auto_eval_df, file_upload_df=auto_file_upload_df)
  except ValueError:
    print_results_tabs(file_upload=auto_file_upload, results_df=auto_eval_df)

  # Offer the automated results for download (same columns as manual export).
  st.download_button(
    label="Download automated assessment data",
    data=convert_df_to_csv(auto_eval_df),
    file_name='automated_assessment.csv',
    mime='text/csv',
  )
except KeyError:
  # No automated assessment has been run yet.
  pre_assessment_visualisation(type_str='automated')


# Gallery: browse the generated images behind the summary statistics,
# filtered by assessment method, task type, and correct/incorrect score.
try:
  # Start gallery
  st.header('Assessment gallery')

  assessment_method_selected = st.selectbox(
      'Select generation method',
      assessment_result_frames.keys())

  # With no completed assessment the selectbox above has no options and
  # returns None; the dict lookup below then raises KeyError, which the
  # trailing handler swallows, so only this hint is rendered.
  if not assessment_result_frames:
    st.write('Complete manual or automated assessment to access images in the gallery.')

  # Create needed info frames
  gallery_df = assessment_result_frames[assessment_method_selected]
  curr_prompt_dir = st.session_state['prompt_dir']

  # Select task
  tasks_available = gallery_df.Task.unique().tolist()
  task_selected = st.selectbox('Select task type',tasks_available)
  # Select image type (maps the human-readable label to the boolean 'Score')
  type_selected = st.selectbox(
      'Select image type',
      ('Correctly generated images', 'Incorrectly generated images'))
  type_selected_dict = {'Correctly generated images':True, 'Incorrectly generated images':False}
  # Create df for presented images
  gallery_df_print = gallery_df.loc[
    (gallery_df['Score']==type_selected_dict[type_selected])&
    (gallery_df['Task']==task_selected)]
  # Select presented image and prompt (1-based for the user, 0-based iloc).
  # An empty selection raises IndexError here, handled below.
  generation_number = st.number_input('Generation number',min_value=1, max_value=len(gallery_df_print), step=1)
  gallery_row_print = gallery_df_print.iloc[int(generation_number-1)]
  curr_Prompt_no = gallery_row_print.Prompt_no
  # Look up the prompt text by its ID in the prompt directory.
  curr_Prompt = curr_prompt_dir[curr_prompt_dir['ID']==int(curr_Prompt_no)].Prompt
  curr_Picture_index = gallery_row_print.Picture_index.item()
  # Plot prompt and image
  st.write('Prompt: '+curr_Prompt.item())
  st.image(st.session_state['uploaded_img'][curr_Picture_index],width=350)

except IndexError:
  # No image matches the selected task/score combination.
  # (Fixed typo: 'availabe' -> 'available'.)
  st.write('There is no image available in your selected category.')
except KeyError:
  # Missing session-state entries or the empty-selectbox None lookup above:
  # render nothing beyond the hint already shown.
  pass