APP test
Browse files- __init__.py +0 -0
- config.yaml +87 -0
- const.py +5 -0
- data/__init__.py +0 -0
- data/cli_dropbox.py +214 -0
- data/dataframes.py +116 -0
- data/datastructures.py +109 -0
- data/excels.py +336 -0
- data/test.py +36 -0
- dockerfile +33 -0
- main.py +173 -0
- requirements.txt +8 -0
- utils/__init__.py +0 -0
- utils/__pycache__/__init__.cpython-310.pyc +0 -0
- utils/__pycache__/btn_behaviors.cpython-310.pyc +0 -0
- utils/__pycache__/device.cpython-310.pyc +0 -0
- utils/__pycache__/indemnites.cpython-310.pyc +0 -0
- utils/__pycache__/times.cpython-310.pyc +0 -0
- utils/btn_behaviors.py +219 -0
- utils/device.py +11 -0
- utils/indemnites.py +78 -0
- utils/times.py +56 -0
__init__.py
ADDED
File without changes
|
config.yaml
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
data:
|
2 |
+
vehicules:
|
3 |
+
path: 20230731_GTP2023_crea_futur_EBP_AFF_PARAM_table_vehicules.xlsx
|
4 |
+
sheet_name: "type contrats"
|
5 |
+
columns: "B:C"
|
6 |
+
rows: 8
|
7 |
+
skiprows: Null
|
8 |
+
societes:
|
9 |
+
path: 20230731_GTP2023_sec_grh_pay_bd_gtp_v04_PARAM_table_services_Groupe1.xlsx
|
10 |
+
sheet_name: "SERVICES"
|
11 |
+
columns: "A"
|
12 |
+
rows: Null
|
13 |
+
skiprows: Null
|
14 |
+
contrats:
|
15 |
+
path: 20230731_GTP2023_sec_grh_pay_bd_gtp_v04_PARAM_table_contrats.xlsx
|
16 |
+
sheet_name: "type contrats"
|
17 |
+
columns: "B:C"
|
18 |
+
rows: Null
|
19 |
+
skiprows: Null
|
20 |
+
feries:
|
21 |
+
path: 20230731_GTP2023_sec_grh_pay_bd_gtp_v04_PARAM_table_feries_rtt.xlsx
|
22 |
+
sheet_name: "Calcul RTT"
|
23 |
+
columns: "E"
|
24 |
+
rows: Null
|
25 |
+
skiprows: Null
|
26 |
+
intervenants:
|
27 |
+
path: 20230823_GTP2023_EBP_MNR2021G_exp_intervenant_table_v02_00.xlsx
|
28 |
+
sheet_name: "Sheet"
|
29 |
+
columns: "A,D:F,M,N,Y,AN"
|
30 |
+
rows: Null
|
31 |
+
skiprows: Null
|
32 |
+
|
33 |
+
MNR_Affaire:
|
34 |
+
path: 20230823_GTP2023_EBP_MNR2021G_AFF_liste.xlsx
|
35 |
+
sheet_name: "Sheet"
|
36 |
+
columns: "A,D,E,J,K"
|
37 |
+
rows: Null
|
38 |
+
skiprows: Null
|
39 |
+
STM_Affaire:
|
40 |
+
path: 20230823_GTP2023_EBP_STM2021G_AFF_liste.xlsx
|
41 |
+
sheet_name: "Sheet"
|
42 |
+
columns: "A,D,E,J,K"
|
43 |
+
rows: Null
|
44 |
+
skiprows: Null
|
45 |
+
VALM_Affaire:
|
46 |
+
path: 20230823_GTP2023_EBP_VALM2019_AFF_liste.xlsx
|
47 |
+
sheet_name: "Sheet"
|
48 |
+
columns: "A,D,E,J,K"
|
49 |
+
rows: Null
|
50 |
+
skiprows: Null
|
51 |
+
MNR_Client:
|
52 |
+
path: 20230828_GTP2023_EBP_MNR2021G_CLI_liste_v02.xlsx
|
53 |
+
sheet_name: "Sheet"
|
54 |
+
columns: "A,B,E,Q:T"
|
55 |
+
rows: Null
|
56 |
+
skiprows: Null
|
57 |
+
STM_Client:
|
58 |
+
path: 20230828_GTP2023_EBP_STM2021G_CLI_liste_v02.xlsx
|
59 |
+
sheet_name: "Sheet"
|
60 |
+
columns: "A,W,E,Q:T"
|
61 |
+
rows: Null
|
62 |
+
skiprows: Null
|
63 |
+
VALM_Client:
|
64 |
+
path: 20230828_GTP2023_EBP_VALM2019_CLI_liste_v02.xlsx
|
65 |
+
sheet_name: "Sheet"
|
66 |
+
columns: "A,B,C,Q:T"
|
67 |
+
rows: Null
|
68 |
+
skiprows: Null
|
69 |
+
|
70 |
+
supplements:
|
71 |
+
path: 20230731_GTP2023_sec_grh_pay_bd_gtp_v04_PARAM_table_suppl_remu.xlsx
|
72 |
+
sheet_name: "Feuil1"
|
73 |
+
columns: "D"
|
74 |
+
rows: 8
|
75 |
+
skiprows: 3
|
76 |
+
absences:
|
77 |
+
path: 20230731_GTP2023_sec_grh_pay_bd_gtp_v04_PARAM_table_motif_absences_input.xlsx
|
78 |
+
sheet_name: "Feuil1"
|
79 |
+
columns: "D:E"
|
80 |
+
rows: 10
|
81 |
+
skiprows: 2
|
82 |
+
arrets:
|
83 |
+
path: bareme_arrets.xlsx
|
84 |
+
sheet_name: "Feuille1"
|
85 |
+
columns: "H:L"
|
86 |
+
rows: null
|
87 |
+
skiprows: null
|
const.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

# Project root discovery: walk up from this file's directory until a
# .gitignore marker file is found.
root_path = os.path.abspath(os.path.dirname(__file__))
while not os.path.isfile(os.path.join(root_path, '.gitignore')):
    parent = os.path.dirname(root_path)
    if parent == root_path:
        # Reached the filesystem root without finding .gitignore.  The
        # original loop spun forever here (dirname('/') == '/'); fall back
        # to this file's own directory instead.
        root_path = os.path.abspath(os.path.dirname(__file__))
        break
    root_path = parent
data/__init__.py
ADDED
File without changes
|
data/cli_dropbox.py
ADDED
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import pathlib
|
3 |
+
from typing import List
|
4 |
+
import dropbox
|
5 |
+
from dropbox.exceptions import AuthError
|
6 |
+
from dropbox import DropboxOAuth2FlowNoRedirect
|
7 |
+
|
8 |
+
import pandas as pd
|
9 |
+
import streamlit as st
|
10 |
+
from dotenv import load_dotenv
|
11 |
+
|
12 |
+
load_dotenv()
|
13 |
+
DROPBOX_ACCESS_TOKEN = os.environ.get("DROPBOX_ACCESS_TOKEN")
|
14 |
+
DROPBOX_REFRESH_TOKEN = os.environ.get("DROPBOX_REFRESH_TOKEN")
|
15 |
+
DROPBOX_APP_KEY = os.environ.get("DROPBOX_APP_KEY")
|
16 |
+
DROPBOX_APP_SECRET = os.environ.get("DROPBOX_APP_SECRET")
|
17 |
+
pd.set_option('display.max_columns', None)
|
18 |
+
|
19 |
+
|
20 |
+
def obtain_refresh_token(app_key, app_secret):
    """Run the interactive Dropbox OAuth2 no-redirect flow.

    The user is directed to an authorization URL, approves the app, and
    pastes the resulting code back into the console.  Both tokens are
    printed and returned.

    Returns:
        tuple: (access_token, refresh_token).
    """
    flow = DropboxOAuth2FlowNoRedirect(app_key, app_secret, token_access_type='offline')

    # Tell the user how to authorize the application.
    print("1. Go to:", flow.start())
    print("2. Click 'Allow' (you might have to log in first)")
    print("3. Copy the authorization code.")

    code = input("Enter the authorization code here: ").strip()

    # Trade the one-time authorization code for the token pair.
    result = flow.finish(code)
    print(result.access_token)
    print('\n\n')
    print(result.refresh_token)
    return result.access_token, result.refresh_token
def dropbox_connect():
    """Create a Dropbox client from the environment-held credentials.

    Returns:
        dropbox.Dropbox: an authenticated client.

    Raises:
        AuthError: when the credentials are rejected; the error is shown in
            the Streamlit UI before being re-raised.
    """
    try:
        return dropbox.Dropbox(oauth2_access_token=DROPBOX_ACCESS_TOKEN,
                               oauth2_refresh_token=DROPBOX_REFRESH_TOKEN,
                               app_key=DROPBOX_APP_KEY,
                               app_secret=DROPBOX_APP_SECRET)
    except AuthError as err:
        # The original fell through to `return dbx` with `dbx` never bound,
        # raising UnboundLocalError and masking the real authentication
        # problem.  Surface the error and propagate it instead.
        st.error(f'Erreur: Impossible de se connecter à la base de données - {err}')
        raise
def dropbox_list_files(path):
    """Return a pandas DataFrame of files in a Dropbox app-directory folder.

    Args:
        path (str): folder path inside the Dropbox app directory.

    Returns:
        pd.DataFrame | None: one row per file (name, display path,
        client/server modification times), newest first; None when the
        listing fails.
    """
    dbx = dropbox_connect()

    try:
        entries = dbx.files_list_folder(path).entries
        files_list = [
            {
                'name': entry.name,
                'path_display': entry.path_display,
                'client_modified': entry.client_modified,
                'server_modified': entry.server_modified,
            }
            for entry in entries
            if isinstance(entry, dropbox.files.FileMetadata)  # skip sub-folders
        ]
        # Passing the columns explicitly keeps the sort below from raising
        # KeyError when the folder is empty (the original crashed there).
        df = pd.DataFrame.from_records(
            files_list,
            columns=['name', 'path_display', 'client_modified', 'server_modified'],
        )
        return df.sort_values(by='server_modified', ascending=False)

    except Exception as e:
        print('Error getting list of files from Dropbox: ' + str(e))
def dropbox_download_file(dropbox_file_path, local_file_path, load_data: bool = True):
    """Download a file from Dropbox.

    Args:
        dropbox_file_path (str): path of the file in the Dropbox app directory.
        local_file_path (str): where to also write the file locally; only
            used when ``load_data`` is True.
        load_data (bool): when True, persist the content to
            ``local_file_path`` in addition to returning it.

    Returns:
        bytes | None: the raw file content, or None when the file is
        missing or the download fails.
    """
    try:
        dbx = dropbox_connect()

        metadata = dbx.files_get_metadata(dropbox_file_path)
        if metadata:
            metadata, result = dbx.files_download(path=dropbox_file_path)
            if load_data:
                with open(local_file_path, 'wb') as f:
                    f.write(result.content)
            # Returned regardless of load_data: callers pass load_data=False
            # to get the content in memory without a local copy.
            return result.content
        else:
            print("Fichier non trouvé")
            return None
    except dropbox.exceptions.ApiError as e:
        # Only ApiError carries the `.error` attribute inspected here; the
        # original caught bare Exception and crashed with AttributeError on
        # any other failure type.
        if e.error.is_path() and e.error.get_path().is_not_found():
            print("Fichier non trouvé")
        return None
    except Exception as e:
        print('Error downloading file from Dropbox: ' + str(e))
        return None
def dropbox_upload_file(local_path, local_filename, dropbox_data_path, dropbox_file_name: str = None):
    """Upload a local file to a folder of the Dropbox app directory.

    Args:
        local_path (str): directory containing the local file.
        local_filename (str): name of the local file.
        dropbox_data_path (str): destination folder in the Dropbox app directory.
        dropbox_file_name (str, optional): name to store the file under;
            defaults to ``local_filename``.

    Example:
        dropbox_upload_file('.', 'test.csv', '/stuff/test.csv')

    Returns:
        The Dropbox file metadata, or None when the upload failed.
    """
    try:
        dbx = dropbox_connect()

        source = os.path.join(local_path, local_filename)
        target_name = dropbox_file_name if dropbox_file_name else local_filename
        target = os.path.join(dropbox_data_path, target_name)

        with open(source, "rb") as handle:
            # Make sure the destination folder exists before uploading.
            if not dropbox_check_path_exists(dropbox_data_path):
                dropbox_create_folder(os.path.dirname(dropbox_data_path))

            meta = dbx.files_upload(handle.read(), target, mode=dropbox.files.WriteMode("overwrite"))
            print("File uploaded successfully!")
            return meta
    except Exception as e:
        print('Error uploading file to Dropbox: ' + str(e))
def dropbox_upload_bytefile(dropbox_data_path, dropbox_file_name: str, bytes):
    """Upload an in-memory buffer (e.g. BytesIO) to the Dropbox app directory.

    Args:
        dropbox_data_path (str): destination folder.
        dropbox_file_name (str): destination file name.
        bytes: buffer exposing ``getvalue()``.  The name shadows the builtin
            but is kept because callers pass it by keyword.

    Returns:
        The Dropbox file metadata, or None when the upload failed.
    """
    target = os.path.join(dropbox_data_path, dropbox_file_name)

    try:
        dbx = dropbox_connect()
        # Create the destination folder on first use.
        if not dropbox_check_path_exists(dropbox_data_path):
            dropbox_create_folder(os.path.dirname(dropbox_data_path))
        meta = dbx.files_upload(bytes.getvalue(), target, mode=dropbox.files.WriteMode("overwrite"))
        print("File uploaded successfully!")
        return meta
    except Exception as e:
        print('Error uploading file to Dropbox: ' + str(e))
def dropbox_create_folder(dropbox_folder_path):
    """Create a folder in the Dropbox app directory.

    If an intermediate path component is missing, every component of the
    path is created one by one from the root.  An already-existing folder is
    reported and left untouched.

    Args:
        dropbox_folder_path (str): The path to the folder in the Dropbox
            app directory.

    Example:
        dropbox_create_folder('/stuff')
    """

    try:
        dbx = dropbox_connect()

        dbx.files_create_folder_v2(dropbox_folder_path)
        print("Folder created successfully!")
    except dropbox.exceptions.ApiError as e:
        if e.error.is_path() and e.error.get_path().is_conflict():
            # Conflict means the folder is already there; nothing to do.
            print("Folder already exists!")
        elif e.error.is_path() and e.error.get_path().is_not_found():
            # A parent folder is missing: create every component of the
            # path from the root downwards.
            # Split the folder path into individual components
            path_components = dropbox_folder_path.split("/")[1:]
            current_path = ""
            for component in path_components:
                current_path += "/" + component
                try:
                    dbx.files_create_folder_v2(current_path)
                    print(f"Created folder: {current_path}")
                except dropbox.exceptions.ApiError as e:
                    # Conflicts on already-existing intermediate folders end
                    # up here too; they are only logged.
                    print(f"Error creating folder: {e}")
        else:
            print(f"Error creating folder: {e}")
def dropbox_check_path_exists(dropbox_folder_path):
    """Check whether a path exists in the Dropbox app directory.

    Args:
        dropbox_folder_path (str): path to probe in the Dropbox app directory.

    Example:
        dropbox_check_path_exists('/stuff')

    Returns:
        bool: True when metadata could be fetched for the path, False when
        the path is missing or the lookup failed (failures are logged).
    """
    try:
        dbx = dropbox_connect()
        dbx.files_get_metadata(dropbox_folder_path)
        return True
    except dropbox.exceptions.ApiError as e:
        # Anything other than a plain "not found" is worth logging.
        if not (e.error.is_path() and e.error.get_path().is_not_found()):
            print(f"Error checking if folder exists: {e}", dropbox_folder_path)
        return False
@st.cache_data
def dropbox_load_config_files(dropbox_datapath: str, local_datapath: str, excel_sources: dict):
    """Download every configured Excel source from Dropbox to the local folder.

    Args:
        dropbox_datapath: root Dropbox folder holding the source files.
        local_datapath: local folder to download into.
        excel_sources: mapping of source name -> config dict with a 'path'
            entry.  (The previous ``List[dict]`` annotation contradicted the
            ``.items()`` usage.)
    """
    for source in excel_sources.values():
        dropbox_download_file(os.path.join(dropbox_datapath, source['path']),
                              os.path.join(local_datapath, source['path']))
|
210 |
+
# obtain_refresh_token(DROPBOX_APP_KEY, DROPBOX_APP_SECRET)
|
211 |
+
# df = dropbox_list_files('/SEC_IND_GTP2023_OUTPUT')
|
212 |
+
# dropbox_create_folder('/SEC_IND_GTP2023_OUTPUT/TEST/FIT')
|
213 |
+
# dropbox_create_folder('/SEC_IND_GTP2023_OZUTPUT/TEST/FIT')
|
214 |
+
# print(df)
|
data/dataframes.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List
|
2 |
+
import pandas as pd
|
3 |
+
|
4 |
+
|
5 |
+
def merge_clients(data):
    """Concatenate the three per-company client tables into data['Clients'].

    Tags each source table with its 'Prestataire', renames the columns
    positionally, keeps only rows typed 'Client', zero-fills the distance
    and duration columns, then removes the three per-company entries.
    Mutates *data* in place.
    """
    mnr_df = data['MNR_Client']
    mnr_df['Prestataire'] = 'MNR'

    stm_df = data['STM_Client']
    stm_df['Prestataire'] = 'STM'

    valm_df = data['VALM_Client']
    valm_df['Prestataire'] = 'VALM'

    data['Clients'] = pd.concat([mnr_df, stm_df, valm_df], ignore_index=True)
    # Positional rename: relies on the column order read from the Excel files.
    data['Clients'].rename(columns=dict(zip(data['Clients'].columns, ['Code client', 'Type client', 'Nom client', 'Dist STE', 'Duration STE', 'Dist VAL', 'Duration VAL', 'Prestataire'])), inplace=True)
    data['Clients'] = data['Clients'][data['Clients']['Type client'] == 'Client']

    data['Clients']['Dist STE'] = data['Clients']['Dist STE'].fillna(0)
    data['Clients']['Duration STE'] = data['Clients']['Duration STE'].fillna(0)
    data['Clients']['Dist VAL'] = data['Clients']['Dist VAL'].fillna(0)
    data['Clients']['Duration VAL'] = data['Clients']['Duration VAL'].fillna(0)

    del data['MNR_Client']
    del data['STM_Client']
    del data['VALM_Client']
def add_motifs_absences(df: pd.DataFrame, motifs: pd.DataFrame):
    """Append one synthetic absence row per (intervenant, motif) to *df*.

    Args:
        df: affaires table; must contain the long intervenant column plus
            'Code client', 'Nom client', 'Code chantier' and 'Libellé'.
        motifs: absence-motif table with a 'Libellé GTP2023' column.  (The
            previous ``List[str]`` annotation contradicted this usage.)

    Returns:
        A new DataFrame with the synthetic absence rows appended after the
        original rows.
    """
    # Default client info for absence rows: the SECMI row when present,
    # literal 'SECMI' otherwise.  Hoisted out of the loop — it is invariant.
    secmi_rows = df[df['Nom client'].isin(['SECMI'])]
    code_client = secmi_rows.iloc[0]['Code client'] if secmi_rows.shape[0] > 0 else 'SECMI'
    nom_client = secmi_rows.iloc[0]['Nom client'] if secmi_rows.shape[0] > 0 else 'SECMI'

    intervenants = df['Code intervenant (Identifiant du document de référence -> Document de vente)'].unique()
    libelles = motifs['Libellé GTP2023'].tolist()

    # Build every synthetic row first and concatenate once: the original
    # called pd.concat inside the loop, copying the whole frame for each
    # intervenant (quadratic behaviour).  Row order is preserved.
    rows = [
        {
            'Code intervenant (Identifiant du document de référence -> Document de vente)': intervenant,
            'Code client': code_client,
            'Nom client': nom_client,
            'Code chantier': '000000',
            'Libellé': motif,
        }
        for intervenant in intervenants
        for motif in libelles
    ]
    if not rows:
        return df
    return pd.concat([df, pd.DataFrame(rows)], ignore_index=True)
+
|
48 |
+
def merge_affaires(data):
    """Concatenate the three per-company affaires tables into data['affaire'].

    Adds the synthetic absence rows per intervenant, tags each table with
    its 'Prestataire', builds a combined 'Code affaire' label and removes
    the three per-company entries.  Mutates *data* in place.
    """
    mnr_df = data['MNR_Affaire']
    mnr_df = add_motifs_absences(mnr_df, data['absences'])
    mnr_df['Prestataire'] = 'MNR'

    stm_df = data['STM_Affaire']
    stm_df = add_motifs_absences(stm_df, data['absences'])
    stm_df['Prestataire'] = 'STM'

    valm_df = data['VALM_Affaire']
    valm_df = add_motifs_absences(valm_df, data['absences'])
    valm_df['Prestataire'] = 'VALM'

    data['affaire'] = pd.concat([mnr_df, stm_df, valm_df], ignore_index=True)
    # 'Code affaire' is the display label: '<chantier> - <libellé>'.
    data['affaire']['Code affaire'] = data['affaire']['Code chantier'] + ' - ' + data['affaire']['Libellé']
    del data['affaire']['Code chantier']
    del data['affaire']['Libellé']
    # Positional rename: relies on the column order produced above.
    data['affaire'].rename(columns=dict(zip(data['affaire'].columns, ['Code intervenant', 'Code client', 'Nom client', 'prestataire', 'Code affaire'])), inplace=True)
    del data['MNR_Affaire']
    del data['STM_Affaire']
    del data['VALM_Affaire']
def merge_intervenants_affaires(data):
    """Left-join affaires with intervenants into data['all'].

    Prepends a '-' placeholder row (used as the widgets' empty selection),
    fills defaults for secteur/hours/cost and drops rows missing the key
    identity columns.  Mutates *data* in place.
    """
    data['all'] = data['affaire'].merge(data['intervenants'], on='Code intervenant', how='left')
    data['all'].rename(columns=dict(zip(['Code intervenant', 'Code affaire', 'Nom client', 'Code secteur géographique', 'Veh_Affecte', 'Durée Hebdo', 'Coût horaire'], ['intervenant', 'affaire', 'client', 'secteur', 'vehicule affecte', 'contrat heures', 'cout horaire'])), inplace=True)
    # Placeholder row shown as the default/empty choice in the UI.
    new_row = pd.DataFrame([['-'] * (data['all'].shape[1])], columns=data['all'].columns)
    data['all'] = pd.concat([new_row, data['all']], ignore_index=True)
    data['all']['secteur'] = data['all']['secteur'].fillna('STE')
    data['all']['contrat heures'] = data['all']['contrat heures'].fillna(0)
    data['all']['cout horaire'] = data['all']['cout horaire'].fillna(0)
    data['all'] = data['all'].dropna(subset=['intervenant', 'client', 'affaire'])
def complete_vehicules(data):
    """Add a display-label column to the vehicle table and append the two
    non-fleet options ('Perso' and 'Service affecté').  Mutates *data* in
    place."""
    vehicules = data['vehicules']
    vehicules['vehicules'] = 'Collectif - ' + vehicules['IMMAT'] + ' - ' + vehicules['Description']
    # Extra choices that do not correspond to a fleet vehicle.
    for label in ('Perso', 'Service affecté'):
        vehicules.loc[len(vehicules)] = [None, None, label]
def complete_supplements(data):
    """Normalise the single-column supplements table.

    Renames the column to 'supplements', strips Excel carriage-return
    artefacts, prepends a '-' placeholder row and drops the supplements
    handled elsewhere.  Mutates *data* in place.
    """
    supplements = data['supplements']
    supplements.rename(columns=dict(zip(supplements.columns, ['supplements'])), inplace=True)
    # '_x000D_' is the escaped carriage return Excel leaves in cell text.
    supplements['supplements'] = supplements['supplements'].str.replace('_x000D_', ' ')
    placeholder = pd.DataFrame([['-'] * supplements.shape[1]], columns=supplements.columns)
    merged = pd.concat([placeholder, supplements], ignore_index=True)
    excluded = [' H Peinture ', ' H Qualif. Électrique ', ' H Soudure ']
    data['supplements'] = merged[~merged['supplements'].isin(excluded)]
def complete_affaires(data):
    """Rename the affaires key columns to short names and prepend a '-'
    placeholder row.  Mutates *data* in place."""
    mapping = dict(zip(['Code intervenant', 'Code affaire', 'Nom client'],
                       ['intervenant', 'affaire', 'client']))
    data['affaire'].rename(columns=mapping, inplace=True)
    placeholder = pd.DataFrame([['-'] * data['affaire'].shape[1]], columns=data['affaire'].columns)
    data['affaire'] = pd.concat([placeholder, data['affaire']], ignore_index=True)
def complete_intervenants(data):
    """Prepare the intervenants table.

    Renames the key columns to short names, normalises 'Employeur' (keeps
    the second token, defaulting to 'SECMI'), prepends a '-' placeholder
    row and defaults the weekly contract hours.  Mutates *data* in place.
    """
    intervenants = data['intervenants']
    mapping = dict(zip(['Code intervenant', 'Code secteur géographique', 'Veh_Affecte', 'Durée Hebdo', 'Coût horaire'],
                       ['intervenant', 'secteur', 'vehicule affecte', 'contrat heures', 'cout horaire']))
    intervenants.rename(columns=mapping, inplace=True)
    # Keep only the second token of the employer label; missing -> SECMI.
    intervenants['Employeur'] = intervenants['Employeur'].apply(
        lambda value: value.split()[1] if pd.notna(value) else None)
    intervenants['Employeur'] = intervenants['Employeur'].fillna('SECMI')
    placeholder = pd.DataFrame([['-'] * intervenants.shape[1]], columns=intervenants.columns)
    data['intervenants'] = pd.concat([placeholder, intervenants], ignore_index=True)
    #TODO to remove, for test only. Fill with 0
    data['intervenants']['contrat heures'] = data['intervenants']['contrat heures'].fillna(35)
def complete_arrets(data):
    """Rename the work-stoppage scale columns, strip Excel carriage-return
    artefacts from the absence codes and zero-fill missing rates.  Mutates
    *data* in place."""
    arrets = data['arrets']
    arrets.rename(columns=dict(zip(arrets.columns,
                                   ['code_absence', 'H-35.0', 'H-37.0', 'H-37.5', 'H-39.0'])),
                  inplace=True)
    # '_x000D_' is the escaped carriage return Excel leaves in cell text.
    arrets['code_absence'] = arrets['code_absence'].str.replace('_x000D_', ' ')
    arrets.fillna(0, inplace=True)
def cast_specifics_to_str(data):
    """Force the key/id columns of each table to ``str`` so downstream
    comparisons and widget values behave consistently.  Mutates *data* in
    place."""
    casts = {
        'all': {'intervenant': str, 'affaire': str, 'client': str, 'prestataire': str},
        'vehicules': {'IMMAT': str, 'Description': str},
        'affaire': {'intervenant': str, 'affaire': str, 'client': str, 'prestataire': str},
        'intervenants': {'intervenant': str, 'secteur': str, 'vehicule affecte': str},
    }
    for table, columns in casts.items():
        data[table] = data[table].astype(columns)
data/datastructures.py
ADDED
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import numpy as np
|
3 |
+
from utils.times import date_to_week_day
|
4 |
+
|
5 |
+
def generate_society_fit(form: dict):
    """Project the raw form onto the fields persisted in a society FIT record.

    Args:
        form: submitted form; must contain every whitelisted key below.

    Returns:
        dict: a new dict holding only the whitelisted fields, in order.
    """
    fields = (
        "year", "month", "week", "date", "prestataire", "client", "affaire",
        "intervenant", "vehicule", "location", "activities", "worked_hours",
        "night_hours", "drive_hours",
    )
    return {field: form[field] for field in fields}
def generate_intervenant_monthly_payroll(form: pd.DataFrame):
    """Aggregate an intervenant's weekly FIT history into one payroll row.

    Args:
        form: historic rows for one intervenant/week; must contain the
            identity columns ('nom', 'prenom', 'year', 'week') and the
            hour/bonus/absence columns summed below.

    Returns:
        dict: payroll summary keyed by the French payroll labels.
    """
    historic_df = form
    column_sums = {}
    for column in historic_df.columns:
        # Sum only numeric columns with at least one value, skipping the
        # period/identity columns.
        # NOTE(review): `dtype in [int, float]` is platform dependent (it
        # misses e.g. int32 on Windows) — confirm and consider
        # np.issubdtype(dtype, np.number) instead.
        if historic_df[column].dtype in [int, float] \
            and not np.isnan(historic_df[column]).all() \
            and column not in ['year', 'month', 'week', 'date', 'public_holyday']:
            column_sums[column] = historic_df[column].sum()

    # Keys below are the exact labels of the payroll export; several source
    # column names ('meal_nonus', 'holyday_hours') look like typos but are
    # the keys actually produced upstream — kept for data compatibility.
    payroll = {
        "Nom": historic_df["nom"].values[0],
        "Prénom": historic_df["prenom"].values[0],
        "Semaine": f'{historic_df["year"].values[0]}-s{historic_df["week"].values[0]}',
        "H Réal.": column_sums['worked_hours'],
        "H Récup.": 0.,
        "H Sup. Contrat": 0.,
        "H Nuit": column_sums['night_hours'],
        "H route": column_sums['drive_hours'],
        "H Neg Non Just": 0.,
        "H Neg Just": 0.,
        "Ast Sem": column_sums['on_call_bonus'],
        "Ast Sem/JF": 0.,
        "Prime Interv.": column_sums['intervention_bonus'],
        "Prime Chef Equipe": column_sums['team_leader_bonus'],
        "Prime Transp. Caisse": column_sums['personal_tools_bonus'],
        "Maladie": column_sums['maladie'],
        "Arret Travail": column_sums['arret_travail'],
        "Congés Payés": column_sums['conges_payes'],
        "Congés Sans Solde": column_sums['conges_sans_solde'],
        "RTT": column_sums['rtt'],
        "Formation": column_sums['formation'],
        "Evénement Familial": column_sums['evenement_familial'],
        "Panier": column_sums['meal_nonus'],
        "frais": 0.,
        "H Samedi": column_sums['saturday_hours'],
        # "H Dimanche / Férié": column_sums['sunday_hours'] + column_sums['holyday_hours'],
        "H Dimanche": column_sums['sunday_hours'],
        "H Férié": column_sums['holyday_hours'],

    }
    return payroll
def generate_intervenant_fit(form: dict):
    """Build the row written into an intervenant's weekly FIT sheet.

    Args:
        form: submitted day form; expects client/affaire/date/hour and
            bonus fields as produced by the entry form (including the
            'meal_nonus' key, kept as-is for data compatibility).

    Returns:
        dict: one FIT row; worked hours are placed in the weekday column
        matching the form's date, all other weekday columns get 0.
    """
    data = form
    # Resolve the weekday once instead of once per weekday column (the
    # original called date_to_week_day seven times with the same argument).
    week_day = date_to_week_day(data["date"])

    def _hours_for(day: str):
        # Worked hours land only in the column of the actual weekday.
        return data["worked_hours"] if week_day == day else 0

    fit_dict = {
        "CLIENT": data["client"],
        "Nom du responsable client": np.nan,
        "AFFAIRE": data["affaire"],
        "Lundi": _hours_for('lundi'),
        "Mardi": _hours_for('mardi'),
        "Mercredi": _hours_for('mercredi'),
        "Jeudi": _hours_for('jeudi'),
        "Vendredi": _hours_for('vendredi'),
        "Samedi": _hours_for('samedi'),
        "Dimanche": _hours_for('dimanche'),
        "Travaux réalisés": data["activities"],
        "H.\njour": data["worked_hours"] - data["night_hours"],
        "H.\nnuit (1)": data["night_hours"],
        "H.\nroute": data["drive_hours"],
        "Panier (EUR)": data["meal_nonus"],
        "Déplacement(EUR)": data["mileage_allowances_bonus"],
        "Suppléments (EUR)": data["personal_tools_bonus"] + data["intervention_bonus"] + data["on_call_bonus"] + data["team_leader_bonus"],
        "Véh. Perso (VP)\nou\nSociété (VS)": "VP" if data["vehicule"] == 'Perso' else "VS",
        "Localisation": data["location"],
    }
    return fit_dict
def generate_society_payroll(form: dict):
    """Build the payroll summary row for a society from a submitted form.

    Returns:
        dict: payroll row with the employer/supplier labels, the period
        fields, a '<year>-s<week>' label and the hour totals.
    """
    data = form
    payroll = {
        "Prestataire": data["employeur"],
        "Fournisseur": data["prestataire"],
    }
    # Period and identity fields are copied through unchanged.
    for field in ("year", "month", "client", "affaire", "intervenant", "vehicule"):
        payroll[field] = data[field]
    payroll["Semaine"] = f'{data["year"]}-s{data["week"]}'
    payroll["H Réal."] = data["worked_hours"]
    payroll["H Nuit"] = data["night_hours"]
    payroll["H route"] = data["drive_hours"]
    return payroll
data/excels.py
ADDED
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from io import BytesIO
|
2 |
+
import os
|
3 |
+
from typing import List, Tuple
|
4 |
+
from openpyxl import Workbook
|
5 |
+
from openpyxl.styles import Font
|
6 |
+
from openpyxl.styles import Alignment
|
7 |
+
from openpyxl.styles import numbers
|
8 |
+
from openpyxl.styles.borders import Border, Side
|
9 |
+
import streamlit as st
|
10 |
+
import pandas as pd
|
11 |
+
import numpy as np
|
12 |
+
from data.cli_dropbox import dropbox_download_file, dropbox_upload_bytefile
|
13 |
+
|
14 |
+
from .dataframes import complete_affaires, complete_arrets, complete_intervenants, complete_supplements, complete_vehicules, merge_clients, merge_affaires, merge_intervenants_affaires,cast_specifics_to_str
|
15 |
+
|
16 |
+
def check_paths(paths: List[str]) -> List[str]:
    """Return the subset of *paths* that are not existing regular files.

    Args:
        paths: candidate file paths.

    Returns:
        The missing/non-file paths; empty list when everything is present.
        (The previous ``-> bool`` annotation did not match the actual
        return value.)
    """
    # os.path.isfile already implies existence, so one check suffices.
    return [path for path in paths if not os.path.isfile(path)]
def load_excels(datapath: str, excel_sources: dict) -> dict:
    """Read every configured Excel source into a DataFrame.

    Args:
        datapath: local folder containing the Excel files.
        excel_sources: mapping of source name -> read options
            (path / sheet_name / columns / rows / skiprows), as loaded from
            config.yaml.

    Returns:
        dict: source name -> DataFrame.  (The previous ``List[dict]`` and
        ``-> pd.DataFrame`` annotations did not match the actual usage.)
    """
    data = {}
    for key, options in excel_sources.items():
        data[key] = pd.read_excel(
            os.path.join(datapath, options['path']),
            sheet_name=options['sheet_name'],
            usecols=options['columns'],
            nrows=options['rows'],
            skiprows=options['skiprows'],
        )
    return data
@st.cache_data
def load_transform_data(datapath: str, excel_sources: dict) -> dict:
    """Load all configured Excel sources and run the full transform pipeline.

    Args:
        datapath: local folder holding the Excel files.
        excel_sources: mapping of source name -> read options (see config.yaml).

    Returns:
        dict of prepared DataFrames, or None (with a Streamlit error shown)
        when some source files are missing.
    """
    # Fail early with an explicit message if any configured file is absent.
    not_files = check_paths([os.path.join(datapath, excel['path']) for excel in excel_sources.values()])
    if len(not_files):
        st.error(f'Erreur: une partie de la base de données n\'est pas accessible. {not_files}')
        return

    data = load_excels(datapath, excel_sources)
    # Order matters: the merges build the combined tables that the
    # complete_* steps then mutate.
    merge_clients(data)
    merge_affaires(data)
    merge_intervenants_affaires(data)

    complete_vehicules(data)
    complete_supplements(data)
    complete_intervenants(data)
    complete_affaires(data)
    complete_arrets(data)
    cast_specifics_to_str(data)
    return data
def filter_multiple_conditions_data(df, filters):
    """Keep the rows of *df* whose filtered columns hold accepted values.

    Args:
        df: source DataFrame.
        filters: mapping of column name -> accepted value(s).

    Returns:
        The filtered DataFrame; *df* itself is untouched.

    NOTE(review): ``isin(filters.values())`` tests every filtered column
    against the union of *all* filter values, not column-by-column.  When
    two filters' value sets overlap this accepts rows a per-column match
    (``isin(filters)``) would reject — confirm which semantics callers
    expect before changing it.
    """
    filtered_df = df[df[filters.keys()].isin(filters.values()).all(axis=1)]
    return filtered_df
def draw_border(sheet, start_cell: Tuple[int, int], end_cell: Tuple[int, int]):
    """Apply a thin border to every cell of a rectangular range.

    Args:
        sheet: the openpyxl worksheet to modify.
        start_cell: 1-based (row, column) of the top-left corner.
        end_cell: 1-based (row, column) of the bottom-right corner.
    """
    thin = Side(style='thin')
    border_style = Border(left=thin, right=thin, top=thin, bottom=thin)

    # Translate the (row, column) pairs into an 'A1:B2'-style range.
    top_left = sheet.cell(row=start_cell[0], column=start_cell[1])
    bottom_right = sheet.cell(row=end_cell[0], column=end_cell[1])
    cell_range = '{}:{}'.format(top_left.coordinate, bottom_right.coordinate)

    for row in sheet[cell_range]:
        for cell in row:
            cell.border = border_style
def get_fit_totals(dataframe):
    """Sum the numeric columns of a FIT sheet.

    Args:
        dataframe: the FIT rows; must contain at least the 'H.\\njour' and
            'H.\\nnuit (1)' columns.

    Returns:
        dict: column name -> sum for every numeric, non-all-NaN column,
        plus 'rows' (row count) and 'worked_hours' (day + night hours).
    """
    column_sums = {}
    for column in dataframe.columns:
        # np.issubdtype covers every numeric width (int32/int64,
        # float32/float64, ...), whereas the previous `dtype in [int, float]`
        # check was platform dependent (e.g. it missed int32 on Windows).
        if np.issubdtype(dataframe[column].dtype, np.number) \
                and not np.isnan(dataframe[column]).all():
            column_sums[column] = dataframe[column].sum()
    column_sums['rows'] = dataframe.shape[0]
    column_sums['worked_hours'] = column_sums['H.\njour'] + column_sums['H.\nnuit (1)']
    return column_sums
def load_fit(datapath: str, intervenant: str, year: str, month: str, week: str):
    """Download an intervenant's weekly FIT workbook and cache its contents.

    The workbook is fetched from Dropbox, read (rows 7-16 of sheet 'Sheet'),
    totalled with get_fit_totals, and stored under
    st.session_state['fit'][intervenant][year][month][week].

    Returns:
        pd.DataFrame | None: the FIT rows, or None when the download or the
        local read failed.
    """
    filename = f'{intervenant}_{year}_{month}_{week}_FIT.xlsx'
    # NOTE(review): the last remote path segment '(unknown)' looks like a
    # placeholder — confirm the intended remote file name (probably the
    # same `filename` built above).
    if dropbox_download_file(f'/SEC_IND_GTP2023_OUTPUT/FIT/{intervenant}/{year}/{month}/{week}/(unknown)', os.path.join(datapath, filename)):
        #TODO get file from dropbox
        if os.path.exists(os.path.join(datapath, filename)) and os.path.isfile(os.path.join(datapath, filename)):
            # skiprows/nrows select the fixed FIT data area of the sheet.
            data = pd.read_excel(os.path.join(datapath, filename), sheet_name='Sheet', skiprows=6, nrows=10)
            data.dropna(axis=0, how='all', inplace=True)
            totals = get_fit_totals(data)

            # Build the nested session-state cache level by level.
            if 'fit' not in st.session_state.keys():
                st.session_state['fit'] = {}

            if intervenant not in st.session_state['fit'].keys():
                st.session_state['fit'][intervenant] = {}
            if year not in st.session_state['fit'][intervenant].keys():
                st.session_state['fit'][intervenant][year] = {}
            if month not in st.session_state['fit'][intervenant][year].keys():
                st.session_state['fit'][intervenant][year][month] = {}
            if week not in st.session_state['fit'][intervenant][year][month].keys():
                st.session_state['fit'][intervenant][year][month][week] = {}
            st.session_state['fit'][intervenant][year][month][week] = {
                'data': data,
                'totals': totals
            }

            return data
    print('error loading fit')
    return None
async def update_society_fit(dropbox_datapath: str, form: dict):
    """Append one FIT entry to the society's weekly CSV on Dropbox.

    Downloads the existing CSV (if any), appends *form* as a new row and
    re-uploads the file. Returns the resulting DataFrame.
    """
    society = form['prestataire']
    year = form['year']
    month = form['month']
    week = form['week']
    dropbox_path = f'{dropbox_datapath}/SOCIETE/{society}/{year}/{month}/{week}'

    filename = f'{society}_{year}_{month}_{week}_FIT.csv'
    fit_df = pd.DataFrame([form])

    # Build the remote path with '/' explicitly (as the other update_*
    # helpers do): os.path.join would emit '\\' on Windows, which Dropbox
    # paths do not accept.
    fit = dropbox_download_file(f'{dropbox_path}/{filename}', '', False)
    if fit:
        fit = pd.read_csv(BytesIO(fit), index_col=0)
        fit_df = pd.concat([fit, fit_df], ignore_index=True)

    csv_data = BytesIO()
    fit_df.to_csv(csv_data, index=False)
    dropbox_upload_bytefile(dropbox_data_path=dropbox_path, dropbox_file_name=filename, bytes=csv_data)
    return fit_df
|
133 |
+
|
134 |
+
|
135 |
+
async def update_society_payroll(dropbox_datapath: str, form: dict):
    """Append one cross-billing row to the monthly prestations workbook on Dropbox.

    Downloads the existing xlsx (if any), appends *form* as a new row and
    re-uploads the file. Returns the resulting DataFrame.
    """
    prestataire = form['Prestataire']
    fournisseur = form['Fournisseur']
    year = form['year']
    month = form['month']
    dropbox_path = f'{dropbox_datapath}/PRESTATIONS/{prestataire}/{year}/{month}'

    filename = f'{prestataire}_{fournisseur}_{year}_{month}_PRESTATIONS_CROISEES.xlsx'
    payroll_df = pd.DataFrame([form])

    # '/' is used explicitly instead of os.path.join, which would emit '\\'
    # on Windows — Dropbox paths require forward slashes.
    payroll = dropbox_download_file(f'{dropbox_path}/{filename}', '', False)
    if payroll:
        payroll = pd.read_excel(BytesIO(payroll))
        payroll_df = pd.concat([payroll, payroll_df], ignore_index=True)

    excel_data = BytesIO()
    payroll_df.to_excel(excel_data, index=False)
    dropbox_upload_bytefile(dropbox_data_path=dropbox_path, dropbox_file_name=filename, bytes=excel_data)
    return payroll_df
|
154 |
+
|
155 |
+
async def update_historical_week(dropbox_datapath: str, form: dict):
    """Append *form* to the intervenant's weekly 'historique.xlsx' on Dropbox.

    Downloads the existing history (if any), appends *form* as a new row and
    re-uploads the file. Returns the resulting DataFrame.
    """
    intervenant = form['intervenant']
    year = form['year']
    month = form['month']
    week = form['week']
    dropbox_path = f'{dropbox_datapath}/FIT/{intervenant}/{year}/{month}/{week}'

    historic_df = pd.DataFrame([form])

    historic = dropbox_download_file(dropbox_path + '/historique.xlsx', '', False)
    if historic:
        # Bug fix: wrap the downloaded bytes in BytesIO, as update_society_payroll
        # does — recent pandas versions refuse a bare bytes object here.
        historic = pd.read_excel(BytesIO(historic))
        historic_df = pd.concat([historic, historic_df], ignore_index=True)

    excel_data = BytesIO()
    historic_df.to_excel(excel_data, index=False)
    dropbox_upload_bytefile(dropbox_data_path=dropbox_path, dropbox_file_name='historique.xlsx', bytes=excel_data)
    return historic_df
|
173 |
+
|
174 |
+
async def update_monthly_payroll(dropbox_datapath: str, payroll_dict: dict, year: str, month: str, week: str):
    """Insert or update one intervenant-week in the monthly pre-payroll workbook.

    Downloads ``tableau_prepaye_{year}_{month}.xlsx`` from Dropbox (when it
    exists), replaces the row for (Nom, Prénom, week) or appends a new one,
    recomputes the intervenant's yearly 'TOTAL' row, reorders columns, sorts,
    re-uploads and returns the resulting DataFrame.
    """
    dropbox_path = f'{dropbox_datapath}/PAYES'
    nom = payroll_dict['Nom']
    prenom = payroll_dict['Prénom']
    payroll_df = pd.DataFrame([payroll_dict])

    payroll = dropbox_download_file(dropbox_path + f'/tableau_prepaye_{year}_{month}.xlsx', '', False)
    if payroll:
        # Wrap the downloaded bytes in BytesIO (bare bytes are rejected by
        # recent pandas versions).
        payroll = pd.read_excel(BytesIO(payroll))
        intervenant_rows = payroll[(payroll['Nom'] == nom) & (payroll['Prénom'] == prenom)]
        if len(intervenant_rows):
            # Bug fix: the 'Prénom' condition previously indexed payroll_df
            # (the single-row frame being inserted) instead of the downloaded
            # workbook, so the mask was misaligned with the other conditions.
            current_week_row = payroll[(payroll['Nom'] == nom) & (payroll['Prénom'] == prenom) & (payroll['Semaine'] == f'{year}-s{week}')]
            if len(current_week_row):
                # Same week already recorded: overwrite the row in place.
                payroll.iloc[current_week_row.index] = payroll_df.loc[0]
                payroll_df = payroll
            else:
                payroll_df = pd.concat([payroll, payroll_df], ignore_index=True)
        else:
            payroll_df = pd.concat([payroll, payroll_df], ignore_index=True)
    # Recompute this intervenant's TOTAL row over all weeks of the year.
    rows_for_total = payroll_df[(payroll_df['Nom'] == nom) & (payroll_df['Prénom'] == prenom) & (payroll_df['Semaine'].str.contains(f'{year}-s'))]
    total = pd.DataFrame([rows_for_total.drop(columns=['Nom', 'Prénom', 'Semaine']).sum()])
    total['Nom'] = nom
    total['Prénom'] = prenom
    total['Semaine'] = 'TOTAL'
    # Drop the previous TOTAL row before re-inserting the fresh one.
    all_but_total = payroll_df[~((payroll_df['Nom'] == nom) & (payroll_df['Prénom'] == prenom) & (payroll_df['Semaine'].str.contains('TOTAL')))]
    payroll_df = pd.concat([total, all_but_total], ignore_index=True, axis=0)
    payroll_df = payroll_df.sort_values(by=['Nom', 'Prénom', 'Semaine'])
    # Move the identity columns (Nom, Prénom, Semaine) to the front.
    column_order = payroll_df.columns[-3:].tolist() + payroll_df.columns[:-3].tolist()
    payroll_df = payroll_df[column_order]
    excel_data = BytesIO()
    payroll_df.to_excel(excel_data, index=False)
    dropbox_upload_bytefile(dropbox_data_path=dropbox_path, dropbox_file_name=f'tableau_prepaye_{year}_{month}.xlsx', bytes=excel_data)
    return payroll_df
|
213 |
+
|
214 |
+
def write_excel_fit(datapath: str, filename: str, data, starting_row = 7):
    """Render one weekly FIT (fiche d'intervention et de temps) to an xlsx file.

    *data* must provide the keys 'intervenant', 'year', 'week',
    'data' (column name -> list of cell values) and 'totals'
    (column name -> total). The table header is written at *starting_row*,
    its totals two rows above, and the fixed template layout (title, comment
    boxes, signature boxes, footer) around them. The file is saved under
    ``datapath/filename``.
    """
    workbook = Workbook()
    sheet = workbook.active

    # Fixed column widths / row heights of the FIT template.
    template_widths = {'A': 60, 'B': 40, 'C': 80, 'D': 40, 'E': 20,
                       'K': 60, 'L': 40, 'M': 40, 'O': 20, 'P': 40,
                       'Q': 40, 'R': 40, 'S': 20}
    for column_letter, width in template_widths.items():
        sheet.column_dimensions[column_letter].width = width
    sheet.row_dimensions[29].height = 30
    sheet.row_dimensions[31].height = 40

    # --- static header ---
    sheet['A1'] = 'SECMI'
    sheet['D1'] = 'FICHE D\'INTERVENTION ET DE TEMPS (FIT)'

    sheet['A3'] = f'Intervenant: {data["intervenant"]}'
    draw_border(sheet, (3, 1), (3, 1))

    sheet.merge_cells('M1:N1')
    sheet['M1'] = f'Année: {data["year"]}'
    draw_border(sheet, (1, 13), (1, 13))

    sheet.merge_cells('M2:N2')
    sheet['M2'] = f'Semaine: {data["week"]}'
    draw_border(sheet, (2, 13), (2, 13))

    sheet['A18'] = '(1) travail effectué entre 21h00 et 06h00'

    # --- comment boxes ---
    sheet.merge_cells('A19:S22')
    sheet['A19'] = 'Commentaires SECMI:'
    draw_border(sheet, (19, 1), (22, 19))

    sheet.merge_cells('A23:S26')
    sheet['A23'] = 'Commentaires Client:'
    draw_border(sheet, (23, 1), (26, 19))

    # --- signature boxes ---
    sheet.merge_cells('A30:D30')
    sheet['A30'] = 'Signature client:'
    draw_border(sheet, (30, 1), (30, 4))
    sheet.merge_cells('A31:D32')
    draw_border(sheet, (31, 1), (32, 4))

    sheet['E30'] = 'Note\n(de 0 à 10)'
    draw_border(sheet, (30, 5), (30, 5))
    sheet.merge_cells('E31:E32')
    draw_border(sheet, (31, 5), (32, 5))

    sheet['L30'] = 'Signature chargé d\'affaire:'
    draw_border(sheet, (30, 12), (30, 12))
    sheet.merge_cells('L31:L32')
    draw_border(sheet, (31, 12), (32, 12))

    sheet['M30'] = 'Signature intervenant:'
    draw_border(sheet, (30, 13), (30, 13))
    sheet.merge_cells('M31:M32')
    draw_border(sheet, (31, 13), (32, 13))

    # --- footer ---
    sheet.merge_cells('A33:T33')
    sheet.merge_cells('A34:T34')
    draw_border(sheet, (33, 1), (34, 19))
    sheet['A33'] = 'Service Administratif'
    sheet['A34'] = 'Tel: +33 6 02 14 55 16 - Email : [email protected]'

    # --- data table frame and totals banner ---
    draw_border(sheet, (starting_row, 1), (starting_row + 10, 19))
    draw_border(sheet, (starting_row - 2, 4), (starting_row - 2, 19))
    sheet.cell(row=starting_row - 2, column=4, value='TOTAUX (en heure)')

    # Header row; each column's total is written two rows above it.
    header = list(data['data'].keys())
    for col_i, key in enumerate(header):
        sheet.cell(row=starting_row, column=col_i + 1, value=key)
        if key in data['totals'].keys():
            sheet.cell(row=starting_row - 2, column=col_i + 1, value=data['totals'][key])

    for cell in sheet[starting_row - 2]:
        cell.font = Font(bold=True)
    for cell in sheet[starting_row]:
        cell.font = Font(bold=True)

    # Cell values, one column per key, starting just below the header.
    starting_row += 1
    for col_i, key in enumerate(header):
        for j, value in enumerate(data['data'][key]):
            sheet.cell(row=starting_row + j, column=col_i + 1, value=value)

    # --- global formatting: centered cells with two-decimal number format ---
    alignment = Alignment(horizontal='center', vertical='center')
    decimal_format = numbers.FORMAT_NUMBER_00
    for row_cells in sheet.iter_rows():
        for cell in row_cells:
            cell.alignment = alignment
            cell.number_format = decimal_format

    # Specific fonts / alignments override the global pass above.
    sheet['A1'].font = Font(bold=True, underline='single', size=11)
    sheet['D1'].font = Font(bold=True, underline='single', size=11)
    sheet['A19'].font = Font(bold=True, underline='single')
    sheet['A19'].alignment = Alignment(horizontal='left', vertical='top')
    sheet['A23'].font = Font(bold=True, underline='single')
    sheet['A23'].alignment = Alignment(horizontal='left', vertical='top')
    sheet['A33'].font = Font(bold=True)
    sheet['A33'].alignment = Alignment(horizontal='center')
    sheet['A34'].font = Font(bold=True)
    sheet['A34'].alignment = Alignment(horizontal='center')

    workbook.save(os.path.join(datapath, filename))
|
data/test.py
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import yaml
|
2 |
+
from yaml.loader import SafeLoader
|
3 |
+
|
4 |
+
# from excels import load_excels
|
5 |
+
# from dataframes import merge_clients, merge_affaires, merge_intervenants_affaires
|
6 |
+
# from const import root_path
|
7 |
+
|
8 |
+
# with open(f'{root_path}/config.yaml') as file:
|
9 |
+
# config = yaml.load(file, Loader=SafeLoader)
|
10 |
+
|
11 |
+
|
12 |
+
# print(config['data'])
|
13 |
+
# data = load_excels(root_path, config['data'])
|
14 |
+
# merge_clients(data)
|
15 |
+
# merge_affaires(data)
|
16 |
+
# merge_intervenants_affaires(data)
|
17 |
+
# print(data)
|
18 |
+
# print(data['Clients'])
|
19 |
+
# print(data['Affaire'])
|
20 |
+
|
21 |
+
def find_substring_in_list(substring, string_list):
    """Return the strings of *string_list* that contain *substring*."""
    # 'in' is the idiomatic (and faster) spelling of .find(...) != -1.
    return [string for string in string_list if substring in string]
|
24 |
+
|
25 |
+
# Quick manual check: a substring that matches nothing yields an empty list.
candidates = ['absence test', 'absence test 2', 'absence test 3', '0000- absence']
needle = 'zzz'
print(find_substring_in_list(needle, candidates))
|
28 |
+
# Example usage
|
29 |
+
# device_id = get_device_id()
|
30 |
+
# current_ip = get_current_ip()
|
31 |
+
# print("Current IP:", current_ip)
|
32 |
+
# print("Device ID:", device_id)
|
33 |
+
|
34 |
+
# film
|
35 |
+
# bail jojo
|
36 |
+
# garage
|
dockerfile
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# app/Dockerfile
|
2 |
+
|
3 |
+
FROM python:3.10.6-slim
|
4 |
+
|
5 |
+
WORKDIR /app
|
6 |
+
|
7 |
+
ENV PYTHONPATH=/app:$PYTHONPATH
|
8 |
+
|
9 |
+
RUN apt-get update && apt-get install -y \
|
10 |
+
build-essential \
|
11 |
+
curl \
|
12 |
+
software-properties-common \
|
13 |
+
git \
|
14 |
+
locales \
|
15 |
+
&& rm -rf /var/lib/apt/lists/*
|
16 |
+
|
17 |
+
RUN sed -i -e 's/# fr_FR.UTF-8 UTF-8/fr_FR.UTF-8 UTF-8/' /etc/locale.gen && \
|
18 |
+
locale-gen
|
19 |
+
|
20 |
+
# Set the locale environment variables
|
21 |
+
ENV LANG=fr_FR.UTF-8
|
22 |
+
ENV LC_ALL=fr_FR.UTF-8
|
23 |
+
|
24 |
+
COPY . .
|
25 |
+
|
26 |
+
RUN pip3 install --upgrade pip
|
27 |
+
RUN pip3 install -r requirements.txt
|
28 |
+
|
29 |
+
EXPOSE 8501
|
30 |
+
|
31 |
+
HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
|
32 |
+
|
33 |
+
ENTRYPOINT ["streamlit", "run", "main.py", "--server.port=8501", "--server.address=0.0.0.0"]
|
main.py
ADDED
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import datetime as dt
|
2 |
+
import os
|
3 |
+
import numpy as np
|
4 |
+
import pandas as pd
|
5 |
+
import streamlit as st
|
6 |
+
import yaml
|
7 |
+
from yaml.loader import SafeLoader
|
8 |
+
from data.cli_dropbox import dropbox_load_config_files
|
9 |
+
|
10 |
+
from data.excels import load_fit, load_transform_data, filter_multiple_conditions_data
|
11 |
+
from utils.times import calculate_night_time, calculate_work_time, date_to_week_number
|
12 |
+
from utils.btn_behaviors import reset, validate_duplicate, validate_end
|
13 |
+
from utils.indemnites import calculate_astreinte, calculate_indemnites_km, calculate_overtimes
|
14 |
+
|
15 |
+
from const import root_path
|
16 |
+
|
17 |
+
st.set_page_config(layout="wide")
|
18 |
+
|
19 |
+
with open(os.path.join(root_path, 'config.yaml')) as file:
|
20 |
+
config = yaml.load(file, Loader=SafeLoader)
|
21 |
+
pd.set_option('display.max_columns', None)
|
22 |
+
|
23 |
+
#TODO download dropbox config files
|
24 |
+
if not 'dropbox_update' in st.session_state.keys():
|
25 |
+
dropbox_load_config_files('/SEC_IND_GTP2023_INPUT', os.path.join(root_path, 'data/input'), config['data'])
|
26 |
+
st.session_state.dropbox_update = True
|
27 |
+
df = load_transform_data(os.path.join(root_path, 'data/input'), config['data'])
|
28 |
+
|
29 |
+
# print("session_state", st.session_state)
|
30 |
+
def affaire_change(indexes, df_key):
    """Streamlit on_change callback: re-filter the cached dataframe *df_key*.

    Collects every session-state widget whose key is a column of
    ``df[df_key]`` (excluding 'intervenant'/'prestataire') and whose value is
    not the '-' placeholder, filters the dataframe accordingly, and prepends
    a '-' placeholder row so each selectbox keeps a neutral first option.
    *indexes* maps column name -> selectbox index and is updated in place.
    """
    filters = {}
    new_values = []

    for key, value in st.session_state.items():
        # Only widget keys that correspond to filterable columns.
        if key in df[df_key].columns and key not in ['intervenant', 'prestataire']:
            if st.session_state[key] != '-':
                filters[key] = value
                new_values.insert(len(new_values), value)
            # Reset the dropdown index for every filterable column.
            # NOTE(review): original indentation was lost in this view —
            # confirm this reset belongs outside the inner '-' check.
            indexes[key] = 0

    st.session_state.df[df_key] = filter_multiple_conditions_data(df[df_key], filters)

    # Placeholder row of '-' so every selectbox offers a "no filter" choice.
    new_row = pd.DataFrame([['-'] * (df[df_key].shape[1])], columns=df[df_key].columns)
    for key, value in filters.items():
        # Active filters point at index 1 (the first real row after the placeholder).
        indexes[key] = 1 if value != '-'else 0

    st.session_state.df[df_key] = pd.concat([new_row, st.session_state.df[df_key]], ignore_index=True)
|
48 |
+
|
49 |
+
def intervenant_change():
    """Streamlit on_change callback: refresh contract hours and reload the FIT.

    Fires when the 'intervenant' selectbox changes; does nothing while the
    '-' placeholder is selected.
    """
    if "intervenant" in st.session_state and st.session_state.intervenant != '-':
        # Contract hours of the selected intervenant ('contrat heures' column).
        st.session_state.contract_hours = st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['contrat heures'].values[0]
        # NOTE(review): reads the same 'contrat heures' column as above —
        # confirm whether a dedicated "supplementary hours" column was intended.
        st.session_state.supp_contract_hours = st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['contrat heures'].values[0]
        # Pull the cached weekly FIT for the currently selected date.
        load_fit(datapath=os.path.join(root_path, 'data/output'), intervenant=st.session_state.intervenant, year=st.session_state.date_input.year, month= st.session_state.date_input.month ,week=date_to_week_number(st.session_state.date_input))
|
54 |
+
|
55 |
+
if 'df' not in st.session_state:
|
56 |
+
st.session_state.df = df
|
57 |
+
st.session_state.indexes_all = {k: 0 for k in df['all'].columns}
|
58 |
+
st.session_state.indexes_vehicules = {k: 0 for k in df['vehicules'].columns}
|
59 |
+
st.session_state.indexes_affaire = {k: 0 for k in df['affaire'].columns}
|
60 |
+
st.session_state.indexes_intervenants = {k: 0 for k in df['intervenants'].columns}
|
61 |
+
|
62 |
+
|
63 |
+
st.title('SECMI - Gestion des temps passés')
|
64 |
+
codes = st.columns(3)
|
65 |
+
|
66 |
+
intervenant = codes[0].selectbox('Intervenant', np.sort(st.session_state.df['intervenants'].intervenant.unique()), key='intervenant', on_change=intervenant_change)
|
67 |
+
client = codes[1].selectbox('Client', np.sort(st.session_state.df['affaire'].client.unique()), key='client', on_change=affaire_change, kwargs={'indexes': st.session_state.indexes_affaire, 'df_key': 'affaire'}, index=st.session_state.indexes_affaire['client'])
|
68 |
+
location = codes[2].selectbox('Localisation', np.sort(['En Atelier', 'En Clientèle']), key='location')
|
69 |
+
|
70 |
+
# prestataire = codes[3].selectbox('Prestataire', np.sort(st.session_state.df['all'].prestataire.unique()), key='prestataire', on_change=value_change, kwargs={'indexes': st.session_state.indexes_all, 'df_key': 'all'}, index=st.session_state.indexes_all['prestataire'])
|
71 |
+
st.session_state.prestataire = st.session_state.df['affaire']['prestataire'][1]
|
72 |
+
affaire = st.selectbox('Affaire', np.sort(st.session_state.df['affaire'].affaire.unique()), key='affaire', on_change=affaire_change, kwargs={'indexes': st.session_state.indexes_affaire, 'df_key': 'affaire'}, index=st.session_state.indexes_affaire['affaire'])
|
73 |
+
vehicules = st.selectbox('vehicules', np.sort(st.session_state.df['vehicules'].vehicules.unique()), key='vehicules')#, on_change=value_change,kwargs={'indexes': st.session_state.indexes_vehicules, 'df_key': 'vehicules'}, index=st.session_state.indexes_vehicules['Description'])
|
74 |
+
|
75 |
+
absences = st.session_state.df['absences']['Libellé GTP2023'].tolist()
|
76 |
+
absences.remove('Absence Formation')
|
77 |
+
st.session_state.disable_times = True if len([string for string in absences if affaire.find(string) != -1]) else False
|
78 |
+
|
79 |
+
st.divider()
|
80 |
+
activites = st.text_area('Description des activités effectuées', key="activities_text_area")
|
81 |
+
|
82 |
+
st.divider()
|
83 |
+
temps = st.columns(5)
|
84 |
+
|
85 |
+
if 'date_input' not in st.session_state:
|
86 |
+
st.session_state.date_input = dt.datetime.now()
|
87 |
+
|
88 |
+
# print(df['arrets'])
|
89 |
+
date = temps[0].date_input(
|
90 |
+
label="Date de la mission",
|
91 |
+
value=st.session_state.date_input,
|
92 |
+
min_value=dt.datetime.now() - dt.timedelta(days=15),
|
93 |
+
max_value=dt.datetime.now(),
|
94 |
+
on_change=load_fit,
|
95 |
+
kwargs={'datapath': os.path.join(root_path, 'data/output'), 'intervenant': st.session_state.intervenant, 'year': st.session_state.date_input.year, 'month': st.session_state.date_input.month, 'week': date_to_week_number(st.session_state.date_input)},
|
96 |
+
key='date_input')
|
97 |
+
|
98 |
+
# if 'fit' in st.session_state.keys():
|
99 |
+
# print(st.session_state.fit)
|
100 |
+
# print(date_to_week_number(date), date_to_week_day(date))
|
101 |
+
public_holyday = temps[0].checkbox('Jour Férié', key="public_holyday")
|
102 |
+
start_time = temps[1].time_input('heure début', dt.time(8, 00), key='start_time_input', disabled=st.session_state.disable_times)
|
103 |
+
end_time = temps[2].time_input('heure fin', dt.time(16, 00), key='end_time_input', disabled=st.session_state.disable_times)
|
104 |
+
|
105 |
+
|
106 |
+
pause_time = temps[3].time_input('temps de pause', dt.time(1, 00), step=300, key='pause_time_input')
|
107 |
+
|
108 |
+
st.session_state.total_hours = calculate_work_time(start_time, end_time, pause_time, date)
|
109 |
+
st.session_state.night_hours = calculate_night_time(start_time, end_time, pause_time, date)
|
110 |
+
|
111 |
+
if st.session_state.total_hours > 8:
|
112 |
+
st.warning('Les heures de travail enregistrées sont supérieures à 8 heures !')
|
113 |
+
|
114 |
+
|
115 |
+
# print(st.session_state.df['supplements'].columns)
|
116 |
+
supplements = temps[4]
|
117 |
+
supplement1 = supplements.selectbox('Supplement 1', st.session_state.df['supplements'].supplements, key = 'supplement1')
|
118 |
+
supplement2 = supplements.selectbox('Supplement 2', st.session_state.df['supplements'].supplements, key = 'supplement2')
|
119 |
+
supplement3 = supplements.selectbox('Supplement 3', st.session_state.df['supplements'].supplements, key = 'supplement3')
|
120 |
+
|
121 |
+
st.session_state.meal_bonus = 9.15 if st.session_state.total_hours >= 6.15 else 0
|
122 |
+
st.session_state.personal_tools_bonus = 3.2 if ' Transp. Caisse ' in [supplement1, supplement2, supplement3 ] else 0
|
123 |
+
st.session_state.intervention_bonus = 1 * st.session_state.total_hours if 'Prime Interv°' in [supplement1, supplement2, supplement3 ] else 0
|
124 |
+
st.session_state.on_call_bonus = calculate_astreinte() if st.session_state.intervenant != '-' and 'Astreinte Sem' in [supplement1, supplement2, supplement3 ] else 0
|
125 |
+
st.session_state.team_leader_bonus = 10 if 'Prime Chef Equ' in [supplement1, supplement2, supplement3 ] else 0
|
126 |
+
|
127 |
+
# st.session_state.painting_bonus = 1 * st.session_state.total_hours if ' H Peinture ' in [supplement1, supplement2, supplement3 ] else 0
|
128 |
+
# st.session_state.electrical_bonus = 1 * st.session_state.total_hours if ' H Qualif. Électrique ' in [supplement1, supplement2, supplement3 ] else 0
|
129 |
+
# st.session_state.welding_bonus = 1 * st.session_state.total_hours if ' H Soudure ' in [supplement1, supplement2, supplement3 ] else 0
|
130 |
+
|
131 |
+
st.session_state.overtime25, st.session_state.overtime50, st.session_state.overtime100 = calculate_overtimes()
|
132 |
+
|
133 |
+
st.session_state.mileage_allowances_bonus , st.session_state.drive_hours = calculate_indemnites_km()
|
134 |
+
|
135 |
+
year = st.session_state.date_input.year
|
136 |
+
month = st.session_state.date_input.month
|
137 |
+
week = date_to_week_number(st.session_state.date_input)
|
138 |
+
st.divider()
|
139 |
+
infos = st.columns(3, gap="large")
|
140 |
+
infos[0].header(f'Temps total: ')
|
141 |
+
infos[0].write(f'Temps de travail: {st.session_state.total_hours } heure(s)')
|
142 |
+
infos[0].write(f'Temps de pause: {pause_time.hour + (pause_time.minute / 60)} heure(s)')
|
143 |
+
infos[0].write(f'Heure de nuit: {st.session_state.night_hours} heure(s)')
|
144 |
+
infos[0].write(f'Jour Férié: {"oui" if public_holyday else "non"}')
|
145 |
+
|
146 |
+
if 'fit' in st.session_state.keys() and st.session_state.intervenant in st.session_state['fit'].keys() and year in st.session_state.fit[st.session_state.intervenant].keys() and month in st.session_state.fit[st.session_state.intervenant][year].keys() and week in st.session_state.fit[st.session_state.intervenant][year][month].keys():
|
147 |
+
infos[0].write(f'Heures indiquées dans le contrat de travail: {st.session_state.contract_hours} heures')
|
148 |
+
if st.session_state.contract_hours > 35:
|
149 |
+
infos[0].write(f'Dont Heures supplémentaires inclues dans le contrat: {st.session_state.supp_contract_hours} heures')
|
150 |
+
|
151 |
+
infos[0].write(f'Heures totales de travail pour la semaine {week} : {st.session_state.fit[st.session_state["intervenant"]][year][month][week]["totals"]["worked_hours"]} heures')
|
152 |
+
|
153 |
+
infos[0].write(f'Heure supp 25%: {st.session_state.overtime25} heure(s)')
|
154 |
+
infos[0].write(f'Heure supp 50%: {st.session_state.overtime50} heure(s)')
|
155 |
+
infos[0].write(f'Heure supp 100%: {st.session_state.overtime100} heure(s)')
|
156 |
+
|
157 |
+
infos[1].header('Informations:')
|
158 |
+
if st.session_state.mileage_allowances_bonus > 0:
|
159 |
+
infos[0].write(f'Heure route: {st.session_state.drive_hours} heure(s)')
|
160 |
+
infos[1].write(f'Indemnités kilométriques: {st.session_state.mileage_allowances_bonus}€')
|
161 |
+
|
162 |
+
infos[1].write(f'Panier: {st.session_state.meal_bonus}€')
|
163 |
+
infos[1].write(f'Transport de caisse: {st.session_state.personal_tools_bonus}€')
|
164 |
+
infos[1].write(f'Prime d\'intervention: {st.session_state.intervention_bonus}€')
|
165 |
+
infos[1].write(f'Prime d\'astreinte: {st.session_state.on_call_bonus}€')
|
166 |
+
infos[1].write(f'Prime chef d\'équipe: {st.session_state.team_leader_bonus}€')
|
167 |
+
# infos[1].write(f'Autre (Peinture, Soudure, Electricité): {st.session_state.electrical_bonus + st.session_state.painting_bonus + st.session_state.welding_bonus}€')
|
168 |
+
|
169 |
+
reset_btn = infos[2].button('Effacer', on_click=reset, kwargs={"df": df})
|
170 |
+
validate_duplicate_btn = infos[2].button('Valider et Dupliquer', on_click=validate_duplicate)
|
171 |
+
validate_end_btn = infos[2].button('Valider et Terminer', on_click=validate_end)
|
172 |
+
|
173 |
+
|
requirements.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Babel==2.12.1
|
2 |
+
dropbox==11.36.2
|
3 |
+
streamlit==1.25.0
|
4 |
+
pandas==1.5.3
|
5 |
+
numpy==1.23.5
|
6 |
+
openpyxl==3.1.2
|
7 |
+
python-dotenv==1.0.0
|
8 |
+
pyyaml
|
utils/__init__.py
ADDED
File without changes
|
utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (141 Bytes). View file
|
|
utils/__pycache__/btn_behaviors.cpython-310.pyc
ADDED
Binary file (8.29 kB). View file
|
|
utils/__pycache__/device.cpython-310.pyc
ADDED
Binary file (493 Bytes). View file
|
|
utils/__pycache__/indemnites.cpython-310.pyc
ADDED
Binary file (3.47 kB). View file
|
|
utils/__pycache__/times.cpython-310.pyc
ADDED
Binary file (1.77 kB). View file
|
|
utils/btn_behaviors.py
ADDED
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import numpy as np
|
3 |
+
import pandas as pd
|
4 |
+
import streamlit as st
|
5 |
+
import datetime as dt
|
6 |
+
from data.cli_dropbox import dropbox_upload_file
|
7 |
+
from data.datastructures import generate_intervenant_fit, generate_intervenant_monthly_payroll, generate_society_fit, generate_society_payroll
|
8 |
+
|
9 |
+
from data.excels import get_fit_totals, update_historical_week, update_monthly_payroll, update_society_fit, update_society_payroll, write_excel_fit
|
10 |
+
from utils.indemnites import calculate_arrets
|
11 |
+
from utils.times import date_to_week_day, date_to_week_number
|
12 |
+
from utils.device import get_current_ip, get_device_id
|
13 |
+
from const import root_path
|
14 |
+
import asyncio
|
15 |
+
|
16 |
+
|
17 |
+
def merge_dicts(list_of_dicts):
    """Merge dicts into one dict mapping each key to the list of its values.

    Values are collected in the order the dicts appear in *list_of_dicts*;
    a key absent from a dict simply contributes nothing for that dict.
    """
    merged = {}
    for entry in list_of_dicts:
        for key, value in entry.items():
            merged.setdefault(key, []).append(value)
    return merged
|
27 |
+
|
28 |
+
# NOTE(review): st.cache_resource on an async def caches the coroutine
# object, not its awaited result — confirm this decorator is intended.
@st.cache_resource
async def read_transform_write_excel(datapath: str = f'{root_path}/data/output', form: dict = None):
    """Build the intervenant's weekly FIT from *form*, write it locally and to Dropbox.

    Appends the new entry to the cached week (when one exists in
    ``st.session_state['fit']``), recomputes the totals, renders the Excel
    file via write_excel_fit, uploads it, refreshes the session cache and
    returns the cached ``{'data', 'totals'}`` entry for that week.
    """
    # Metadata passed through to the Excel template header.
    fit_data = {
        "year": form["year"],
        "month": form["month"],
        "week": form["week"],
        "intervenant": form["intervenant"],
    }

    fit_dict = generate_intervenant_fit(form)
    fit_df = pd.DataFrame(fit_dict, index=[0])

    # If this intervenant/year/month/week is already cached, append the new
    # entry to the existing week's rows.
    if 'fit' in st.session_state.keys() \
        and form["intervenant"] in st.session_state['fit'].keys() \
        and form["year"] in st.session_state['fit'][form["intervenant"]].keys() \
        and form["month"] in st.session_state['fit'][form["intervenant"]][form["year"]].keys() \
        and form["week"] in st.session_state['fit'][form["intervenant"]][form["year"]][form["month"]].keys():
        df = st.session_state['fit'][form["intervenant"]][form["year"]][form["month"]][form["week"]]['data']
        # Concatenate DataFrame and dictionary DataFrame
        fit_df = pd.concat([df, fit_df], ignore_index=True)

    totals = get_fit_totals(fit_df)
    fit_data['data'] = fit_df
    fit_data['totals'] = totals
    path = f'{form["intervenant"]}_{form["year"]}_{form["month"]}_{form["week"]}_FIT.xlsx'

    # Render locally, then push to the intervenant's Dropbox output folder.
    write_excel_fit(datapath, path, fit_data)
    dropbox_upload_file(datapath, path, f'/SEC_IND_GTP2023_OUTPUT/FIT/{form["intervenant"]}/{form["year"]}/{form["month"]}/{form["week"]}', path)

    # Rebuild the nested session cache level by level before storing the week.
    if 'fit' not in st.session_state.keys():
        st.session_state['fit'] = {}

    if form["intervenant"] not in st.session_state['fit'].keys():
        st.session_state['fit'][form["intervenant"]] = {}
    if form["year"] not in st.session_state['fit'][form["intervenant"]].keys():
        st.session_state['fit'][form["intervenant"]][form["year"]] = {}
    if form["month"] not in st.session_state['fit'][form["intervenant"]][form["year"]].keys():
        st.session_state['fit'][form["intervenant"]][form["year"]][form["month"]] = {}
    if form["week"] not in st.session_state['fit'][form["intervenant"]][form["year"]][form["month"]].keys():
        st.session_state['fit'][form["intervenant"]][form["year"]][form["month"]][form["week"]] = {}
    st.session_state['fit'][form["intervenant"]][form["year"]][form["month"]][form["week"]] = {
        'data': fit_data['data'],
        'totals': totals
    }
    st.toast('Fiche de temps envoyée', icon="📨")
    return st.session_state['fit'][form["intervenant"]][form["year"]][form["month"]][form["week"]]
|
76 |
+
|
77 |
+
def validate():
    """Snapshot the current Streamlit form into a dict and append it to
    st.session_state.completed_forms.

    Every value is read from st.session_state (widget keys); absence-hour
    buckets are derived from the label embedded in the selected "affaire".
    """
    data_dict = {
        # Identity of the intervenant, looked up in the reference table.
        "intervenant": st.session_state.intervenant,
        "nom": st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['Nom'].values[0],
        "prenom": st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['Prénom'].values[0],
        "employeur": st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['Employeur'].values[0],
        # Assignment context selected in the form.
        "client": st.session_state.client,
        "affaire": st.session_state.affaire,
        "prestataire": st.session_state.prestataire,
        "vehicule": st.session_state.vehicules,
        "location": st.session_state.location,
        "activities": st.session_state.activities_text_area,

        # Calendar information derived from the selected date.
        "date": st.session_state.date_input,
        "year": st.session_state.date_input.year,
        "month": st.session_state.date_input.month,
        "week": date_to_week_number(st.session_state.date_input),
        "public_holyday": st.session_state.public_holyday,
        "start_time": st.session_state.start_time_input,
        "end_time": st.session_state.end_time_input,

        "pause_time": st.session_state.pause_time_input,
        "supplement_1": st.session_state.supplement1,
        "supplement_2": st.session_state.supplement2,
        "supplement_3": st.session_state.supplement3,

        # "contract_hours": st.session_state.df['all'].contract_hours[st.session_state.indexes_all['contract_hours']],
        # "maladie": st.session_state.total_hours if st.session_state.affaire.find('Absence Maladie Individuelle') != -1 or \
        #            st.session_state.affaire.find('Absence Maladie Professionnelle') != -1 \
        #            else 0,
        # Absence hours: each bucket is non-zero only when the matching label
        # appears in the selected "affaire".
        "maladie": calculate_arrets() if st.session_state.affaire.find('Absence Maladie Individuelle') != -1 or \
                   st.session_state.affaire.find('Absence Maladie Professionnelle') != -1 \
                   else 0,
        "arret_travail": calculate_arrets() if st.session_state.affaire.find('Absence Accident du Travail') != -1 else 0,
        "conges_payes": calculate_arrets() if st.session_state.affaire.find('Absence Congés payés') != -1 else 0,
        "conges_sans_solde": calculate_arrets() if st.session_state.affaire.find('Absence Congés sans solde') != -1 else 0,
        "rtt": calculate_arrets() if st.session_state.affaire.find('Absence RTT') != -1 else 0,
        "formation": calculate_arrets() if st.session_state.affaire.find('Absence Formation') != -1 else 0,
        "evenement_familial": calculate_arrets() if st.session_state.affaire.find('Absence Evénement Familial') != -1 else 0,

        # Hour counters; missing/None widget values default to 0.
        "worked_hours": st.session_state.total_hours if st.session_state.total_hours else 0,
        "night_hours": st.session_state.night_hours if st.session_state.night_hours else 0,
        "contract_hours": st.session_state.contract_hours if st.session_state.contract_hours else 0,
        "supp_contract_hours": st.session_state.supp_contract_hours if st.session_state.supp_contract_hours else 0,
        # NOTE(review): evaluates to 0 whenever night_hours is 0, so a pure
        # day shift gets day_hours == 0 — confirm this is intended.
        "day_hours": st.session_state.total_hours - st.session_state.night_hours if st.session_state.total_hours and st.session_state.night_hours else 0,
        "drive_hours": st.session_state.drive_hours if st.session_state.drive_hours else 0,
        "saturday_hours": st.session_state.total_hours if date_to_week_day(st.session_state.date_input) == 'samedi' else 0,
        "sunday_hours": st.session_state.total_hours if date_to_week_day(st.session_state.date_input) == 'dimanche' else 0,
        "holyday_hours": st.session_state.total_hours if st.session_state.public_holyday else 0,
        "overtime_25": st.session_state.overtime25 if st.session_state.overtime25 else 0,
        "overtime_50": st.session_state.overtime50 if st.session_state.overtime50 else 0,
        "overtime_100": st.session_state.overtime100 if st.session_state.overtime100 else 0,

        # Bonuses (indemnités / primes).
        # NOTE(review): "meal_nonus" looks like a typo for "meal_bonus" — check
        # which key downstream consumers read before renaming.
        "meal_nonus": st.session_state.meal_bonus if st.session_state.meal_bonus else 0,
        "mileage_allowances_bonus": st.session_state.mileage_allowances_bonus if st.session_state.mileage_allowances_bonus else 0,
        "personal_tools_bonus": st.session_state.personal_tools_bonus if st.session_state.personal_tools_bonus else 0,
        "intervention_bonus": st.session_state.intervention_bonus if st.session_state.intervention_bonus else 0,
        "on_call_bonus": st.session_state.on_call_bonus if st.session_state.on_call_bonus else 0,
        "team_leader_bonus": st.session_state.team_leader_bonus if st.session_state.team_leader_bonus else 0,
        # "other_bonus": st.session_state.electrical_bonus + st.session_state.painting_bonus + st.session_state.welding_bonus if st.session_state.electrical_bonus + st.session_state.painting_bonus + st.session_state.welding_bonus else 0,
        # Audit trail: device identity and entry timestamp.
        "device_model": get_device_id(),
        "device_ip": get_current_ip(),
        "modified": dt.datetime.now(),
    }
    # Lazily create the accumulator list on first validation of the session.
    if 'completed_forms' not in st.session_state:
        st.session_state['completed_forms'] = []
    st.session_state.completed_forms.append(data_dict)
    st.toast('Fiche de temps validée et enregistrée', icon="✅")
|
146 |
+
|
147 |
+
def reset(df):
    """Reinitialize all form-related session state for a new time sheet."""
    state = st.session_state
    state.df = df

    # One zeroed index map per lookup table.
    for table in ('all', 'affaire', 'intervenants', 'vehicules'):
        setattr(state, f'indexes_{table}', {column: 0 for column in df[table].columns})

    # Default selections: first entry of each reference table.
    state.intervenant = df['intervenants'].intervenant[0]
    state.client = df['affaire'].client[0]
    state.affaire = df['affaire'].affaire[0]
    state.prestataire = df['affaire'].prestataire[0]
    state.vehicules = df['vehicules'].vehicules[0]

    state.activities_text_area = ""

    # Default schedule: today, 08:00-17:00, no pause, not a public holiday.
    state.date_input = dt.datetime.now()
    state.public_holyday = False
    state.start_time_input = dt.time(8, 0)
    state.end_time_input = dt.time(17, 0)
    state.pause_time_input = dt.time(0, 0)

    # All three supplement selectors start on the first (empty) option.
    state.supplement1 = df['supplements'].supplements[0]
    state.supplement2 = df['supplements'].supplements[0]
    state.supplement3 = df['supplements'].supplements[0]

    st.toast('Nouvelle fiche de temps ', icon="🔃")
|
175 |
+
|
176 |
+
async def run_async_tasks():
    """Run the post-validation synchronisation pipeline for the most recent form.

    Order matters: the weekly historic is rebuilt first because the monthly
    payroll step consumes its result (historic_df).  Each stage is bracketed
    by st.toast progress messages.
    """
    # Stage 1: write/refresh the individual time sheet (FIT) in Dropbox.
    await read_transform_write_excel(form=st.session_state.completed_forms[-1])
    st.toast('Mise à jour de votre historique, veuillez patienter', icon="💾")
    historic_df = await update_historical_week(dropbox_datapath='/SEC_IND_GTP2023_OUTPUT', form=st.session_state.completed_forms[-1])
    st.toast('Historique mis à jour', icon="✅")

    # Stage 2: company-level time sheet.
    st.toast('Mise à jour du Tableau de société, veuillez patienter', icon="💾")
    society_form = generate_society_fit(form=st.session_state.completed_forms[-1])
    # NOTE(review): society_df is assigned but never used — confirm before removing.
    society_df = await update_society_fit(dropbox_datapath='/SEC_IND_GTP2023_OUTPUT', form=society_form)
    st.toast('Tableau de société mis à jour', icon="✅")

    # Stage 3: cross-company services ("prestations croisées").
    st.toast('Mise à jour des Prestations croisées, veuillez patienter', icon="💾")
    society_form = generate_society_payroll(form=st.session_state.completed_forms[-1])
    society_df = await update_society_payroll(dropbox_datapath='/SEC_IND_GTP2023_OUTPUT', form=society_form)
    st.toast('Prestations croisées mis à jour', icon="✅")

    # Stage 4: monthly payroll, built from the refreshed weekly historic.
    st.toast('Mise à jour du Tableau de paye, veuillez patienter', icon="💾")
    payroll = generate_intervenant_monthly_payroll(form=historic_df)
    await update_monthly_payroll(dropbox_datapath='/SEC_IND_GTP2023_OUTPUT', payroll_dict=payroll, year=st.session_state.completed_forms[-1]['year'] , month=st.session_state.completed_forms[-1]['month'], week=st.session_state.completed_forms[-1]['week'])
    st.toast('Tableau de paye mis à jour', icon="✅")
    st.toast('Fiche de temps dupliquée', icon="📄")
|
197 |
+
|
198 |
+
def validate_duplicate():
    """Validate the current form, advance the date when possible, and sync."""
    required_filled = (
        st.session_state.intervenant != '-'
        and st.session_state.client != '-'
        and st.session_state.affaire != '-'
    )
    if not required_filled:
        st.toast('Veuillez remplir les champs intervenant, client et affaire', icon="⚠️")
        return

    validate()
    # print(st.session_state.completed_forms[-1])
    # Move the duplicated sheet to the next day, but never past today.
    if st.session_state.date_input < dt.datetime.now().date():
        st.session_state.date_input += dt.timedelta(days=1)

    asyncio.run(run_async_tasks())
|
209 |
+
|
210 |
+
def validate_end():
    """Validate the current form, run the sync pipeline and clear the session's forms."""
    fields = (st.session_state.intervenant, st.session_state.client, st.session_state.affaire)
    if any(value == '-' for value in fields):
        st.toast('Veuillez remplir les champs intervenant, client et affaire', icon="⚠️")
        return

    validate()

    asyncio.run(run_async_tasks())

    # End of session: drop the accumulated forms.
    st.session_state.completed_forms = []
    st.toast('Vous pouvez fermer l\'application ', icon="👍")
|
utils/device.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import platform
|
2 |
+
import socket
|
3 |
+
|
4 |
+
def get_current_ip():
    """Return the IPv4 address that the local hostname resolves to."""
    return socket.gethostbyname(socket.gethostname())
|
8 |
+
|
9 |
+
def get_device_id():
    """Return this machine's network name (may be an empty string)."""
    return platform.node()
|
utils/indemnites.py
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List
|
2 |
+
import streamlit as st
|
3 |
+
|
4 |
+
from utils.times import date_to_week_day, date_to_week_number
|
5 |
+
|
6 |
+
def calculate_indemnites_km(indem_vehicles: List[str] = ['Perso']) -> tuple[float, float]:
    """Return (mileage allowance in EUR, commute drive time in hours) for the
    current form selections.

    NOTE(review): mutable default argument — harmless here because the list is
    only read, never mutated.
    """
    location = st.session_state.location
    vehicule = st.session_state.vehicules
    prestataire = st.session_state.df['affaire'][st.session_state.df['affaire']['affaire'] == st.session_state.affaire]['prestataire'].values[0]

    # No allowance when the form is incomplete, work is in the workshop,
    # or the vehicle type is not eligible (default: personal vehicle only).
    if st.session_state.intervenant == '-' \
        or st.session_state.client == '-' \
        or location == 'En Atelier' \
        or not vehicule in indem_vehicles:
        return 0, 0
    else:
        # Distance/duration come from the client table, per intervenant sector.
        secteur = st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['secteur'].values[0]
        distance = st.session_state.df['Clients'][(st.session_state.df['Clients']['Nom client'] == st.session_state.client) & (st.session_state.df['Clients']['Prestataire'] == prestataire)][f'Dist {secteur}'].values[0]
        duration = st.session_state.df['Clients'][(st.session_state.df['Clients']['Nom client'] == st.session_state.client) & (st.session_state.df['Clients']['Prestataire'] == prestataire)][f'Duration {secteur}'].values[0]

        # Round trips under the 35 (presumably km) threshold are not indemnified.
        if distance * 2 < 35:
            return 0, 0
        # Flat 0.35 per distance unit, round trip.
        indemnites = float(distance) * 2 * 0.35
        drive_hours = float(duration) * 2
        indemnites = round(indemnites, 2)
        # divmod by 60 implies 'duration' is stored in minutes — TODO confirm
        # against the Clients table.
        hours, minutes = divmod(drive_hours, 60)
        return indemnites, hours + (minutes / 60)
|
28 |
+
|
29 |
+
def calculate_astreinte():
    """Return the on-call ("astreinte") bonus in EUR for the current form.

    Pays a flat 321.0 once the intervenant's recorded weekly hours reach the
    (non-zero) contract hours; 0.0 otherwise.
    """
    public_holyday = st.session_state.public_holyday
    intervenant = st.session_state.intervenant
    year = st.session_state.date_input.year
    month = st.session_state.date_input.month
    week = date_to_week_number(st.session_state.date_input)
    # Only applicable once a time sheet for this intervenant/week already exists.
    if 'fit' in st.session_state.keys() and intervenant in st.session_state.fit.keys() and year in st.session_state.fit[intervenant].keys() and month in st.session_state.fit[intervenant][year].keys() and week in st.session_state.fit[intervenant][year][month].keys():
        totals = st.session_state.fit[intervenant][year][month][week]['totals']
        if totals['worked_hours'] >= st.session_state.contract_hours and st.session_state.contract_hours != 0:
            # NOTE(review): both branches return the same 321.0, so the
            # public-holiday test below is dead code — confirm whether a
            # different holiday rate was intended.
            if public_holyday:
                return 321.0
            return 321.0
    return 0.0
|
42 |
+
|
43 |
+
def calculate_overtimes():
    """Return (overtime_25, overtime_50, overtime_100) hours for the current day.

    25% band: weekly hours between 35 and 43 (capped at 8h); 50%: weekly hours
    beyond 43, or all hours on Saturday; 100%: all hours on Sunday or a public
    holiday.
    """
    overtime25, overtime50, overtime100 = 0., 0., 0.
    intervenant = st.session_state.intervenant
    year = st.session_state.date_input.year
    month = st.session_state.date_input.month
    week = date_to_week_number(st.session_state.date_input)
    weekday = date_to_week_day(st.session_state.date_input)
    # NOTE(review): hour_price is computed but never used below — confirm
    # before removing.
    hour_price = st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['cout horaire'].values[0]
    # Weekly thresholds only apply once a time sheet for this week exists.
    if 'fit' in st.session_state.keys() and intervenant in st.session_state.fit.keys() and year in st.session_state.fit[intervenant].keys() and month in st.session_state.fit[intervenant][year].keys() and week in st.session_state.fit[intervenant][year][month].keys():
        week_hours = st.session_state.fit[intervenant][year][month][week]['totals']['worked_hours']
        if st.session_state.contract_hours != 0 and st.session_state.contract_hours < week_hours:
            # 25% band is anchored to the fixed 35h threshold, not the
            # contract hours — presumably the legal weekly base; TODO confirm.
            overtime25 = max(0, min(8, week_hours - 35))
        if st.session_state.contract_hours != 0 and week_hours >= 43:
            overtime50 = (week_hours - 43)
    # Weekend/holiday rates depend only on the selected day.
    if weekday == 'samedi':
        overtime50 = st.session_state.total_hours
    if weekday == 'dimanche' or st.session_state.public_holyday:
        overtime100 = st.session_state.total_hours
    return overtime25, overtime50, overtime100
|
62 |
+
|
63 |
+
def calculate_arrets():
    """Return the absence hours for the current day from the 'arrets' table.

    Looks up the RH code whose 'Libellé GTP2023' label appears in the selected
    "affaire", then reads the hour value matching the intervenant's sector,
    weekday class and weekly contract hours.  Returns 0 for non-standard
    contracts or when no matching 'arrets' row exists.
    """
    secteur = st.session_state.df['intervenants'][st.session_state.df['intervenants']['intervenant'] == st.session_state.intervenant]['secteur'].values[0]
    # Row whose absence label is embedded in the selected "affaire".
    # (Removed: a preceding DataFrame-wide applymap mask whose result was
    # immediately overwritten — dead code.)
    mask = st.session_state.df['absences']['Libellé GTP2023'].apply(lambda x: st.session_state.affaire.find(x) != -1)
    # NOTE(review): raises IndexError when no label matches — confirm callers
    # only invoke this for absence "affaires".
    hr_code = st.session_state.df['absences'][mask]['Code_RH'].values[0]
    contract_hours = st.session_state.contract_hours
    # Absence accounting is only defined for the standard weekly contracts.
    if contract_hours not in (35., 37., 37.5, 39.):
        return 0
    # 'STE' sector distinguishes Friday ('V_') from other weekdays ('JS_');
    # other sectors use no weekday prefix.
    weekday = date_to_week_day(st.session_state.date_input)
    weekday = 'JS_' if weekday != 'vendredi' else 'V_'
    weekday = weekday if secteur == 'STE' else ''

    am_values = st.session_state.df['arrets'][st.session_state.df['arrets']['code_absence'] == f'{hr_code}1_{weekday}{secteur}'][f'H-{float(contract_hours)}'].values
    return am_values[0] if len(am_values) > 0 else 0
|
77 |
+
|
78 |
+
|
utils/times.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datetime import datetime, timedelta, time
|
2 |
+
import locale
|
3 |
+
from babel.dates import format_date, get_day_names
|
4 |
+
|
5 |
+
# Set a French locale for locale-dependent formatting.  Fall back silently when
# fr_FR.UTF-8 is not installed (common in slim Docker images): the babel call
# in date_to_week_day passes an explicit 'fr' locale and does not rely on it.
try:
    locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
except locale.Error:
    pass
|
6 |
+
|
7 |
+
def calculate_night_time(start_time: time, end_time: time, pause_time: time, date: datetime) -> float:
    """Return the hours worked inside the night window (21:00-06:00), pause deducted.

    The shift [start_time, end_time] is anchored on *date*; when end_time is
    earlier than start_time the shift is assumed to roll past midnight.
    The full pause is deducted from the night hours (floored at zero) even if
    the pause was actually taken during the day — rule preserved from the
    original accounting logic; TODO confirm it is intended.

    Fix: a same-day shift spanning both night segments (e.g. 05:00-22:00)
    previously counted only the evening segment; night time is now computed as
    the overlap with both surrounding night windows.
    """
    start_datetime = datetime.combine(date, start_time)
    end_datetime = datetime.combine(date, end_time)
    if end_datetime < start_datetime:
        # Shift crosses midnight.
        end_datetime += timedelta(days=1)

    # The two night windows that can overlap a shift anchored on day D:
    # [D-1 21:00, D 06:00] and [D 21:00, D+1 06:00].
    morning_limit = datetime.combine(date, time(6, 0))
    evening_start = datetime.combine(date, time(21, 0))
    windows = (
        (evening_start - timedelta(days=1), morning_limit),
        (evening_start, morning_limit + timedelta(days=1)),
    )

    night_worktime = timedelta()
    for window_start, window_end in windows:
        overlap = min(end_datetime, window_end) - max(start_datetime, window_start)
        if overlap > timedelta():
            night_worktime += overlap

    # Deduct the pause; never go negative.
    pause_delta = timedelta(hours=pause_time.hour, minutes=pause_time.minute)
    night_worktime = night_worktime - pause_delta if night_worktime > pause_delta else timedelta()

    return night_worktime.total_seconds() / 3600
|
34 |
+
|
35 |
+
def calculate_work_time(start_time: time, end_time: time, pause_time: time, date: datetime) -> float:
    """Return the worked hours between start_time and end_time, pause deducted.

    A shift whose end is earlier than its start is assumed to roll past
    midnight.  The result is floored at zero when the pause exceeds the span.
    """
    begin = datetime.combine(date, start_time)
    finish = datetime.combine(date, end_time)
    if finish < begin:
        # Shift crosses midnight.
        finish += timedelta(days=1)

    span = finish - begin
    break_length = timedelta(hours=pause_time.hour, minutes=pause_time.minute)
    span = span - break_length if span > break_length else timedelta()

    return span.total_seconds() / 3600
|
49 |
+
|
50 |
+
def date_to_week_number(date: datetime) -> int:
    """Return the ISO-8601 week number (1-53) of *date*."""
    _, week, _ = date.isocalendar()
    return week
|
52 |
+
|
53 |
+
def date_to_week_day(date: datetime) -> str:
    """Return the French weekday name of *date* (e.g. 'lundi', 'vendredi')."""
    # weekday_english = date.strftime("%A")
    # Explicit 'fr' locale, independent of the process-wide locale setting.
    weekday_french = format_date(date, 'EEEE', locale='fr')
    return weekday_french
|