import pandas as pd
import openpyxl  # ensure XLSX engine is available
import gradio as gr
import uuid
import os

# 7-Day Schedule Converter: flips a Models-vs-Days grid into a Texters-vs-Days grid
# (or the reverse), handles single or merged header rows, and offers the result as a
# downloadable Excel file.
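#
# Expected layout (as implied by the code below): the first column holds the row
# entities (Models or Texters), the next seven columns hold the days, and each cell
# lists the comma-separated names assigned for that day.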

def convert_schedule(file_path, direction):
    try:
        # 1. Load raw header rows to determine day labels
        raw = pd.read_excel(file_path, header=None)
        header1 = raw.iloc[0, 1:].astype(object)
        header2 = raw.iloc[1, 1:].astype(object)

        # Decide which header row to use: prefer the second row if it is fully populated
        # (cast to str so the 'Unnamed' check also works for non-string labels)
        if header2.notna().all() and not header2.astype(str).str.startswith('Unnamed').any():
            days = header2.tolist()
            data_start = 2
        else:
            # Forward-fill merged first-row headers
            days = []
            last = None
            for val in header1:
                if pd.isna(val) or str(val).startswith('Unnamed'):
                    days.append(last)
                else:
                    last = str(val)
                    days.append(last)
            data_start = 1

        # 2. Load the actual data rows; data_start is the index of the first data row,
        #    so skip everything above it and assign the resolved day columns ourselves
        df = pd.read_excel(
            file_path,
            header=None,
            skiprows=data_start,
            index_col=0,
            usecols=[0] + list(range(1, len(days) + 1))
        )
        df.columns = [str(day) for day in days]
        df = df[df.index.notna()]        # drop blank trailing rows
        df.index = df.index.astype(str)  # normalize labels so the .at lookups below match

        # 3. Retain original day column order
        day_cols = list(df.columns)

        # 4. Build the inverted mapping: split each cell's comma-separated names and
        #    record, for every name, which row entity it covers on each day
        assignments = {}
        if direction == 'A to B':
            # Models in rows → Texters as rows
            for model in df.index.astype(str):
                for day in day_cols:
                    cell = df.at[model, day]
                    # Skip empty (NaN) cells
                    if pd.isna(cell):
                        continue
                    for texter in str(cell).split(','):
                        texter = texter.strip()
                        if not texter or texter.lower() in ('nan', 'none'):
                            continue
                        assignments.setdefault(texter, {d: [] for d in day_cols})
                        assignments[texter][day].append(model)
            
            if not assignments:  # Handle empty assignments
                result = pd.DataFrame(columns=day_cols)
                first_col_name = 'Texter'
            else:
                index = sorted(assignments.keys())
                result = pd.DataFrame(index=index, columns=day_cols)
                first_col_name = 'Texter'
                for texter, days_map in assignments.items():
                    for day in day_cols:
                        models = days_map.get(day, [])
                        result.at[texter, day] = ', '.join(models) if models else 'OFF'
        else:
            # Texters in rows → Models as rows
            for texter in df.index.astype(str):
                for day in day_cols:
                    cell = df.at[texter, day]
                    # Skip empty (NaN) cells
                    if pd.isna(cell):
                        continue
                    for model in str(cell).split(','):
                        model = model.strip()
                        if not model or model.lower() in ('nan', 'none'):
                            continue
                        assignments.setdefault(model, {d: [] for d in day_cols})
                        assignments[model][day].append(texter)
            
            if not assignments:  # Handle empty assignments
                result = pd.DataFrame(columns=day_cols)
                first_col_name = 'Model'
            else:
                index = sorted(assignments.keys())
                result = pd.DataFrame(index=index, columns=day_cols)
                first_col_name = 'Model'
                for model, days_map in assignments.items():
                    for day in day_cols:
                        texters = days_map.get(day, [])
                        result.at[model, day] = ', '.join(texters) if texters else 'OFF'

        # 5. Clean up axis names
        result.index.name = None
        result.columns.name = None

        # For display, include index as a column
        display_df = result.reset_index().rename(columns={'index': first_col_name})

        # 6. Save to Excel for download
        output_filename = f"converted_{uuid.uuid4().hex}.xlsx"

        # Clean the DataFrame before saving: fill missing cells with 'OFF' and force
        # every value to a string to avoid Excel type issues
        result_clean = result.copy()
        result_clean = result_clean.fillna('OFF')
        for col in result_clean.columns:
            result_clean[col] = result_clean[col].astype(str)
        result_clean.index.name = first_col_name  # label the first column in the file

        with pd.ExcelWriter(output_filename, engine='openpyxl', mode='w') as writer:
            result_clean.to_excel(writer, sheet_name='Schedule', index=True, header=True)

        # Return both DataFrame and download path
        return display_df, output_filename

    except Exception as e:
        # Return error information
        error_df = pd.DataFrame({'Error': [f"Error processing file: {str(e)}"]})
        return error_df, None
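
# Optional local sanity check (not executed on Spaces): call the converter directly on
# a saved workbook; the file name here is only an illustration.
#   preview_df, xlsx_path = convert_schedule('weekly_schedule.xlsx', 'A to B')
#   print(preview_df)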

# Build the Gradio interface (launched below when run as a script, e.g. on Hugging Face Spaces)
iface = gr.Interface(
    fn=convert_schedule,
    inputs=[
        gr.File(label='Upload Weekly Schedule (.xlsx)', file_count='single', type='filepath'),
        gr.Radio(['A to B', 'B to A'], label='Convert Direction', value='A to B')  # default direction
    ],
    outputs=[
        gr.Dataframe(label='Converted Schedule'),
        gr.File(label='Download Converted Excel')
    ],
    title='7-Day Schedule Converter',
    description=(
        'Upload a weekly schedule (.xlsx) with Models (or Texters) in rows and seven day columns, '
        'using single or merged headers, then flip it between Models→Texters and Texters→Models. '
        'Download the result as .xlsx.'
    ),
    flagging_mode='never'
)

# Launch on 0.0.0.0:7860 for Spaces
if __name__ == "__main__":
    iface.launch(server_name='0.0.0.0', server_port=7860)