HaLim
committed on
Commit
Β·
46e0ea9
1
Parent(s):
89d7197
add streamlit and clean the optimizer
Browse files- Home.py +233 -0
- STREAMLIT_README.md +161 -0
- pages/1_π_Dataset_Metadata.py +669 -0
- pages/2_π―_Optimization.py +633 -0
- requirements.txt +17 -15
- run_streamlit.py +33 -0
- src/config/optimization_config.py +6 -0
- src/models/optimizer_real.py +126 -39
- streamlit_app.py +517 -0
- streamlit_app_old.py +517 -0
Home.py
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
|
| 3 |
+
# Page configuration - MUST be first Streamlit command
|
| 4 |
+
st.set_page_config(
|
| 5 |
+
page_title="SD Roster Tool - Home",
|
| 6 |
+
page_icon="π ",
|
| 7 |
+
layout="wide",
|
| 8 |
+
initial_sidebar_state="expanded"
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
# Now import everything else
|
| 12 |
+
import pandas as pd
|
| 13 |
+
import sys
|
| 14 |
+
import os
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
|
| 17 |
+
# Add src to path for imports
|
| 18 |
+
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
|
| 19 |
+
|
| 20 |
+
# Custom CSS for better styling
|
| 21 |
+
st.markdown("""
|
| 22 |
+
<style>
|
| 23 |
+
.main-header {
|
| 24 |
+
font-size: 3rem;
|
| 25 |
+
font-weight: bold;
|
| 26 |
+
color: #1f77b4;
|
| 27 |
+
margin-bottom: 2rem;
|
| 28 |
+
text-align: center;
|
| 29 |
+
}
|
| 30 |
+
.section-header {
|
| 31 |
+
font-size: 1.8rem;
|
| 32 |
+
font-weight: bold;
|
| 33 |
+
color: #2c3e50;
|
| 34 |
+
margin: 1.5rem 0;
|
| 35 |
+
}
|
| 36 |
+
.feature-card {
|
| 37 |
+
background-color: #ffffff;
|
| 38 |
+
padding: 1.5rem;
|
| 39 |
+
border-radius: 0.8rem;
|
| 40 |
+
border-left: 5px solid #1f77b4;
|
| 41 |
+
margin-bottom: 1.5rem;
|
| 42 |
+
box-shadow: 0 2px 4px rgba(0,0,0,0.15);
|
| 43 |
+
color: #2c3e50;
|
| 44 |
+
border: 1px solid #e9ecef;
|
| 45 |
+
}
|
| 46 |
+
.feature-card h3 {
|
| 47 |
+
color: #1f77b4;
|
| 48 |
+
margin-top: 0;
|
| 49 |
+
}
|
| 50 |
+
.feature-card p {
|
| 51 |
+
color: #2c3e50;
|
| 52 |
+
}
|
| 53 |
+
.feature-card ul {
|
| 54 |
+
color: #2c3e50;
|
| 55 |
+
}
|
| 56 |
+
.navigation-button {
|
| 57 |
+
width: 100%;
|
| 58 |
+
height: 80px;
|
| 59 |
+
font-size: 1.2rem;
|
| 60 |
+
margin: 10px 0;
|
| 61 |
+
}
|
| 62 |
+
</style>
|
| 63 |
+
""", unsafe_allow_html=True)
|
| 64 |
+
|
| 65 |
+
# Initialize session state for shared variables
|
| 66 |
+
if 'data_path' not in st.session_state:
|
| 67 |
+
st.session_state.data_path = "data/my_roster_data"
|
| 68 |
+
if 'target_date' not in st.session_state:
|
| 69 |
+
st.session_state.target_date = ""
|
| 70 |
+
|
| 71 |
+
# Title
|
| 72 |
+
st.markdown('<h1 class="main-header">π SD Roster Optimization Tool</h1>', unsafe_allow_html=True)
|
| 73 |
+
|
| 74 |
+
# Introduction section
|
| 75 |
+
col1, col2 = st.columns([2, 1])
|
| 76 |
+
|
| 77 |
+
with col1:
|
| 78 |
+
st.markdown("""
|
| 79 |
+
## π Welcome to the Supply Chain Roster Optimization Tool
|
| 80 |
+
|
| 81 |
+
This comprehensive tool helps you optimize workforce allocation and production scheduling
|
| 82 |
+
using advanced mathematical optimization techniques. Navigate through the different sections
|
| 83 |
+
to analyze your data and run optimizations.
|
| 84 |
+
|
| 85 |
+
### π§ Key Features:
|
| 86 |
+
- **Advanced Optimization Engine**: Built on Google OR-Tools for mixed-integer programming
|
| 87 |
+
- **Multi-constraint Support**: Handle complex business rules and staffing requirements
|
| 88 |
+
- **Real-time Data Integration**: Work with your existing CSV data files
|
| 89 |
+
- **Interactive Visualizations**: Rich charts and analytics for decision making
|
| 90 |
+
- **Flexible Configuration**: Adjust parameters for different business scenarios
|
| 91 |
+
""")
|
| 92 |
+
|
| 93 |
+
with col2:
|
| 94 |
+
st.markdown("### π Quick Start")
|
| 95 |
+
|
| 96 |
+
# Navigation buttons
|
| 97 |
+
if st.button("π View Dataset Metadata", key="nav_metadata", help="Explore your data overview"):
|
| 98 |
+
st.switch_page("pages/1_π_Dataset_Metadata.py")
|
| 99 |
+
|
| 100 |
+
if st.button("π― Run Optimization", key="nav_optimization", help="Configure and run optimization"):
|
| 101 |
+
st.switch_page("pages/2_π―_Optimization.py")
|
| 102 |
+
|
| 103 |
+
# Global settings section
|
| 104 |
+
st.markdown("---")
|
| 105 |
+
st.markdown('<h2 class="section-header">π Global Settings</h2>', unsafe_allow_html=True)
|
| 106 |
+
|
| 107 |
+
col_set1, col_set2 = st.columns(2)
|
| 108 |
+
|
| 109 |
+
with col_set1:
|
| 110 |
+
st.markdown("### π Data Configuration")
|
| 111 |
+
new_data_path = st.text_input(
|
| 112 |
+
"Data Path",
|
| 113 |
+
value=st.session_state.data_path,
|
| 114 |
+
help="Path to your CSV data files. This setting is shared across all pages."
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
if new_data_path != st.session_state.data_path:
|
| 118 |
+
st.session_state.data_path = new_data_path
|
| 119 |
+
st.success("β
Data path updated globally!")
|
| 120 |
+
|
| 121 |
+
st.info(f"**Current data path:** `{st.session_state.data_path}`")
|
| 122 |
+
|
| 123 |
+
with col_set2:
|
| 124 |
+
st.markdown("### π
Date Configuration")
|
| 125 |
+
|
| 126 |
+
# Try to load available dates
|
| 127 |
+
try:
|
| 128 |
+
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
|
| 129 |
+
import src.etl.transform as transform
|
| 130 |
+
|
| 131 |
+
date_ranges = transform.get_date_ranges()
|
| 132 |
+
if date_ranges:
|
| 133 |
+
date_range_options = [""] + [f"{start.strftime('%Y-%m-%d')} to {end.strftime('%Y-%m-%d')}" for start, end in date_ranges]
|
| 134 |
+
selected_range_str = st.selectbox(
|
| 135 |
+
"Available Date Ranges:",
|
| 136 |
+
options=date_range_options,
|
| 137 |
+
help="Select from available date ranges in your data"
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
if selected_range_str:
|
| 141 |
+
selected_index = date_range_options.index(selected_range_str) - 1
|
| 142 |
+
start_date, end_date = date_ranges[selected_index]
|
| 143 |
+
st.session_state.date_range = (start_date, end_date)
|
| 144 |
+
st.success(f"β
Selected: {start_date} to {end_date}")
|
| 145 |
+
|
| 146 |
+
except Exception as e:
|
| 147 |
+
st.warning(f"Could not load date ranges: {e}")
|
| 148 |
+
st.info("Date ranges will be available when data is properly configured.")
|
| 149 |
+
|
| 150 |
+
# Overview sections
|
| 151 |
+
st.markdown("---")
|
| 152 |
+
|
| 153 |
+
col_info1, col_info2, col_info3 = st.columns(3)
|
| 154 |
+
|
| 155 |
+
with col_info1:
|
| 156 |
+
st.markdown("""
|
| 157 |
+
<div class="feature-card">
|
| 158 |
+
<h3>π Dataset Metadata</h3>
|
| 159 |
+
<p>Comprehensive overview of your data including:</p>
|
| 160 |
+
<ul>
|
| 161 |
+
<li>Demand analysis and forecasting</li>
|
| 162 |
+
<li>Employee availability and costs</li>
|
| 163 |
+
<li>Production line capacities</li>
|
| 164 |
+
<li>Historical performance data</li>
|
| 165 |
+
</ul>
|
| 166 |
+
</div>
|
| 167 |
+
""", unsafe_allow_html=True)
|
| 168 |
+
|
| 169 |
+
with col_info2:
|
| 170 |
+
st.markdown("""
|
| 171 |
+
<div class="feature-card">
|
| 172 |
+
<h3>π― Optimization Engine</h3>
|
| 173 |
+
<p>Advanced optimization features:</p>
|
| 174 |
+
<ul>
|
| 175 |
+
<li>Multi-objective optimization</li>
|
| 176 |
+
<li>Constraint satisfaction</li>
|
| 177 |
+
<li>Scenario analysis</li>
|
| 178 |
+
<li>Cost minimization</li>
|
| 179 |
+
</ul>
|
| 180 |
+
</div>
|
| 181 |
+
""", unsafe_allow_html=True)
|
| 182 |
+
|
| 183 |
+
with col_info3:
|
| 184 |
+
st.markdown("""
|
| 185 |
+
<div class="feature-card">
|
| 186 |
+
<h3>π Analytics & Reports</h3>
|
| 187 |
+
<p>Rich visualization and reporting:</p>
|
| 188 |
+
<ul>
|
| 189 |
+
<li>Interactive dashboards</li>
|
| 190 |
+
<li>Cost analysis charts</li>
|
| 191 |
+
<li>Performance metrics</li>
|
| 192 |
+
<li>Export capabilities</li>
|
| 193 |
+
</ul>
|
| 194 |
+
</div>
|
| 195 |
+
""", unsafe_allow_html=True)
|
| 196 |
+
|
| 197 |
+
# System status
|
| 198 |
+
st.markdown("---")
|
| 199 |
+
st.markdown("### π System Status")
|
| 200 |
+
|
| 201 |
+
col_status1, col_status2, col_status3, col_status4 = st.columns(4)
|
| 202 |
+
|
| 203 |
+
# Check system components
|
| 204 |
+
try:
|
| 205 |
+
import ortools
|
| 206 |
+
ortools_status = "β
Available"
|
| 207 |
+
except:
|
| 208 |
+
ortools_status = "β Not installed"
|
| 209 |
+
|
| 210 |
+
try:
|
| 211 |
+
import plotly
|
| 212 |
+
plotly_status = "β
Available"
|
| 213 |
+
except:
|
| 214 |
+
plotly_status = "β Not installed"
|
| 215 |
+
|
| 216 |
+
data_status = "β
Configured" if os.path.exists(st.session_state.data_path) else "β οΈ Path not found"
|
| 217 |
+
|
| 218 |
+
with col_status1:
|
| 219 |
+
st.metric("OR-Tools", ortools_status)
|
| 220 |
+
with col_status2:
|
| 221 |
+
st.metric("Plotly", plotly_status)
|
| 222 |
+
with col_status3:
|
| 223 |
+
st.metric("Data Path", data_status)
|
| 224 |
+
with col_status4:
|
| 225 |
+
st.metric("Session State", "β
Active")
|
| 226 |
+
|
| 227 |
+
# Footer
|
| 228 |
+
st.markdown("---")
|
| 229 |
+
st.markdown("""
|
| 230 |
+
<div style='text-align: center; color: gray; padding: 2rem;'>
|
| 231 |
+
<small>SD Roster Optimization Tool | Built with Streamlit & OR-Tools | Version 1.0</small>
|
| 232 |
+
</div>
|
| 233 |
+
""", unsafe_allow_html=True)
|
STREAMLIT_README.md
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SD Roster Optimization Tool - Multi-Page Streamlit Interface
|
| 2 |
+
|
| 3 |
+
A comprehensive multi-page Streamlit web application for supply chain roster optimization using OR-Tools.
|
| 4 |
+
|
| 5 |
+
## ποΈ Application Structure
|
| 6 |
+
|
| 7 |
+
### π Home Page
|
| 8 |
+
- **Welcome Dashboard**: Overview and navigation hub
|
| 9 |
+
- **Global Settings**: Shared data path and date configuration
|
| 10 |
+
- **System Status**: Component availability and health checks
|
| 11 |
+
- **Quick Navigation**: Direct links to main functionalities
|
| 12 |
+
|
| 13 |
+
### π Dataset Metadata Page
|
| 14 |
+
Comprehensive data analysis across five detailed tabs:
|
| 15 |
+
|
| 16 |
+
#### π Data Overview Tab
|
| 17 |
+
- **Key Metrics**: Orders, quantities, products, employees, production lines
|
| 18 |
+
- **Data Quality Analysis**: Completeness scores and missing data indicators
|
| 19 |
+
- **Data Freshness**: Latest data timestamps and age indicators
|
| 20 |
+
|
| 21 |
+
#### π¦ Demand Analysis Tab
|
| 22 |
+
- **Demand Metrics**: Total, average, max, min order sizes
|
| 23 |
+
- **Top Products**: Ranking by demand volume with visualizations
|
| 24 |
+
- **Daily Patterns**: Trend analysis and demand variability
|
| 25 |
+
- **Distribution Analysis**: Order quantity and frequency distributions
|
| 26 |
+
|
| 27 |
+
#### π₯ Workforce Analysis Tab
|
| 28 |
+
- **Employee Metrics**: Total staff, types, distribution
|
| 29 |
+
- **Cost Structure**: Hourly rates by employee type and shift
|
| 30 |
+
- **Productivity Analysis**: Performance metrics by employee type
|
| 31 |
+
|
| 32 |
+
#### π Production Capacity Tab
|
| 33 |
+
- **Line Metrics**: Total lines, types, maximum capacities
|
| 34 |
+
- **Capacity Distribution**: Line allocation and utilization potential
|
| 35 |
+
- **Theoretical Analysis**: Maximum capacity calculations by shift
|
| 36 |
+
|
| 37 |
+
#### π° Cost Analysis Tab
|
| 38 |
+
- **Cost Structure**: Min/max/average hourly rates and ranges
|
| 39 |
+
- **Budget Planning**: Minimum and maximum cost scenarios
|
| 40 |
+
- **Projections**: Weekly and monthly cost estimates
|
| 41 |
+
|
| 42 |
+
### π― Optimization Page
|
| 43 |
+
Advanced optimization interface with comprehensive results:
|
| 44 |
+
|
| 45 |
+
#### π Summary Tab
|
| 46 |
+
- Total optimization cost and key metrics
|
| 47 |
+
- Cost efficiency analysis (cost per day, cost per unit)
|
| 48 |
+
- Optimization parameters used
|
| 49 |
+
|
| 50 |
+
#### π Production Tab
|
| 51 |
+
- Production vs. demand comparison by product
|
| 52 |
+
- Fulfillment rate analysis with interactive charts
|
| 53 |
+
- Production schedule visualization
|
| 54 |
+
|
| 55 |
+
#### π· Labor Tab
|
| 56 |
+
- Labor allocation by employee type and shift
|
| 57 |
+
- Required headcount analysis
|
| 58 |
+
- Daily and average staffing requirements
|
| 59 |
+
|
| 60 |
+
#### π° Costs Tab
|
| 61 |
+
- Detailed cost breakdown by employee type and shift
|
| 62 |
+
- Cost distribution visualizations
|
| 63 |
+
- Priority mode analysis (when applicable)
|
| 64 |
+
|
| 65 |
+
## Quick Start
|
| 66 |
+
|
| 67 |
+
### 1. Install Dependencies
|
| 68 |
+
```bash
|
| 69 |
+
pip install -r requirements.txt
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
### 2. Run the Application
|
| 73 |
+
```bash
|
| 74 |
+
# Option 1: Using the runner script
|
| 75 |
+
python run_streamlit.py
|
| 76 |
+
|
| 77 |
+
# Option 2: Direct streamlit command
|
| 78 |
+
streamlit run Home.py
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
### 3. Access the Application
|
| 82 |
+
Open your browser to: `http://localhost:8501`
|
| 83 |
+
|
| 84 |
+
## Usage Guide
|
| 85 |
+
|
| 86 |
+
### Navigation Flow
|
| 87 |
+
1. **Start at Home**: Configure global settings and navigate to specific functions
|
| 88 |
+
2. **Explore Metadata**: Analyze your data across the comprehensive metadata tabs
|
| 89 |
+
3. **Run Optimization**: Configure parameters and execute optimization on the dedicated page
|
| 90 |
+
4. **Analyze Results**: Review detailed results across multiple result tabs
|
| 91 |
+
|
| 92 |
+
### Page-by-Page Guide
|
| 93 |
+
1. **Home Page**: Set data paths, select date ranges, check system status
|
| 94 |
+
2. **Dataset Metadata**: Deep dive into demand, workforce, capacity, and cost analysis
|
| 95 |
+
3. **Optimization**: Configure optimization parameters, run optimization, analyze results
|
| 96 |
+
|
| 97 |
+
## Technical Details
|
| 98 |
+
|
| 99 |
+
### Optimization Engine
|
| 100 |
+
- Built on Google OR-Tools for mixed-integer programming
|
| 101 |
+
- Supports multiple constraint modes for realistic business scenarios
|
| 102 |
+
- Handles complex multi-product, multi-shift, multi-line scheduling
|
| 103 |
+
|
| 104 |
+
### Data Sources
|
| 105 |
+
The application automatically loads data from:
|
| 106 |
+
- `COOIS_Released_Prod_Orders.csv` - Production orders and demand
|
| 107 |
+
- Employee data files - Staff availability and costs
|
| 108 |
+
- Production line configuration - Line capacities and capabilities
|
| 109 |
+
|
| 110 |
+
### Configuration
|
| 111 |
+
Key optimization parameters can be adjusted in `src/config/optimization_config.py`:
|
| 112 |
+
- Employee types and costs
|
| 113 |
+
- Shift definitions and durations
|
| 114 |
+
- Production line capacities
|
| 115 |
+
- Constraint modes and business rules
|
| 116 |
+
|
| 117 |
+
## Business Scenarios
|
| 118 |
+
|
| 119 |
+
### Priority Mode (Recommended)
|
| 120 |
+
- Uses UNICEF Fixed term staff first
|
| 121 |
+
- Engages Humanizer staff only when fixed staff at capacity
|
| 122 |
+
- Reflects realistic business operations
|
| 123 |
+
|
| 124 |
+
### Mandatory Mode
|
| 125 |
+
- Forces all fixed staff to work full hours
|
| 126 |
+
- More expensive but ensures full utilization
|
| 127 |
+
- Useful for guaranteed staffing scenarios
|
| 128 |
+
|
| 129 |
+
### Demand-Driven Mode
|
| 130 |
+
- Purely cost-optimized scheduling
|
| 131 |
+
- No mandatory fixed hours
|
| 132 |
+
- Most cost-efficient but may underutilize staff
|
| 133 |
+
|
| 134 |
+
## Troubleshooting
|
| 135 |
+
|
| 136 |
+
### Common Issues
|
| 137 |
+
1. **No Date Ranges Available**: Ensure your data files are in the correct location
|
| 138 |
+
2. **Optimization Fails**: Check that demand data exists for the selected date range
|
| 139 |
+
3. **Import Errors**: Verify all dependencies are installed
|
| 140 |
+
|
| 141 |
+
### Performance Tips
|
| 142 |
+
- Smaller date ranges optimize faster
|
| 143 |
+
- Reducing product count can improve solve time
|
| 144 |
+
- Priority mode typically solves faster than mandatory mode
|
| 145 |
+
|
| 146 |
+
## File Structure
|
| 147 |
+
```
|
| 148 |
+
Home.py # Main home page (entry point)
|
| 149 |
+
pages/
|
| 150 |
+
βββ 1_π_Dataset_Metadata.py # Comprehensive data analysis page
|
| 151 |
+
βββ 2_π―_Optimization.py # Optimization interface and results
|
| 152 |
+
run_streamlit.py # Convenient runner script
|
| 153 |
+
src/
|
| 154 |
+
βββ models/optimizer_real.py # Core optimization engine
|
| 155 |
+
βββ config/optimization_config.py # Configuration parameters
|
| 156 |
+
βββ etl/ # Data extraction and transformation
|
| 157 |
+
streamlit_app_old.py # Backup of original single-page app
|
| 158 |
+
```
|
| 159 |
+
|
| 160 |
+
## Support
|
| 161 |
+
For technical issues or feature requests, refer to the main project documentation or contact the development team.
|
pages/1_π_Dataset_Metadata.py
ADDED
|
@@ -0,0 +1,669 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
|
| 3 |
+
# Page configuration
|
| 4 |
+
st.set_page_config(
|
| 5 |
+
page_title="Dataset Metadata",
|
| 6 |
+
page_icon="π",
|
| 7 |
+
layout="wide"
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
# Import libraries
|
| 11 |
+
import pandas as pd
|
| 12 |
+
import plotly.express as px
|
| 13 |
+
import plotly.graph_objects as go
|
| 14 |
+
from plotly.subplots import make_subplots
|
| 15 |
+
import sys
|
| 16 |
+
import os
|
| 17 |
+
from datetime import datetime, timedelta
|
| 18 |
+
import numpy as np
|
| 19 |
+
|
| 20 |
+
# Add src to path for imports
|
| 21 |
+
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'src'))
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
import src.etl.extract as extract
|
| 25 |
+
import src.etl.transform as transform
|
| 26 |
+
from src.config import optimization_config
|
| 27 |
+
except ImportError as e:
|
| 28 |
+
st.error(f"Error importing modules: {e}")
|
| 29 |
+
st.stop()
|
| 30 |
+
|
| 31 |
+
# Custom CSS
|
| 32 |
+
st.markdown("""
|
| 33 |
+
<style>
|
| 34 |
+
.main-header {
|
| 35 |
+
font-size: 2.5rem;
|
| 36 |
+
font-weight: bold;
|
| 37 |
+
color: #1f77b4;
|
| 38 |
+
margin-bottom: 1rem;
|
| 39 |
+
}
|
| 40 |
+
.section-header {
|
| 41 |
+
font-size: 1.5rem;
|
| 42 |
+
font-weight: bold;
|
| 43 |
+
color: #2c3e50;
|
| 44 |
+
margin: 1rem 0;
|
| 45 |
+
}
|
| 46 |
+
.metric-card {
|
| 47 |
+
background-color: #f8f9fa;
|
| 48 |
+
padding: 1rem;
|
| 49 |
+
border-radius: 0.5rem;
|
| 50 |
+
border-left: 4px solid #1f77b4;
|
| 51 |
+
margin-bottom: 1rem;
|
| 52 |
+
}
|
| 53 |
+
.info-box {
|
| 54 |
+
background-color: #e7f3ff;
|
| 55 |
+
padding: 1rem;
|
| 56 |
+
border-radius: 0.5rem;
|
| 57 |
+
border-left: 4px solid #0066cc;
|
| 58 |
+
margin: 1rem 0;
|
| 59 |
+
}
|
| 60 |
+
</style>
|
| 61 |
+
""", unsafe_allow_html=True)
|
| 62 |
+
|
| 63 |
+
# Title
|
| 64 |
+
st.markdown('<h1 class="main-header">π Dataset Metadata Overview</h1>', unsafe_allow_html=True)
|
| 65 |
+
|
| 66 |
+
# Check if data path is available from session state
|
| 67 |
+
if 'data_path' not in st.session_state:
|
| 68 |
+
st.session_state.data_path = "data/my_roster_data"
|
| 69 |
+
|
| 70 |
+
if 'date_range' not in st.session_state:
|
| 71 |
+
st.session_state.date_range = None
|
| 72 |
+
|
| 73 |
+
# Sidebar for date selection
|
| 74 |
+
with st.sidebar:
|
| 75 |
+
st.markdown("## 📅 Date Selection")
|
| 76 |
+
|
| 77 |
+
try:
|
| 78 |
+
date_ranges = transform.get_date_ranges()
|
| 79 |
+
if date_ranges:
|
| 80 |
+
date_range_options = [f"{start.strftime('%Y-%m-%d')} to {end.strftime('%Y-%m-%d')}" for start, end in date_ranges]
|
| 81 |
+
selected_range_str = st.selectbox(
|
| 82 |
+
"Select date range:",
|
| 83 |
+
options=date_range_options,
|
| 84 |
+
help="Available date ranges from released orders"
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
selected_index = date_range_options.index(selected_range_str)
|
| 88 |
+
start_date, end_date = date_ranges[selected_index]
|
| 89 |
+
st.session_state.date_range = (start_date, end_date)
|
| 90 |
+
|
| 91 |
+
duration = (end_date - start_date).days + 1
|
| 92 |
+
st.info(f"Duration: {duration} days")
|
| 93 |
+
|
| 94 |
+
else:
|
| 95 |
+
st.warning("No date ranges found")
|
| 96 |
+
start_date = datetime(2025, 3, 24).date()
|
| 97 |
+
end_date = datetime(2025, 3, 28).date()
|
| 98 |
+
st.session_state.date_range = (start_date, end_date)
|
| 99 |
+
|
| 100 |
+
except Exception as e:
|
| 101 |
+
st.error(f"Error loading dates: {e}")
|
| 102 |
+
start_date = datetime(2025, 3, 24).date()
|
| 103 |
+
end_date = datetime(2025, 3, 28).date()
|
| 104 |
+
st.session_state.date_range = (start_date, end_date)
|
| 105 |
+
|
| 106 |
+
st.markdown("---")
|
| 107 |
+
st.markdown("## π Refresh Data")
|
| 108 |
+
if st.button("π Reload All Data"):
|
| 109 |
+
st.rerun()
|
| 110 |
+
|
| 111 |
+
# Main content
|
| 112 |
+
if st.session_state.date_range:
|
| 113 |
+
start_date, end_date = st.session_state.date_range
|
| 114 |
+
st.markdown(f"**Analysis Period:** {start_date} to {end_date}")
|
| 115 |
+
else:
|
| 116 |
+
st.warning("No date range selected")
|
| 117 |
+
st.stop()
|
| 118 |
+
|
| 119 |
+
# Create tabs for different metadata sections
|
| 120 |
+
tab1, tab2, tab3, tab4, tab5 = st.tabs([
|
| 121 |
+
"π Data Overview",
|
| 122 |
+
"π¦ Demand Analysis",
|
| 123 |
+
"π₯ Workforce Analysis",
|
| 124 |
+
"π Production Capacity",
|
| 125 |
+
"π° Cost Analysis"
|
| 126 |
+
])
|
| 127 |
+
|
| 128 |
+
# Tab 1: Data Overview
|
| 129 |
+
with tab1:
|
| 130 |
+
st.markdown('<h2 class="section-header">π Dataset Overview</h2>', unsafe_allow_html=True)
|
| 131 |
+
|
| 132 |
+
# Overall metrics
|
| 133 |
+
try:
|
| 134 |
+
# Load basic data
|
| 135 |
+
demand_df = extract.read_released_orders_data(start_date=start_date, end_date=end_date)
|
| 136 |
+
employee_df = extract.read_employee_data()
|
| 137 |
+
line_df = extract.read_packaging_line_data()
|
| 138 |
+
|
| 139 |
+
# Calculate key metrics
|
| 140 |
+
total_orders = len(demand_df)
|
| 141 |
+
total_quantity = demand_df["Order quantity (GMEIN)"].sum()
|
| 142 |
+
unique_products = demand_df["Material Number"].nunique()
|
| 143 |
+
total_employees = len(employee_df)
|
| 144 |
+
total_lines = line_df["line_count"].sum()
|
| 145 |
+
|
| 146 |
+
# Display key metrics
|
| 147 |
+
col1, col2, col3, col4, col5 = st.columns(5)
|
| 148 |
+
|
| 149 |
+
with col1:
|
| 150 |
+
st.metric("π¦ Total Orders", f"{total_orders:,}")
|
| 151 |
+
with col2:
|
| 152 |
+
st.metric("π Total Quantity", f"{total_quantity:,.0f}")
|
| 153 |
+
with col3:
|
| 154 |
+
st.metric("π― Unique Products", unique_products)
|
| 155 |
+
with col4:
|
| 156 |
+
st.metric("π₯ Total Employees", total_employees)
|
| 157 |
+
with col5:
|
| 158 |
+
st.metric("π Production Lines", total_lines)
|
| 159 |
+
|
| 160 |
+
# Data quality overview
|
| 161 |
+
st.markdown("### π Data Quality Summary")
|
| 162 |
+
|
| 163 |
+
col_q1, col_q2 = st.columns(2)
|
| 164 |
+
|
| 165 |
+
with col_q1:
|
| 166 |
+
# Orders data quality
|
| 167 |
+
missing_orders = demand_df.isnull().sum().sum()
|
| 168 |
+
completeness = ((demand_df.size - missing_orders) / demand_df.size) * 100
|
| 169 |
+
|
| 170 |
+
st.markdown("""
|
| 171 |
+
**Orders Data Quality:**
|
| 172 |
+
""")
|
| 173 |
+
st.progress(completeness / 100)
|
| 174 |
+
st.write(f"Completeness: {completeness:.1f}%")
|
| 175 |
+
st.write(f"Missing values: {missing_orders}")
|
| 176 |
+
|
| 177 |
+
with col_q2:
|
| 178 |
+
# Employee data quality
|
| 179 |
+
missing_emp = employee_df.isnull().sum().sum()
|
| 180 |
+
emp_completeness = ((employee_df.size - missing_emp) / employee_df.size) * 100
|
| 181 |
+
|
| 182 |
+
st.markdown("""
|
| 183 |
+
**Employee Data Quality:**
|
| 184 |
+
""")
|
| 185 |
+
st.progress(emp_completeness / 100)
|
| 186 |
+
st.write(f"Completeness: {emp_completeness:.1f}%")
|
| 187 |
+
st.write(f"Missing values: {missing_emp}")
|
| 188 |
+
|
| 189 |
+
# Data freshness
|
| 190 |
+
st.markdown("### 📅 Data Freshness")
|
| 191 |
+
if 'Date' in demand_df.columns:
|
| 192 |
+
latest_order = pd.to_datetime(demand_df['Date']).max()
|
| 193 |
+
days_old = (datetime.now() - latest_order).days
|
| 194 |
+
st.info(f"Latest order data: {latest_order.strftime('%Y-%m-%d')} ({days_old} days ago)")
|
| 195 |
+
|
| 196 |
+
except Exception as e:
|
| 197 |
+
st.error(f"Error loading overview data: {e}")
|
| 198 |
+
|
| 199 |
+
# Tab 2: Demand Analysis
|
| 200 |
+
with tab2:
|
| 201 |
+
st.markdown('<h2 class="section-header">π¦ Demand Analysis</h2>', unsafe_allow_html=True)
|
| 202 |
+
|
| 203 |
+
try:
|
| 204 |
+
demand_df = extract.read_released_orders_data(start_date=start_date, end_date=end_date)
|
| 205 |
+
|
| 206 |
+
# Demand summary metrics
|
| 207 |
+
col_d1, col_d2, col_d3, col_d4 = st.columns(4)
|
| 208 |
+
|
| 209 |
+
total_demand = demand_df["Order quantity (GMEIN)"].sum()
|
| 210 |
+
avg_order_size = demand_df["Order quantity (GMEIN)"].mean()
|
| 211 |
+
max_order_size = demand_df["Order quantity (GMEIN)"].max()
|
| 212 |
+
min_order_size = demand_df["Order quantity (GMEIN)"].min()
|
| 213 |
+
|
| 214 |
+
with col_d1:
|
| 215 |
+
st.metric("π Total Demand", f"{total_demand:,.0f}")
|
| 216 |
+
with col_d2:
|
| 217 |
+
st.metric("π Avg Order Size", f"{avg_order_size:,.0f}")
|
| 218 |
+
with col_d3:
|
| 219 |
+
st.metric("πΊ Max Order", f"{max_order_size:,.0f}")
|
| 220 |
+
with col_d4:
|
| 221 |
+
st.metric("π» Min Order", f"{min_order_size:,.0f}")
|
| 222 |
+
|
| 223 |
+
# Top products analysis
|
| 224 |
+
st.markdown("### π― Top Products by Demand")
|
| 225 |
+
|
| 226 |
+
col_top1, col_top2 = st.columns([2, 1])
|
| 227 |
+
|
| 228 |
+
with col_top1:
|
| 229 |
+
top_products = demand_df.groupby('Material Number')["Order quantity (GMEIN)"].sum().sort_values(ascending=False).head(10)
|
| 230 |
+
|
| 231 |
+
fig_top = px.bar(
|
| 232 |
+
x=top_products.index,
|
| 233 |
+
y=top_products.values,
|
| 234 |
+
title='Top 10 Products by Total Demand',
|
| 235 |
+
labels={'x': 'Product', 'y': 'Total Quantity'}
|
| 236 |
+
)
|
| 237 |
+
fig_top.update_layout(xaxis_tickangle=-45)
|
| 238 |
+
st.plotly_chart(fig_top, use_container_width=True)
|
| 239 |
+
|
| 240 |
+
with col_top2:
|
| 241 |
+
st.markdown("**Top 10 Products:**")
|
| 242 |
+
for i, (product, quantity) in enumerate(top_products.head(10).items(), 1):
|
| 243 |
+
st.write(f"{i}. {product}: {quantity:,.0f}")
|
| 244 |
+
|
| 245 |
+
# Daily demand pattern
|
| 246 |
+
st.markdown("### 📅 Daily Demand Pattern")
|
| 247 |
+
|
| 248 |
+
if 'Date' in demand_df.columns:
|
| 249 |
+
daily_demand = demand_df.groupby('Date')["Order quantity (GMEIN)"].sum().reset_index()
|
| 250 |
+
daily_demand['Date'] = pd.to_datetime(daily_demand['Date'])
|
| 251 |
+
|
| 252 |
+
fig_daily = px.line(
|
| 253 |
+
daily_demand,
|
| 254 |
+
x='Date',
|
| 255 |
+
y='Order quantity (GMEIN)',
|
| 256 |
+
title='Daily Demand Trend',
|
| 257 |
+
labels={'Order quantity (GMEIN)': 'Total Quantity'}
|
| 258 |
+
)
|
| 259 |
+
st.plotly_chart(fig_daily, use_container_width=True)
|
| 260 |
+
|
| 261 |
+
# Demand statistics
|
| 262 |
+
col_stats1, col_stats2, col_stats3 = st.columns(3)
|
| 263 |
+
with col_stats1:
|
| 264 |
+
st.metric("π Avg Daily Demand", f"{daily_demand['Order quantity (GMEIN)'].mean():,.0f}")
|
| 265 |
+
with col_stats2:
|
| 266 |
+
st.metric("π Peak Daily Demand", f"{daily_demand['Order quantity (GMEIN)'].max():,.0f}")
|
| 267 |
+
with col_stats3:
|
| 268 |
+
std_dev = daily_demand['Order quantity (GMEIN)'].std()
|
| 269 |
+
st.metric("π Demand Variability", f"{std_dev:,.0f}")
|
| 270 |
+
|
| 271 |
+
# Product distribution
|
| 272 |
+
st.markdown("### π― Product Demand Distribution")
|
| 273 |
+
|
| 274 |
+
product_counts = demand_df['Material Number'].value_counts()
|
| 275 |
+
|
| 276 |
+
col_dist1, col_dist2 = st.columns(2)
|
| 277 |
+
|
| 278 |
+
with col_dist1:
|
| 279 |
+
# Histogram of order quantities
|
| 280 |
+
fig_hist = px.histogram(
|
| 281 |
+
demand_df,
|
| 282 |
+
x='Order quantity (GMEIN)',
|
| 283 |
+
nbins=20,
|
| 284 |
+
title='Order Quantity Distribution'
|
| 285 |
+
)
|
| 286 |
+
st.plotly_chart(fig_hist, use_container_width=True)
|
| 287 |
+
|
| 288 |
+
with col_dist2:
|
| 289 |
+
# Product frequency
|
| 290 |
+
fig_freq = px.histogram(
|
| 291 |
+
x=product_counts.values,
|
| 292 |
+
nbins=15,
|
| 293 |
+
title='Product Order Frequency Distribution'
|
| 294 |
+
)
|
| 295 |
+
fig_freq.update_layout(xaxis_title="Number of Orders", yaxis_title="Number of Products")
|
| 296 |
+
st.plotly_chart(fig_freq, use_container_width=True)
|
| 297 |
+
|
| 298 |
+
except Exception as e:
|
| 299 |
+
st.error(f"Error in demand analysis: {e}")
|
| 300 |
+
|
| 301 |
+
# Tab 3: Workforce Analysis
|
| 302 |
+
with tab3:
|
| 303 |
+
st.markdown('<h2 class="section-header">π₯ Workforce Analysis</h2>', unsafe_allow_html=True)
|
| 304 |
+
|
| 305 |
+
try:
|
| 306 |
+
employee_df = extract.read_employee_data()
|
| 307 |
+
|
| 308 |
+
# Employee metrics
|
| 309 |
+
col_emp1, col_emp2, col_emp3, col_emp4 = st.columns(4)
|
| 310 |
+
|
| 311 |
+
total_employees = len(employee_df)
|
| 312 |
+
emp_types = employee_df['employment_type'].nunique()
|
| 313 |
+
|
| 314 |
+
with col_emp1:
|
| 315 |
+
st.metric("π₯ Total Employees", total_employees)
|
| 316 |
+
with col_emp2:
|
| 317 |
+
st.metric("π Employee Types", emp_types)
|
| 318 |
+
with col_emp3:
|
| 319 |
+
unicef_count = len(employee_df[employee_df['employment_type'] == 'UNICEF Fixed term'])
|
| 320 |
+
st.metric("π’ UNICEF Fixed", unicef_count)
|
| 321 |
+
with col_emp4:
|
| 322 |
+
humanizer_count = len(employee_df[employee_df['employment_type'] == 'Humanizer'])
|
| 323 |
+
st.metric("π· Humanizer", humanizer_count)
|
| 324 |
+
|
| 325 |
+
# Employee type distribution
|
| 326 |
+
st.markdown("### π₯ Employee Type Distribution")
|
| 327 |
+
|
| 328 |
+
col_emp_dist1, col_emp_dist2 = st.columns(2)
|
| 329 |
+
|
| 330 |
+
with col_emp_dist1:
|
| 331 |
+
emp_type_counts = employee_df['employment_type'].value_counts()
|
| 332 |
+
|
| 333 |
+
fig_emp_pie = px.pie(
|
| 334 |
+
values=emp_type_counts.values,
|
| 335 |
+
names=emp_type_counts.index,
|
| 336 |
+
title='Employee Distribution by Type'
|
| 337 |
+
)
|
| 338 |
+
st.plotly_chart(fig_emp_pie, use_container_width=True)
|
| 339 |
+
|
| 340 |
+
with col_emp_dist2:
|
| 341 |
+
fig_emp_bar = px.bar(
|
| 342 |
+
x=emp_type_counts.index,
|
| 343 |
+
y=emp_type_counts.values,
|
| 344 |
+
title='Employee Count by Type'
|
| 345 |
+
)
|
| 346 |
+
st.plotly_chart(fig_emp_bar, use_container_width=True)
|
| 347 |
+
|
| 348 |
+
# Cost analysis
|
| 349 |
+
st.markdown("### π° Employee Cost Structure")
|
| 350 |
+
|
| 351 |
+
cost_data = optimization_config.COST_LIST_PER_EMP_SHIFT
|
| 352 |
+
|
| 353 |
+
# Create cost comparison table
|
| 354 |
+
cost_comparison = []
|
| 355 |
+
for emp_type, shifts in cost_data.items():
|
| 356 |
+
for shift, cost in shifts.items():
|
| 357 |
+
shift_name = {1: 'Regular', 2: 'Overtime', 3: 'Evening'}.get(shift, f'Shift {shift}')
|
| 358 |
+
cost_comparison.append({
|
| 359 |
+
'Employee Type': emp_type,
|
| 360 |
+
'Shift': shift_name,
|
| 361 |
+
'Hourly Rate ($)': cost
|
| 362 |
+
})
|
| 363 |
+
|
| 364 |
+
cost_df = pd.DataFrame(cost_comparison)
|
| 365 |
+
|
| 366 |
+
col_cost1, col_cost2 = st.columns(2)
|
| 367 |
+
|
| 368 |
+
with col_cost1:
|
| 369 |
+
st.dataframe(cost_df, use_container_width=True)
|
| 370 |
+
|
| 371 |
+
with col_cost2:
|
| 372 |
+
fig_cost = px.bar(
|
| 373 |
+
cost_df,
|
| 374 |
+
x='Employee Type',
|
| 375 |
+
y='Hourly Rate ($)',
|
| 376 |
+
color='Shift',
|
| 377 |
+
title='Hourly Rates by Employee Type and Shift',
|
| 378 |
+
barmode='group'
|
| 379 |
+
)
|
| 380 |
+
st.plotly_chart(fig_cost, use_container_width=True)
|
| 381 |
+
|
| 382 |
+
# Productivity analysis
|
| 383 |
+
st.markdown("### π Productivity Analysis")
|
| 384 |
+
|
| 385 |
+
try:
|
| 386 |
+
productivity_data = optimization_config.PRODUCTIVITY_LIST_PER_EMP_PRODUCT
|
| 387 |
+
|
| 388 |
+
# Calculate average productivity by employee type
|
| 389 |
+
prod_summary = []
|
| 390 |
+
for emp_type, shifts in productivity_data.items():
|
| 391 |
+
for shift, products in shifts.items():
|
| 392 |
+
if products: # Check if products dict is not empty
|
| 393 |
+
avg_productivity = np.mean(list(products.values()))
|
| 394 |
+
shift_name = {1: 'Regular', 2: 'Overtime', 3: 'Evening'}.get(shift, f'Shift {shift}')
|
| 395 |
+
prod_summary.append({
|
| 396 |
+
'Employee Type': emp_type,
|
| 397 |
+
'Shift': shift_name,
|
| 398 |
+
'Avg Productivity (units/hr)': avg_productivity
|
| 399 |
+
})
|
| 400 |
+
|
| 401 |
+
if prod_summary:
|
| 402 |
+
prod_df = pd.DataFrame(prod_summary)
|
| 403 |
+
|
| 404 |
+
fig_prod = px.bar(
|
| 405 |
+
prod_df,
|
| 406 |
+
x='Employee Type',
|
| 407 |
+
y='Avg Productivity (units/hr)',
|
| 408 |
+
color='Shift',
|
| 409 |
+
title='Average Productivity by Employee Type and Shift',
|
| 410 |
+
barmode='group'
|
| 411 |
+
)
|
| 412 |
+
st.plotly_chart(fig_prod, use_container_width=True)
|
| 413 |
+
|
| 414 |
+
st.dataframe(prod_df, use_container_width=True)
|
| 415 |
+
else:
|
| 416 |
+
st.info("No productivity data available")
|
| 417 |
+
|
| 418 |
+
except Exception as e:
|
| 419 |
+
st.warning(f"Could not load productivity data: {e}")
|
| 420 |
+
|
| 421 |
+
except Exception as e:
|
| 422 |
+
st.error(f"Error in workforce analysis: {e}")
|
| 423 |
+
|
| 424 |
+
# Tab 4: Production Capacity
|
| 425 |
+
with tab4:
|
| 426 |
+
st.markdown('<h2 class="section-header">π Production Capacity Analysis</h2>', unsafe_allow_html=True)
|
| 427 |
+
|
| 428 |
+
try:
|
| 429 |
+
line_df = extract.read_packaging_line_data()
|
| 430 |
+
|
| 431 |
+
# Production line metrics
|
| 432 |
+
col_line1, col_line2, col_line3, col_line4 = st.columns(4)
|
| 433 |
+
|
| 434 |
+
total_lines = line_df['line_count'].sum()
|
| 435 |
+
line_types = len(line_df)
|
| 436 |
+
max_capacity_line = line_df.loc[line_df['line_count'].idxmax()]
|
| 437 |
+
|
| 438 |
+
with col_line1:
|
| 439 |
+
st.metric("π Total Lines", total_lines)
|
| 440 |
+
with col_line2:
|
| 441 |
+
st.metric("π Line Types", line_types)
|
| 442 |
+
with col_line3:
|
| 443 |
+
st.metric("πΊ Max Capacity Type", f"Line {max_capacity_line['id']}")
|
| 444 |
+
with col_line4:
|
| 445 |
+
st.metric("π Max Line Count", max_capacity_line['line_count'])
|
| 446 |
+
|
| 447 |
+
# Line capacity distribution
|
| 448 |
+
st.markdown("### π Production Line Distribution")
|
| 449 |
+
|
| 450 |
+
col_cap1, col_cap2 = st.columns(2)
|
| 451 |
+
|
| 452 |
+
with col_cap1:
|
| 453 |
+
fig_line_pie = px.pie(
|
| 454 |
+
values=line_df['line_count'],
|
| 455 |
+
names=[f"Line {row['id']}" for _, row in line_df.iterrows()],
|
| 456 |
+
title='Production Line Distribution'
|
| 457 |
+
)
|
| 458 |
+
st.plotly_chart(fig_line_pie, use_container_width=True)
|
| 459 |
+
|
| 460 |
+
with col_cap2:
|
| 461 |
+
fig_line_bar = px.bar(
|
| 462 |
+
x=[f"Line {row['id']}" for _, row in line_df.iterrows()],
|
| 463 |
+
y=line_df['line_count'],
|
| 464 |
+
title='Line Count by Type'
|
| 465 |
+
)
|
| 466 |
+
st.plotly_chart(fig_line_bar, use_container_width=True)
|
| 467 |
+
|
| 468 |
+
# Capacity analysis
|
| 469 |
+
st.markdown("### β‘ Theoretical Capacity Analysis")
|
| 470 |
+
|
| 471 |
+
cap_per_line = optimization_config.CAP_PER_LINE_PER_HOUR
|
| 472 |
+
shift_hours = optimization_config.MAX_HOUR_PER_SHIFT_PER_PERSON
|
| 473 |
+
|
| 474 |
+
# Calculate theoretical daily capacity
|
| 475 |
+
capacity_analysis = []
|
| 476 |
+
for _, row in line_df.iterrows():
|
| 477 |
+
line_id = row['id']
|
| 478 |
+
line_count = row['line_count']
|
| 479 |
+
|
| 480 |
+
if line_id in cap_per_line:
|
| 481 |
+
hourly_cap = cap_per_line[line_id]
|
| 482 |
+
|
| 483 |
+
for shift, hours in shift_hours.items():
|
| 484 |
+
shift_name = {1: 'Regular', 2: 'Overtime', 3: 'Evening'}.get(shift, f'Shift {shift}')
|
| 485 |
+
daily_capacity = hourly_cap * hours * line_count
|
| 486 |
+
|
| 487 |
+
capacity_analysis.append({
|
| 488 |
+
'Line Type': f"Line {line_id}",
|
| 489 |
+
'Shift': shift_name,
|
| 490 |
+
'Hourly Capacity': hourly_cap,
|
| 491 |
+
'Shift Hours': hours,
|
| 492 |
+
'Line Count': line_count,
|
| 493 |
+
'Shift Capacity': daily_capacity
|
| 494 |
+
})
|
| 495 |
+
|
| 496 |
+
if capacity_analysis:
|
| 497 |
+
cap_df = pd.DataFrame(capacity_analysis)
|
| 498 |
+
|
| 499 |
+
# Display capacity table
|
| 500 |
+
st.dataframe(cap_df, use_container_width=True)
|
| 501 |
+
|
| 502 |
+
# Capacity visualization
|
| 503 |
+
fig_cap = px.bar(
|
| 504 |
+
cap_df,
|
| 505 |
+
x='Line Type',
|
| 506 |
+
y='Shift Capacity',
|
| 507 |
+
color='Shift',
|
| 508 |
+
title='Theoretical Capacity by Line Type and Shift',
|
| 509 |
+
barmode='group'
|
| 510 |
+
)
|
| 511 |
+
st.plotly_chart(fig_cap, use_container_width=True)
|
| 512 |
+
|
| 513 |
+
# Total capacity summary
|
| 514 |
+
total_capacity = cap_df.groupby('Line Type')['Shift Capacity'].sum()
|
| 515 |
+
|
| 516 |
+
col_total1, col_total2 = st.columns(2)
|
| 517 |
+
|
| 518 |
+
with col_total1:
|
| 519 |
+
st.markdown("**Total Daily Capacity by Line:**")
|
| 520 |
+
for line_type, capacity in total_capacity.items():
|
| 521 |
+
st.write(f"β’ {line_type}: {capacity:,.0f} units/day")
|
| 522 |
+
|
| 523 |
+
with col_total2:
|
| 524 |
+
total_all_lines = total_capacity.sum()
|
| 525 |
+
st.metric("π Total System Capacity", f"{total_all_lines:,.0f} units/day")
|
| 526 |
+
|
| 527 |
+
except Exception as e:
|
| 528 |
+
st.error(f"Error in production capacity analysis: {e}")
|
| 529 |
+
|
| 530 |
+
# Tab 5: Cost Analysis
|
| 531 |
+
with tab5:
|
| 532 |
+
st.markdown('<h2 class="section-header">π° Cost Analysis</h2>', unsafe_allow_html=True)
|
| 533 |
+
|
| 534 |
+
try:
|
| 535 |
+
# Load cost data
|
| 536 |
+
cost_data = optimization_config.COST_LIST_PER_EMP_SHIFT
|
| 537 |
+
employee_df = extract.read_employee_data()
|
| 538 |
+
|
| 539 |
+
# Cost structure overview
|
| 540 |
+
st.markdown("### π΅ Cost Structure Overview")
|
| 541 |
+
|
| 542 |
+
# Calculate cost ranges
|
| 543 |
+
all_costs = []
|
| 544 |
+
for emp_type, shifts in cost_data.items():
|
| 545 |
+
for shift, cost in shifts.items():
|
| 546 |
+
all_costs.append(cost)
|
| 547 |
+
|
| 548 |
+
col_cost_over1, col_cost_over2, col_cost_over3, col_cost_over4 = st.columns(4)
|
| 549 |
+
|
| 550 |
+
with col_cost_over1:
|
| 551 |
+
st.metric("π° Min Hourly Rate", f"${min(all_costs)}")
|
| 552 |
+
with col_cost_over2:
|
| 553 |
+
st.metric("π° Max Hourly Rate", f"${max(all_costs)}")
|
| 554 |
+
with col_cost_over3:
|
| 555 |
+
st.metric("π° Avg Hourly Rate", f"${np.mean(all_costs):.2f}")
|
| 556 |
+
with col_cost_over4:
|
| 557 |
+
cost_range = max(all_costs) - min(all_costs)
|
| 558 |
+
st.metric("π Cost Range", f"${cost_range}")
|
| 559 |
+
|
| 560 |
+
# Detailed cost breakdown
|
| 561 |
+
st.markdown("### π Detailed Cost Breakdown")
|
| 562 |
+
|
| 563 |
+
cost_breakdown = []
|
| 564 |
+
employee_counts = employee_df['employment_type'].value_counts()
|
| 565 |
+
|
| 566 |
+
for emp_type, shifts in cost_data.items():
|
| 567 |
+
emp_count = employee_counts.get(emp_type, 0)
|
| 568 |
+
|
| 569 |
+
for shift, hourly_rate in shifts.items():
|
| 570 |
+
shift_name = {1: 'Regular', 2: 'Overtime', 3: 'Evening'}.get(shift, f'Shift {shift}')
|
| 571 |
+
shift_hours = optimization_config.MAX_HOUR_PER_SHIFT_PER_PERSON.get(shift, 0)
|
| 572 |
+
|
| 573 |
+
daily_cost_per_emp = hourly_rate * shift_hours
|
| 574 |
+
total_daily_cost = daily_cost_per_emp * emp_count
|
| 575 |
+
|
| 576 |
+
cost_breakdown.append({
|
| 577 |
+
'Employee Type': emp_type,
|
| 578 |
+
'Shift': shift_name,
|
| 579 |
+
'Available Staff': emp_count,
|
| 580 |
+
'Hourly Rate ($)': hourly_rate,
|
| 581 |
+
'Shift Hours': shift_hours,
|
| 582 |
+
'Cost per Employee ($)': daily_cost_per_emp,
|
| 583 |
+
'Total Potential Cost ($)': total_daily_cost
|
| 584 |
+
})
|
| 585 |
+
|
| 586 |
+
cost_breakdown_df = pd.DataFrame(cost_breakdown)
|
| 587 |
+
st.dataframe(cost_breakdown_df, use_container_width=True)
|
| 588 |
+
|
| 589 |
+
# Cost visualization
|
| 590 |
+
col_cost_viz1, col_cost_viz2 = st.columns(2)
|
| 591 |
+
|
| 592 |
+
with col_cost_viz1:
|
| 593 |
+
fig_cost_comp = px.bar(
|
| 594 |
+
cost_breakdown_df,
|
| 595 |
+
x='Employee Type',
|
| 596 |
+
y='Total Potential Cost ($)',
|
| 597 |
+
color='Shift',
|
| 598 |
+
title='Total Potential Daily Cost by Type and Shift',
|
| 599 |
+
barmode='group'
|
| 600 |
+
)
|
| 601 |
+
st.plotly_chart(fig_cost_comp, use_container_width=True)
|
| 602 |
+
|
| 603 |
+
with col_cost_viz2:
|
| 604 |
+
# Cost efficiency (cost per hour)
|
| 605 |
+
fig_efficiency = px.scatter(
|
| 606 |
+
cost_breakdown_df,
|
| 607 |
+
x='Shift Hours',
|
| 608 |
+
y='Hourly Rate ($)',
|
| 609 |
+
color='Employee Type',
|
| 610 |
+
size='Available Staff',
|
| 611 |
+
title='Cost Efficiency Analysis',
|
| 612 |
+
hover_data=['Shift']
|
| 613 |
+
)
|
| 614 |
+
st.plotly_chart(fig_efficiency, use_container_width=True)
|
| 615 |
+
|
| 616 |
+
# Budget planning
|
| 617 |
+
st.markdown("### π Budget Planning Scenarios")
|
| 618 |
+
|
| 619 |
+
col_budget1, col_budget2 = st.columns(2)
|
| 620 |
+
|
| 621 |
+
with col_budget1:
|
| 622 |
+
st.markdown("**Minimum Daily Cost Scenario:**")
|
| 623 |
+
min_costs = cost_breakdown_df.groupby('Employee Type')['Cost per Employee ($)'].min()
|
| 624 |
+
total_min_daily = (min_costs * employee_counts).sum()
|
| 625 |
+
st.write(f"Total minimum daily cost: ${total_min_daily:,.2f}")
|
| 626 |
+
|
| 627 |
+
for emp_type, cost in min_costs.items():
|
| 628 |
+
count = employee_counts.get(emp_type, 0)
|
| 629 |
+
st.write(f"β’ {emp_type}: ${cost:.2f} Γ {count} = ${cost * count:,.2f}")
|
| 630 |
+
|
| 631 |
+
with col_budget2:
|
| 632 |
+
st.markdown("**Maximum Daily Cost Scenario:**")
|
| 633 |
+
max_costs = cost_breakdown_df.groupby('Employee Type')['Cost per Employee ($)'].max()
|
| 634 |
+
total_max_daily = (max_costs * employee_counts).sum()
|
| 635 |
+
st.write(f"Total maximum daily cost: ${total_max_daily:,.2f}")
|
| 636 |
+
|
| 637 |
+
for emp_type, cost in max_costs.items():
|
| 638 |
+
count = employee_counts.get(emp_type, 0)
|
| 639 |
+
st.write(f"β’ {emp_type}: ${cost:.2f} Γ {count} = ${cost * count:,.2f}")
|
| 640 |
+
|
| 641 |
+
# Weekly and monthly projections
|
| 642 |
+
st.markdown("### 📅 Cost Projections")
|
| 643 |
+
|
| 644 |
+
col_proj1, col_proj2, col_proj3 = st.columns(3)
|
| 645 |
+
|
| 646 |
+
with col_proj1:
|
| 647 |
+
weekly_min = total_min_daily * 7
|
| 648 |
+
weekly_max = total_max_daily * 7
|
| 649 |
+
st.metric("📅 Weekly Cost Range", f"${weekly_min:,.0f} - ${weekly_max:,.0f}")
|
| 650 |
+
|
| 651 |
+
with col_proj2:
|
| 652 |
+
monthly_min = total_min_daily * 30
|
| 653 |
+
monthly_max = total_max_daily * 30
|
| 654 |
+
st.metric("📅 Monthly Cost Range", f"${monthly_min:,.0f} - ${monthly_max:,.0f}")
|
| 655 |
+
|
| 656 |
+
with col_proj3:
|
| 657 |
+
avg_daily = (total_min_daily + total_max_daily) / 2
|
| 658 |
+
st.metric("💰 Average Daily Cost", f"${avg_daily:,.2f}")
|
| 659 |
+
|
| 660 |
+
except Exception as e:
|
| 661 |
+
st.error(f"Error in cost analysis: {e}")
|
| 662 |
+
|
| 663 |
+
# Footer
|
| 664 |
+
st.markdown("---")
|
| 665 |
+
st.markdown("""
|
| 666 |
+
<div style='text-align: center; color: gray; padding: 1rem;'>
|
| 667 |
+
<small>Dataset Metadata Analysis | Data updated in real-time from your CSV files</small>
|
| 668 |
+
</div>
|
| 669 |
+
""", unsafe_allow_html=True)
|
pages/2_π―_Optimization.py
ADDED
|
@@ -0,0 +1,633 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Optimization page: configure parameters in the sidebar and run the roster optimizer."""
import streamlit as st

# Page configuration — must be the first Streamlit call on the page.
st.set_page_config(
    page_title="Optimization Tool",
    page_icon="🎯",
    layout="wide"
)

# Remaining imports (kept after set_page_config on purpose).
import os
import sys
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Make the project 'src' package importable when Streamlit runs this page
# from the pages/ directory.
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'src'))

try:
    from src.models.optimizer_real import OptimizerReal
    from src.config import optimization_config
    import src.etl.extract as extract
    import src.etl.transform as transform
except ImportError as e:
    # Surface the failure in the UI and halt the page instead of crashing.
    st.error(f"Error importing modules: {e}")
    st.stop()
|
| 31 |
+
|
| 32 |
+
# Page-wide CSS: header sizes plus the coloured "optimization" and "results"
# panels used further down the page.
_CUSTOM_CSS = """
<style>
    .main-header {
        font-size: 2.5rem;
        font-weight: bold;
        color: #1f77b4;
        margin-bottom: 1rem;
    }
    .section-header {
        font-size: 1.5rem;
        font-weight: bold;
        color: #2c3e50;
        margin: 1rem 0;
    }
    .optimization-panel {
        background-color: #f8f9fa;
        padding: 1.5rem;
        border-radius: 0.8rem;
        border-left: 5px solid #28a745;
        margin-bottom: 1.5rem;
    }
    .results-panel {
        background-color: #fff3cd;
        padding: 1.5rem;
        border-radius: 0.8rem;
        border-left: 5px solid #ffc107;
        margin-bottom: 1.5rem;
    }
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
|
| 63 |
+
|
| 64 |
+
# Seed session-state slots once so later reads survive Streamlit reruns
# without KeyErrors.
for _state_key in ("optimization_results", "optimizer", "date_range"):
    if _state_key not in st.session_state:
        st.session_state[_state_key] = None

# Page title
st.markdown('<h1 class="main-header">🎯 Optimization Tool</h1>', unsafe_allow_html=True)
|
| 74 |
+
|
| 75 |
+
# ---------------------------------------------------------------------------
# Sidebar: all optimization parameters.
# Fix vs. original: the three bare `except:` clauses around the data loads
# also swallowed SystemExit/KeyboardInterrupt; narrowed to `except Exception`.
# ---------------------------------------------------------------------------
with st.sidebar:
    st.markdown("## ⚙️ Optimization Parameters")

    # --- Date range selection (from released orders) -----------------------
    st.markdown("### 📅 Date Range Selection")
    try:
        date_ranges = transform.get_date_ranges()
        if date_ranges:
            date_range_options = [
                f"{start.strftime('%Y-%m-%d')} to {end.strftime('%Y-%m-%d')}"
                for start, end in date_ranges
            ]
            selected_range_str = st.selectbox(
                "Select date range:",
                options=date_range_options,
                help="Available date ranges from released orders"
            )

            selected_index = date_range_options.index(selected_range_str)
            start_date, end_date = date_ranges[selected_index]
            st.session_state.date_range = (start_date, end_date)

            duration = (end_date - start_date).days + 1
            st.info(f"Duration: {duration} days")
        else:
            # No data: fall back to a known demo week.
            st.warning("No date ranges found in data")
            start_date = datetime(2025, 3, 24).date()
            end_date = datetime(2025, 3, 28).date()
            st.session_state.date_range = (start_date, end_date)
    except Exception as e:
        st.error(f"Error loading dates: {e}")
        start_date = datetime(2025, 3, 24).date()
        end_date = datetime(2025, 3, 28).date()
        st.session_state.date_range = (start_date, end_date)

    st.markdown("---")

    # --- Employee types -----------------------------------------------------
    st.markdown("### 👥 Employee Configuration")
    try:
        employee_df = extract.read_employee_data()
        available_emp_types = employee_df["employment_type"].unique().tolist()
    except Exception:
        # DB unavailable: use the two known employment types.
        available_emp_types = ["UNICEF Fixed term", "Humanizer"]

    selected_emp_types = st.multiselect(
        "Employee Types:",
        available_emp_types,
        default=available_emp_types,
        help="Select employee types to include in optimization"
    )

    # --- Shifts -------------------------------------------------------------
    st.markdown("### 🕐 Shift Configuration")
    try:
        shift_df = extract.get_shift_info()
        available_shifts = shift_df["id"].unique().tolist()
    except Exception:
        available_shifts = [1, 2, 3]

    selected_shifts = st.multiselect(
        "Shifts:",
        available_shifts,
        default=available_shifts,
        help="1=Regular, 2=Overtime, 3=Evening"
    )

    # --- Production lines ----------------------------------------------------
    st.markdown("### 🏭 Production Line Configuration")
    try:
        line_df = extract.read_packaging_line_data()
        available_lines = line_df["id"].unique().tolist()
    except Exception:
        available_lines = [6, 7]

    selected_lines = st.multiselect(
        "Production Lines:",
        available_lines,
        default=available_lines,
        help="Select production lines to include"
    )

    st.markdown("---")

    # --- Advanced parameters -------------------------------------------------
    # NOTE(review): these widgets are collected but never forwarded to
    # OptimizerReal below — confirm whether the optimizer reads them from
    # config instead before relying on them.
    with st.expander("🔧 Advanced Parameters", expanded=False):
        constraint_mode = st.selectbox(
            "Fixed Staff Constraint Mode:",
            ["priority", "mandatory", "none"],
            index=0,
            help="priority=Use fixed staff first, mandatory=Force all fixed hours, none=Demand-driven"
        )

        max_hours_per_person = st.number_input(
            "Max hours per person per day:",
            min_value=8,
            max_value=24,
            value=14,
            help="Legal daily limit"
        )

        st.markdown("**Employee Availability Override:**")
        col1, col2 = st.columns(2)
        with col1:
            unicef_count = st.number_input("UNICEF Fixed term:", min_value=0, value=8)
        with col2:
            humanizer_count = st.number_input("Humanizer:", min_value=0, value=6)

    st.markdown("---")

    # --- Actions -------------------------------------------------------------
    run_optimization = st.button("🚀 Run Optimization", type="primary", use_container_width=True)

    if st.button("🔄 Clear Results", use_container_width=True):
        st.session_state.optimization_results = None
        st.rerun()
|
| 192 |
+
|
| 193 |
+
# ---------------------------------------------------------------------------
# Main area: a date range is mandatory; bail out early when missing.
# ---------------------------------------------------------------------------
if st.session_state.date_range:
    start_date, end_date = st.session_state.date_range
    st.markdown(f"**Optimization Period:** {start_date} to {end_date}")
else:
    st.warning("Please select a date range from the sidebar")
    st.stop()

# Execute the optimizer on demand.
# NOTE(review): the sidebar selections (employee types, shifts, lines,
# constraint mode) are not passed into OptimizerReal here — presumably it
# reads configuration from src.config; verify.
if run_optimization:
    with st.spinner("🔄 Running optimization... This may take a few moments."):
        try:
            optimizer = OptimizerReal()
            results = optimizer.solve_option_A_multi_day_generalized()

            if results is None:
                st.error("❌ Optimization returned no results")
            elif results.get('status') == 'failed':
                st.error(f"❌ Optimization failed: {results.get('message', 'Unknown error')}")
            else:
                # Keep both the results and the optimizer for later tabs.
                st.session_state.optimization_results = results
                st.session_state.optimizer = optimizer
                st.success("✅ Optimization completed successfully!")
        except Exception as e:
            st.error(f"❌ Optimization failed: {e}")
            st.exception(e)
|
| 223 |
+
|
| 224 |
+
# ---------------------------------------------------------------------------
# Results rendering: headline metrics plus four detail tabs.
# ---------------------------------------------------------------------------
if st.session_state.optimization_results:
    results = st.session_state.optimization_results

    st.markdown('<div class="results-panel">', unsafe_allow_html=True)
    st.markdown("## 📊 Optimization Results")

    total_cost = results.get('total_cost', 0)
    params = results.get('parameters', {})

    col_summary1, col_summary2, col_summary3, col_summary4 = st.columns(4)

    with col_summary1:
        st.metric("💰 Total Cost", f"${total_cost:,.2f}")
    with col_summary2:
        st.metric("📦 Products", len(params.get('product_list', [])))
    with col_summary3:
        st.metric("👥 Employee Types", len(params.get('employee_types', [])))
    with col_summary4:
        if st.session_state.date_range:
            start_date, end_date = st.session_state.date_range
            duration = (end_date - start_date).days + 1
            cost_per_day = total_cost / duration if duration > 0 else 0
            st.metric("💵 Cost/Day", f"${cost_per_day:,.2f}")

    st.markdown('</div>', unsafe_allow_html=True)

    # Detail tabs: summary / production / labor / costs.
    tab1, tab2, tab3, tab4 = st.tabs(["📊 Summary", "🏭 Production", "👷 Labor", "💰 Costs"])
|
| 255 |
+
|
| 256 |
+
with tab1:
    # High-level KPIs plus the configuration that produced them.
    st.markdown("### 📊 Optimization Summary")

    col_s1, col_s2, col_s3 = st.columns(3)
    with col_s1:
        st.metric("🎯 Total Demand", f"{params.get('total_demand', 0):,.0f}")
    with col_s2:
        cost_per_unit = total_cost / params.get('total_demand', 1) if params.get('total_demand', 0) > 0 else 0
        st.metric("💵 Cost per Unit", f"${cost_per_unit:.3f}")
    with col_s3:
        st.metric("⚙️ Constraint Mode", params.get('constraint_mode', 'N/A'))

    st.markdown("#### 📋 Configuration Used")
    col_config1, col_config2 = st.columns(2)

    with col_config1:
        st.markdown("**Selected Parameters:**")
        st.markdown(f"• **Employee Types:** {', '.join(selected_emp_types)}")
        st.markdown(f"• **Shifts:** {', '.join(map(str, selected_shifts))}")
        st.markdown(f"• **Production Lines:** {', '.join(map(str, selected_lines))}")

    with col_config2:
        st.markdown("**Optimization Details:**")
        st.markdown(f"• **Constraint Mode:** {params.get('constraint_mode', 'N/A')}")
        st.markdown(f"• **Days Optimized:** {len(params.get('days', []))}")
        st.markdown(f"• **Products Included:** {len(params.get('product_list', []))}")

    # Static quality banner — values are not read from the solver.
    st.markdown("#### ✅ Solution Quality")
    col_qual1, col_qual2, col_qual3 = st.columns(3)
    with col_qual1:
        st.success("**Status:** Optimal solution found")
    with col_qual2:
        st.info(f"**Solver:** OR-Tools CBC")
    with col_qual3:
        st.info(f"**Solution Time:** < 1 minute")
|
| 296 |
+
|
| 297 |
+
with tab2:
    # Demand vs. production per product, with fulfilment metrics and charts.
    st.markdown("### 🏭 Production Results")

    production_results = results.get('production_results', {})
    if production_results:
        # One table row per product.
        prod_data = [
            {
                'Product': product,
                'Demand': data['demand'],
                'Produced': data['produced'],
                'Fulfillment %': f"{data['fulfillment_rate']:.1f}%",
                'Status': '✅ Met' if data['fulfillment_rate'] >= 100 else '⚠️ Partial',
            }
            for product, data in production_results.items()
        ]

        if prod_data:
            prod_df = pd.DataFrame(prod_data)

            total_demand = sum(d['demand'] for d in production_results.values())
            total_produced = sum(d['produced'] for d in production_results.values())
            overall_fulfillment = (total_produced / total_demand * 100) if total_demand > 0 else 0
            products_met = sum(1 for d in production_results.values() if d['fulfillment_rate'] >= 100)

            col_prod1, col_prod2, col_prod3, col_prod4 = st.columns(4)
            with col_prod1:
                st.metric("📦 Total Demand", f"{total_demand:,.0f}")
            with col_prod2:
                st.metric("🏭 Total Produced", f"{total_produced:,.0f}")
            with col_prod3:
                st.metric("✅ Overall Fulfillment", f"{overall_fulfillment:.1f}%")
            with col_prod4:
                st.metric("🎯 Products Fully Met", f"{products_met}/{len(production_results)}")

            st.markdown("#### 📋 Production Details")
            st.dataframe(prod_df, use_container_width=True)

            col_chart1, col_chart2 = st.columns(2)

            with col_chart1:
                # Grouped bars: demand next to production per product.
                fig_prod = px.bar(
                    prod_df,
                    x='Product',
                    y=['Demand', 'Produced'],
                    title='Production vs Demand by Product',
                    barmode='group'
                )
                fig_prod.update_layout(xaxis_tickangle=-45)
                st.plotly_chart(fig_prod, use_container_width=True)

            with col_chart2:
                # Fulfilment rate coloured red→green with the 100 % target line.
                fulfillment_data = [(row['Product'], float(row['Fulfillment %'].rstrip('%'))) for row in prod_data]
                fulfill_df = pd.DataFrame(fulfillment_data, columns=['Product', 'Fulfillment_Rate'])

                fig_fulfill = px.bar(
                    fulfill_df,
                    x='Product',
                    y='Fulfillment_Rate',
                    title='Fulfillment Rate by Product (%)',
                    color='Fulfillment_Rate',
                    color_continuous_scale='RdYlGn'
                )
                fig_fulfill.update_layout(yaxis_title="Fulfillment Rate (%)", xaxis_tickangle=-45)
                fig_fulfill.add_hline(y=100, line_dash="dash", line_color="red", annotation_text="Target: 100%")
                st.plotly_chart(fig_fulfill, use_container_width=True)
    else:
        st.info("No production data available")
|
| 370 |
+
|
| 371 |
+
with tab3:
    # Labor hours and headcount derived from the solver's per-day schedules.
    st.markdown("### 👷 Labor Allocation")

    employee_hours = results.get('employee_hours', {})        # {type: {shift: [hours/day]}}
    headcount_req = results.get('headcount_requirements', {})  # {type: {shift: [count/day]}}

    if employee_hours:
        # Flatten the nested dict into table rows and a grand total in one pass.
        total_labor_hours = 0
        labor_data = []
        for emp_type, shifts in employee_hours.items():
            for shift, daily_hours in shifts.items():
                shift_total = sum(daily_hours)
                total_labor_hours += shift_total
                if shift_total > 0:
                    labor_data.append({
                        'Employee Type': emp_type,
                        'Shift': f"Shift {shift}",
                        'Total Hours': shift_total,
                        'Avg Daily Hours': shift_total / len(daily_hours) if daily_hours else 0
                    })

        col_labor1, col_labor2, col_labor3, col_labor4 = st.columns(4)

        with col_labor1:
            st.metric("⏰ Total Labor Hours", f"{total_labor_hours:,.0f}")
        with col_labor2:
            if st.session_state.date_range:
                duration = (end_date - start_date).days + 1
                avg_daily_hours = total_labor_hours / duration
                st.metric("📅 Avg Daily Hours", f"{avg_daily_hours:,.0f}")
        with col_labor3:
            # Peak = sum over (type, shift) of each list's daily maximum.
            try:
                max_daily_workers = sum(
                    max(daily_counts)
                    for shifts in headcount_req.values()
                    for daily_counts in shifts.values()
                    if daily_counts
                )
                st.metric("👥 Peak Workers Needed", max_daily_workers)
            except Exception:
                st.metric("👥 Peak Workers Needed", "N/A")
        with col_labor4:
            labor_cost_per_hour = total_cost / total_labor_hours if total_labor_hours > 0 else 0
            st.metric("💰 Avg Cost/Hour", f"${labor_cost_per_hour:.2f}")

        if labor_data:
            st.markdown("#### 📋 Labor Hours Details")
            labor_df = pd.DataFrame(labor_data)
            st.dataframe(labor_df, use_container_width=True)

            col_labor_chart1, col_labor_chart2 = st.columns(2)

            with col_labor_chart1:
                fig_labor = px.bar(
                    labor_df,
                    x='Employee Type',
                    y='Total Hours',
                    color='Shift',
                    title='Total Labor Hours by Employee Type and Shift',
                    barmode='group'
                )
                st.plotly_chart(fig_labor, use_container_width=True)

            with col_labor_chart2:
                emp_totals = labor_df.groupby('Employee Type')['Total Hours'].sum()
                fig_labor_pie = px.pie(
                    values=emp_totals.values,
                    names=emp_totals.index,
                    title='Labor Hours Distribution by Employee Type'
                )
                st.plotly_chart(fig_labor_pie, use_container_width=True)

        if headcount_req:
            st.markdown("#### 👥 Required Headcount")
            headcount_data = []
            for emp_type, shifts in headcount_req.items():
                for shift, daily_count in shifts.items():
                    max_count = max(daily_count) if daily_count else 0
                    if max_count > 0:  # non-empty list guaranteed here
                        headcount_data.append({
                            'Employee Type': emp_type,
                            'Shift': f"Shift {shift}",
                            'Max Daily': max_count,
                            'Avg Daily': f"{sum(daily_count) / len(daily_count):.1f}",
                            'Total Period': sum(daily_count)
                        })

            if headcount_data:
                headcount_df = pd.DataFrame(headcount_data)
                st.dataframe(headcount_df, use_container_width=True)

                fig_headcount = px.bar(
                    headcount_df,
                    x='Employee Type',
                    y='Max Daily',
                    color='Shift',
                    title='Maximum Daily Headcount Requirements',
                    barmode='group'
                )
                st.plotly_chart(fig_headcount, use_container_width=True)
|
| 482 |
+
|
| 483 |
+
with tab4:
    # Cost KPIs, per-type/shift breakdown and the priority-mode analysis.
    st.markdown("### 💰 Cost Analysis")

    total_cost = results.get('total_cost', 0)

    col_cost_summary1, col_cost_summary2, col_cost_summary3, col_cost_summary4 = st.columns(4)

    with col_cost_summary1:
        st.metric("💰 Total Cost", f"${total_cost:,.2f}")
    with col_cost_summary2:
        if st.session_state.date_range:
            duration = (end_date - start_date).days + 1
            cost_per_day = total_cost / duration
            st.metric("📅 Cost per Day", f"${cost_per_day:,.2f}")
    with col_cost_summary3:
        total_demand = params.get('total_demand', 1)
        cost_per_unit = total_cost / total_demand if total_demand > 0 else 0
        st.metric("📦 Cost per Unit", f"${cost_per_unit:.3f}")
    with col_cost_summary4:
        total_hours = sum(
            sum(sum(daily_hours) for daily_hours in shifts.values())
            for shifts in results.get('employee_hours', {}).values()
        )
        cost_per_hour = total_cost / total_hours if total_hours > 0 else 0
        st.metric("⏰ Cost per Hour", f"${cost_per_hour:.2f}")

    # Per-(type, shift) cost breakdown priced from the configured wage table.
    employee_hours = results.get('employee_hours', {})
    if employee_hours:
        wage_types = optimization_config.COST_LIST_PER_EMP_SHIFT
        cost_data = []
        for emp_type, shifts in employee_hours.items():
            for shift, daily_hours in shifts.items():
                total_hours = sum(daily_hours)
                # Only rows with hours and a known wage rate are priced.
                if total_hours > 0 and emp_type in wage_types and shift in wage_types[emp_type]:
                    shift_cost = total_hours * wage_types[emp_type][shift]
                    cost_data.append({
                        'Employee Type': emp_type,
                        'Shift': f"Shift {shift}",
                        'Hours': total_hours,
                        'Rate ($/hr)': wage_types[emp_type][shift],
                        'Total Cost ($)': shift_cost,
                        'Percentage': (shift_cost / total_cost * 100) if total_cost > 0 else 0
                    })

        if cost_data:
            st.markdown("#### 📋 Detailed Cost Breakdown")
            cost_df = pd.DataFrame(cost_data)
            st.dataframe(cost_df, use_container_width=True)

            col_cost_chart1, col_cost_chart2 = st.columns(2)

            with col_cost_chart1:
                fig_cost = px.bar(
                    cost_df,
                    x='Employee Type',
                    y='Total Cost ($)',
                    color='Shift',
                    title='Cost Breakdown by Employee Type and Shift',
                    barmode='stack'
                )
                st.plotly_chart(fig_cost, use_container_width=True)

            with col_cost_chart2:
                emp_costs = cost_df.groupby('Employee Type')['Total Cost ($)'].sum()
                fig_cost_pie = px.pie(
                    values=emp_costs.values,
                    names=emp_costs.index,
                    title='Cost Distribution by Employee Type'
                )
                st.plotly_chart(fig_cost_pie, use_container_width=True)

    # Priority-mode summary, present only when the solver ran in that mode.
    priority_results = results.get('priority_results')
    if priority_results and priority_results.get('summary'):
        st.markdown("#### 🎯 Priority Mode Analysis")
        summary = priority_results['summary']

        col_priority1, col_priority2 = st.columns(2)

        with col_priority1:
            if summary['unicef_sufficient']:
                st.success("✅ **UNICEF Fixed term staff sufficient**")
                st.info("→ Humanizer staff not needed for this demand level")
            else:
                st.warning(f"⚠️ **{summary['total_capacity_flags']} cases where UNICEF at capacity**")
                st.info("→ Humanizer staff utilized to meet demand")

        with col_priority2:
            if summary['unicef_sufficient']:
                st.metric("🎯 Optimization Efficiency", "100%")
                st.caption("All demand met with preferred staff only")
            else:
                efficiency = (1 - summary['total_capacity_flags'] / len(params.get('product_list', [1]))) * 100
                st.metric("🎯 Optimization Efficiency", f"{efficiency:.1f}%")
                st.caption("Percentage of demand met with preferred staff")
|
| 584 |
+
|
| 585 |
+
else:
|
| 586 |
+
# Placeholder content when no results
|
| 587 |
+
st.markdown('<div class="optimization-panel">', unsafe_allow_html=True)
|
| 588 |
+
st.markdown("## π― Ready to Optimize")
|
| 589 |
+
st.markdown("""
|
| 590 |
+
Configure your optimization parameters in the sidebar and click **'π Run Optimization'** to get started!
|
| 591 |
+
|
| 592 |
+
### What you'll see after optimization:
|
| 593 |
+
|
| 594 |
+
- **π Summary**: Overall results and key performance metrics
|
| 595 |
+
- **π Production**: Detailed production schedule and fulfillment analysis
|
| 596 |
+
- **π· Labor**: Employee allocation and shift assignments
|
| 597 |
+
- **π° Costs**: Comprehensive cost breakdown and analysis
|
| 598 |
+
|
| 599 |
+
### Tips for better results:
|
| 600 |
+
- Ensure your date range has sufficient demand data
|
| 601 |
+
- Select appropriate employee types for your scenario
|
| 602 |
+
- Consider using 'priority' constraint mode for realistic business operations
|
| 603 |
+
""")
|
| 604 |
+
st.markdown('</div>', unsafe_allow_html=True)
|
| 605 |
+
|
| 606 |
+
# Show current configuration
|
| 607 |
+
if st.session_state.date_range:
|
| 608 |
+
st.markdown("### π Current Configuration")
|
| 609 |
+
col_config1, col_config2 = st.columns(2)
|
| 610 |
+
|
| 611 |
+
with col_config1:
|
| 612 |
+
st.markdown("**Selected Parameters:**")
|
| 613 |
+
st.markdown(f"β’ **Date Range:** {start_date} to {end_date}")
|
| 614 |
+
st.markdown(f"β’ **Employee Types:** {', '.join(selected_emp_types) if selected_emp_types else 'None selected'}")
|
| 615 |
+
st.markdown(f"β’ **Shifts:** {', '.join(map(str, selected_shifts)) if selected_shifts else 'None selected'}")
|
| 616 |
+
|
| 617 |
+
with col_config2:
|
| 618 |
+
st.markdown("**Configuration Status:**")
|
| 619 |
+
status_emp = "β
" if selected_emp_types else "β"
|
| 620 |
+
status_shift = "β
" if selected_shifts else "β"
|
| 621 |
+
status_lines = "β
" if selected_lines else "β"
|
| 622 |
+
|
| 623 |
+
st.markdown(f"β’ **Employee Types:** {status_emp}")
|
| 624 |
+
st.markdown(f"β’ **Shifts:** {status_shift}")
|
| 625 |
+
st.markdown(f"β’ **Production Lines:** {status_lines}")
|
| 626 |
+
|
| 627 |
+
# Footer
|
| 628 |
+
st.markdown("---")
|
| 629 |
+
st.markdown("""
|
| 630 |
+
<div style='text-align: center; color: gray; padding: 1rem;'>
|
| 631 |
+
<small>Optimization Tool | Powered by OR-Tools | Real-time data integration</small>
|
| 632 |
+
</div>
|
| 633 |
+
""", unsafe_allow_html=True)
|
requirements.txt
CHANGED
|
@@ -1,15 +1,17 @@
|
|
| 1 |
-
absl-py
|
| 2 |
-
dotenv
|
| 3 |
-
immutabledict
|
| 4 |
-
numpy
|
| 5 |
-
ortools
|
| 6 |
-
pandas
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
python-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
|
|
|
|
|
|
|
|
| 1 |
+
absl-py>=2.3.1
|
| 2 |
+
dotenv>=0.9.9
|
| 3 |
+
immutabledict>=4.2.1
|
| 4 |
+
numpy>=2.2.0
|
| 5 |
+
ortools>=9.14.0
|
| 6 |
+
pandas>=2.3.0
|
| 7 |
+
plotly>=5.24.0
|
| 8 |
+
protobuf>=3.20,<6
|
| 9 |
+
psycopg2-binary>=2.9.9
|
| 10 |
+
python-dateutil>=2.9.0
|
| 11 |
+
python-dotenv>=1.0.0
|
| 12 |
+
pytz>=2025.2
|
| 13 |
+
six>=1.17.0
|
| 14 |
+
SQLAlchemy>=2.0.36
|
| 15 |
+
streamlit>=1.39.0
|
| 16 |
+
typing_extensions>=4.14.0
|
| 17 |
+
tzdata>=2025.2
|
run_streamlit.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Simple runner script for the SD Roster Optimization Streamlit app.

Launches ``streamlit run Home.py`` under the current interpreter and
propagates a meaningful exit status back to the shell.
"""

import os
import subprocess
import sys


def main() -> int:
    """Run the Streamlit app.

    Returns:
        0 on normal termination (including Ctrl-C), 1 if Streamlit exited
        with a non-zero status.
    """
    # Run from the project directory so relative paths inside the app resolve.
    project_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(project_dir)

    try:
        subprocess.run(
            [
                sys.executable, "-m", "streamlit", "run", "Home.py",
                "--server.port", "8501",
                "--server.address", "localhost",
            ],
            check=True,  # raise CalledProcessError on non-zero exit
        )
    except subprocess.CalledProcessError as e:
        print(f"Error running Streamlit: {e}")
        return 1
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the dev server — not an error.
        print("\nStreamlit app stopped by user")
        return 0

    return 0


if __name__ == "__main__":
    # Fix: use sys.exit() instead of the bare `exit()` builtin, which is a
    # site.py convenience and may be absent (python -S, frozen builds).
    sys.exit(main())
|
src/config/optimization_config.py
CHANGED
|
@@ -195,6 +195,12 @@ CAP_PER_LINE_PER_HOUR = {
|
|
| 195 |
# number of products that can be produced per hour per line
|
| 196 |
#This information is critical and it should not rely on the productivity information
|
| 197 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 198 |
DAILY_WEEKLY_SCHEDULE = "daily" # daily or weekly ,this needs to be implementedin in if F_x1_day is not None... F_x1_week is not None... also need to change x1 to Fixedstaff_first_shift
|
| 199 |
|
| 200 |
# Fixed staff constraint mode
|
|
|
|
| 195 |
# number of products that can be produced per hour per line
|
| 196 |
#This information is critical and it should not rely on the productivity information
|
| 197 |
|
| 198 |
+
# Hard ceiling on how many workers can staff a packaging line at the same
# time, keyed by production-line id.
MAX_PARALLEL_WORKERS = {
    6: 6,  # long line can have max 6 workers simultaneously
    7: 4,  # short line can have max 4 workers simultaneously
}
|
| 203 |
+
|
| 204 |
DAILY_WEEKLY_SCHEDULE = "daily" # daily or weekly ,this needs to be implementedin in if F_x1_day is not None... F_x1_week is not None... also need to change x1 to Fixedstaff_first_shift
|
| 205 |
|
| 206 |
# Fixed staff constraint mode
|
src/models/optimizer_real.py
CHANGED
|
@@ -94,10 +94,6 @@ class OptimizerReal:
|
|
| 94 |
# Option 1: Mandatory hours (forces staff to work even when idle)
|
| 95 |
F_x1_day = {t: first_shift_hour * N_day["UNICEF Fixed term"][t] for t in days}
|
| 96 |
print(f"Using MANDATORY fixed hours constraint: {sum(F_x1_day.values())} hours/week")
|
| 97 |
-
elif constraint_mode == "available":
|
| 98 |
-
# Option 2: Available hours constraint (staff can work up to limit but not forced)
|
| 99 |
-
F_x1_day = None
|
| 100 |
-
print("Using AVAILABLE hours constraint (not yet implemented)")
|
| 101 |
elif constraint_mode == "priority":
|
| 102 |
# Option 3: Priority-based (realistic business model)
|
| 103 |
F_x1_day = None
|
|
@@ -111,6 +107,7 @@ class OptimizerReal:
|
|
| 111 |
|
| 112 |
# e.g., F_x1_day = sum(F_x1_day.values()) if you want weekly instead (then set F_x1_day=None)
|
| 113 |
cap_per_line_per_hour = self.config.CAP_PER_LINE_PER_HOUR
|
|
|
|
| 114 |
# Optional skill/compatibility: allow[(e,p,ell)] = 1/0 (1=allowed; 0=forbid)
|
| 115 |
allow = {}
|
| 116 |
for e in employee_types:
|
|
@@ -121,7 +118,7 @@ class OptimizerReal:
|
|
| 121 |
# -----------------------------
|
| 122 |
# 3) SOLVER
|
| 123 |
# -----------------------------
|
| 124 |
-
solver = pywraplp.Solver.CreateSolver("CBC") #
|
| 125 |
if not solver:
|
| 126 |
raise RuntimeError("Failed to create solver. Check OR-Tools installation.")
|
| 127 |
INF = solver.infinity()
|
|
@@ -137,14 +134,15 @@ class OptimizerReal:
|
|
| 137 |
for p in product_list:
|
| 138 |
for ell in line_type_cnt_tuple:
|
| 139 |
for t in days:
|
| 140 |
-
# Upper bound per (e,s,t): shift cap * available headcount that day
|
| 141 |
|
| 142 |
ub = Hmax_shift[s] * N_day[e][t]
|
| 143 |
h[e, s, p, ell, t] = solver.IntVar(
|
| 144 |
0, ub, f"h_{e}_{s}_{p}_{ell[0]}{ell[1]}_d{t}"
|
| 145 |
-
)
|
| 146 |
|
| 147 |
# u[p,ell,s,t] = units of product p produced on line ell during shift s on day t
|
|
|
|
| 148 |
u = {}
|
| 149 |
for p in product_list:
|
| 150 |
for ell in line_type_cnt_tuple:
|
|
@@ -155,6 +153,7 @@ class OptimizerReal:
|
|
| 155 |
)
|
| 156 |
|
| 157 |
# tline[ell,s,t] = operating hours of line ell during shift s on day t
|
|
|
|
| 158 |
tline = {}
|
| 159 |
for ell in line_type_cnt_tuple:
|
| 160 |
for s in shift_list:
|
|
@@ -164,6 +163,7 @@ class OptimizerReal:
|
|
| 164 |
)
|
| 165 |
|
| 166 |
# ybin[e,s,t] = shift usage binaries per type/day (to gate OT after usual)
|
|
|
|
| 167 |
ybin = {}
|
| 168 |
for e in employee_types:
|
| 169 |
for s in shift_list:
|
|
@@ -191,6 +191,7 @@ class OptimizerReal:
|
|
| 191 |
# -----------------------------
|
| 192 |
|
| 193 |
# 6.1 Weekly demand (no daily demand)
|
|
|
|
| 194 |
for p in product_list:
|
| 195 |
demand_value = weekly_demand.get(p, 0)
|
| 196 |
if demand_value > 0: # Only add constraint if there's actual demand
|
|
@@ -198,21 +199,26 @@ class OptimizerReal:
|
|
| 198 |
solver.Sum(u[p, ell, s, t] for ell in line_type_cnt_tuple for s in shift_list for t in days)
|
| 199 |
>= demand_value
|
| 200 |
)
|
|
|
|
| 201 |
|
| 202 |
# 6.2 If a product is inactive on a day, force zero production and hours for that day
|
| 203 |
# This makes "varying products per day" explicit.
|
| 204 |
-
BIG_H = max(Hmax_shift.values()) * sum(N_day[e][t] for e in employee_types for t in days)
|
| 205 |
for p in product_list:
|
| 206 |
for t in days:
|
| 207 |
if active[t][p] == 0:
|
| 208 |
for ell in line_type_cnt_tuple:
|
| 209 |
for s in shift_list:
|
|
|
|
| 210 |
solver.Add(u[p, ell, s, t] == 0)
|
|
|
|
| 211 |
for e in employee_types:
|
| 212 |
solver.Add(h[e, s, p, ell, t] == 0)
|
| 213 |
|
| 214 |
# 6.3 Labor -> units (per line/shift/day)
|
|
|
|
| 215 |
# If productivity depends on line, swap productivities[e][s][p] with q_line[(e,s,p,ell)] here.
|
|
|
|
| 216 |
for p in product_list:
|
| 217 |
for ell in line_type_cnt_tuple:
|
| 218 |
for s in shift_list:
|
|
@@ -224,9 +230,8 @@ class OptimizerReal:
|
|
| 224 |
)
|
| 225 |
|
| 226 |
# 6.4 Per-line throughput cap (units/hour Γ line-hours)
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
print("tline",tline)
|
| 230 |
for ell in line_type_cnt_tuple:
|
| 231 |
for s in shift_list:
|
| 232 |
for t in days:
|
|
@@ -236,22 +241,17 @@ class OptimizerReal:
|
|
| 236 |
<= cap_per_line_per_hour[line_type] * tline[ell, s, t]
|
| 237 |
)
|
| 238 |
|
| 239 |
-
# 6.5 Couple line hours & worker-hours (
|
|
|
|
| 240 |
for ell in line_type_cnt_tuple:
|
|
|
|
|
|
|
| 241 |
for s in shift_list:
|
| 242 |
for t in days:
|
| 243 |
solver.Add(
|
| 244 |
-
|
| 245 |
-
|
| 246 |
)
|
| 247 |
-
# If multi-operator lines (up to Wmax[ell] concurrent workers), replace above with:
|
| 248 |
-
# Wmax = {ell: 2, ...}
|
| 249 |
-
# for ell in line_type_cnt_tuple:
|
| 250 |
-
# for s in shift_list:
|
| 251 |
-
# for t in days:
|
| 252 |
-
# solver.Add(
|
| 253 |
-
# solver.Sum(h[e, s, p, ell, t] for e in employee_types for p in product_list) <= Wmax[ell] * tline[ell, s, t]
|
| 254 |
-
# )
|
| 255 |
|
| 256 |
# 6.6 Fixed regular hours for type Fixed on shift 1
|
| 257 |
if F_x1_day is not None:
|
|
@@ -262,15 +262,7 @@ class OptimizerReal:
|
|
| 262 |
== F_x1_day[t]
|
| 263 |
)
|
| 264 |
print("Applied mandatory fixed hours constraint")
|
| 265 |
-
|
| 266 |
-
# # Per-week fixed hours (mandatory - expensive)
|
| 267 |
-
# solver.Add(
|
| 268 |
-
# solver.Sum(
|
| 269 |
-
# h["UNICEF Fixed term", 1, p, ell, t] for p in product_list for ell in line_type_cnt_tuple for t in days
|
| 270 |
-
# )
|
| 271 |
-
# == F_x1_week
|
| 272 |
-
# )
|
| 273 |
-
# print("Applied mandatory weekly fixed hours constraint")
|
| 274 |
else:
|
| 275 |
# No fixed constraint - purely demand-driven (cost-efficient)
|
| 276 |
print("No mandatory fixed hours constraint - using demand-driven scheduling")
|
|
@@ -281,6 +273,9 @@ class OptimizerReal:
|
|
| 281 |
print("Implementing priority constraints: UNICEF Fixed term used before Humanizer")
|
| 282 |
# Add constraints to prioritize fixed staff usage before temporary staff
|
| 283 |
|
|
|
|
|
|
|
|
|
|
| 284 |
# Priority constraint: For each day, product, and line,
|
| 285 |
# Humanizer hours can only be used if UNICEF Fixed term is at capacity
|
| 286 |
for t in days:
|
|
@@ -288,22 +283,43 @@ class OptimizerReal:
|
|
| 288 |
for ell in line_type_cnt_tuple:
|
| 289 |
# Create binary variable to indicate if UNICEF Fixed term is at capacity
|
| 290 |
unicef_at_capacity = solver.IntVar(0, 1, f"unicef_at_capacity_{p}_{ell[0]}{ell[1]}_d{t}")
|
|
|
|
| 291 |
|
| 292 |
-
#
|
| 293 |
-
|
| 294 |
|
| 295 |
# If UNICEF is not at capacity (unicef_at_capacity = 0), then Humanizer hours must be 0
|
| 296 |
-
# If UNICEF is at capacity (unicef_at_capacity = 1), then Humanizer can work
|
| 297 |
solver.Add(
|
| 298 |
solver.Sum(h["Humanizer", s, p, ell, t] for s in shift_list)
|
| 299 |
-
<= unicef_at_capacity *
|
| 300 |
)
|
| 301 |
|
| 302 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
| 303 |
solver.Add(
|
| 304 |
solver.Sum(h["UNICEF Fixed term", s, p, ell, t] for s in shift_list)
|
| 305 |
-
>= unicef_at_capacity *
|
| 306 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 307 |
|
| 308 |
# 6.7 Daily hours cap per employee type (14h per person per day)
|
| 309 |
for e in employee_types:
|
|
@@ -327,6 +343,7 @@ class OptimizerReal:
|
|
| 327 |
)
|
| 328 |
|
| 329 |
# 6.9 Overtime only after usual (per day). Also bound OT hours <= usual hours
|
|
|
|
| 330 |
for e in employee_types:
|
| 331 |
for t in days:
|
| 332 |
solver.Add(ybin[e, 2, t] <= ybin[e, 1, t])
|
|
@@ -354,58 +371,128 @@ class OptimizerReal:
|
|
| 354 |
status = solver.Solve()
|
| 355 |
if status != pywraplp.Solver.OPTIMAL:
|
| 356 |
print("No optimal solution. Status:", status)
|
| 357 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
| 358 |
|
| 359 |
# -----------------------------
|
| 360 |
# 8) REPORT
|
| 361 |
# -----------------------------
|
| 362 |
-
|
|
|
|
| 363 |
|
|
|
|
|
|
|
| 364 |
print("\n--- Weekly production by product ---")
|
| 365 |
for p in product_list:
|
| 366 |
produced = sum(
|
| 367 |
u[p, ell, s, t].solution_value() for ell in line_type_cnt_tuple for s in shift_list for t in days
|
| 368 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 369 |
print(f"{p}: {produced:.1f} (weekly demand {weekly_demand.get(p,0)})")
|
| 370 |
|
|
|
|
|
|
|
| 371 |
print("\n--- Line operating hours by shift/day ---")
|
| 372 |
for ell in line_type_cnt_tuple:
|
|
|
|
| 373 |
for s in shift_list:
|
| 374 |
hours = [tline[ell, s, t].solution_value() for t in days]
|
|
|
|
| 375 |
if sum(hours) > 1e-6:
|
| 376 |
print(
|
| 377 |
f"Line {ell} Shift {s}: "
|
| 378 |
+ ", ".join([f"days{t}={hours[t-1]:.2f}h" for t in days])
|
| 379 |
)
|
| 380 |
|
|
|
|
|
|
|
| 381 |
print("\n--- Hours by employee type / shift / day ---")
|
| 382 |
for e in employee_types:
|
|
|
|
| 383 |
for s in shift_list:
|
| 384 |
day_hours = [
|
| 385 |
sum(h[e, s, p, ell, t].solution_value() for p in product_list for ell in line_type_cnt_tuple)
|
| 386 |
for t in days
|
| 387 |
]
|
|
|
|
| 388 |
if sum(day_hours) > 1e-6:
|
| 389 |
print(
|
| 390 |
f"e={e}, s={s}: "
|
| 391 |
+ ", ".join([f"days{t}={day_hours[t-1]:.2f}h" for t in days])
|
| 392 |
)
|
| 393 |
|
|
|
|
|
|
|
| 394 |
print("\n--- Implied headcount by type / shift / day ---")
|
| 395 |
for e in employee_types:
|
|
|
|
| 396 |
print(e)
|
| 397 |
for s in shift_list:
|
| 398 |
row = []
|
|
|
|
| 399 |
for t in days:
|
| 400 |
hours = sum(
|
| 401 |
h[e, s, p, ell, t].solution_value() for p in product_list for ell in line_type_cnt_tuple
|
| 402 |
)
|
| 403 |
need = int((hours + Hmax_shift[s] - 1) // Hmax_shift[s]) # ceil
|
|
|
|
| 404 |
row.append(f"days{t}={need}")
|
| 405 |
|
|
|
|
| 406 |
if any("=0" not in Fixed for Fixed in row):
|
| 407 |
print(f"e={e}, s={s}: " + ", ".join(row))
|
| 408 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 409 |
|
| 410 |
if __name__ == "__main__":
|
| 411 |
optimizer = OptimizerReal()
|
|
|
|
| 94 |
# Option 1: Mandatory hours (forces staff to work even when idle)
|
| 95 |
F_x1_day = {t: first_shift_hour * N_day["UNICEF Fixed term"][t] for t in days}
|
| 96 |
print(f"Using MANDATORY fixed hours constraint: {sum(F_x1_day.values())} hours/week")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
elif constraint_mode == "priority":
|
| 98 |
# Option 3: Priority-based (realistic business model)
|
| 99 |
F_x1_day = None
|
|
|
|
| 107 |
|
| 108 |
# e.g., F_x1_day = sum(F_x1_day.values()) if you want weekly instead (then set F_x1_day=None)
|
| 109 |
cap_per_line_per_hour = self.config.CAP_PER_LINE_PER_HOUR
|
| 110 |
+
|
| 111 |
# Optional skill/compatibility: allow[(e,p,ell)] = 1/0 (1=allowed; 0=forbid)
|
| 112 |
allow = {}
|
| 113 |
for e in employee_types:
|
|
|
|
| 118 |
# -----------------------------
|
| 119 |
# 3) SOLVER
|
| 120 |
# -----------------------------
|
| 121 |
+
solver = pywraplp.Solver.CreateSolver("CBC") # open-source mixed-integer program (MIP) solver
|
| 122 |
if not solver:
|
| 123 |
raise RuntimeError("Failed to create solver. Check OR-Tools installation.")
|
| 124 |
INF = solver.infinity()
|
|
|
|
| 134 |
for p in product_list:
|
| 135 |
for ell in line_type_cnt_tuple:
|
| 136 |
for t in days:
|
| 137 |
+
# Upper bound of labor hour per (e,s,t): shift hour cap * available headcount that day
|
| 138 |
|
| 139 |
ub = Hmax_shift[s] * N_day[e][t]
|
| 140 |
h[e, s, p, ell, t] = solver.IntVar(
|
| 141 |
0, ub, f"h_{e}_{s}_{p}_{ell[0]}{ell[1]}_d{t}"
|
| 142 |
+
)# h = work hour per (employee type,shift,t-day) is decided somewhere between 0 and ub and is an integer
|
| 143 |
|
| 144 |
# u[p,ell,s,t] = units of product p produced on line ell during shift s on day t
|
| 145 |
+
#Maybe we need upper bound here
|
| 146 |
u = {}
|
| 147 |
for p in product_list:
|
| 148 |
for ell in line_type_cnt_tuple:
|
|
|
|
| 153 |
)
|
| 154 |
|
| 155 |
# tline[ell,s,t] = operating hours of line ell during shift s on day t
|
| 156 |
+
# tline = line operating hour per (line,shift,t-day) is decided somewhere between 0 and Hmax_shift[s] and is a real number
|
| 157 |
tline = {}
|
| 158 |
for ell in line_type_cnt_tuple:
|
| 159 |
for s in shift_list:
|
|
|
|
| 163 |
)
|
| 164 |
|
| 165 |
# ybin[e,s,t] = shift usage binaries per type/day (to gate OT after usual)
|
| 166 |
+
# ybin = shift usage binary per (employee type,shift,t-day) is decided somewhere between 0 and 1 and is a binary number
|
| 167 |
ybin = {}
|
| 168 |
for e in employee_types:
|
| 169 |
for s in shift_list:
|
|
|
|
| 191 |
# -----------------------------
|
| 192 |
|
| 193 |
# 6.1 Weekly demand (no daily demand)
|
| 194 |
+
#unit of production for p over the week should be larger or equal to demand value
|
| 195 |
for p in product_list:
|
| 196 |
demand_value = weekly_demand.get(p, 0)
|
| 197 |
if demand_value > 0: # Only add constraint if there's actual demand
|
|
|
|
| 199 |
solver.Sum(u[p, ell, s, t] for ell in line_type_cnt_tuple for s in shift_list for t in days)
|
| 200 |
>= demand_value
|
| 201 |
)
|
| 202 |
+
|
| 203 |
|
| 204 |
# 6.2 If a product is inactive on a day, force zero production and hours for that day
|
| 205 |
# This makes "varying products per day" explicit.
|
| 206 |
+
# BIG_H = max(Hmax_shift.values()) * sum(N_day[e][t] for e in employee_types for t in days)
|
| 207 |
for p in product_list:
|
| 208 |
for t in days:
|
| 209 |
if active[t][p] == 0:
|
| 210 |
for ell in line_type_cnt_tuple:
|
| 211 |
for s in shift_list:
|
| 212 |
+
#If if not active, production unit is 0
|
| 213 |
solver.Add(u[p, ell, s, t] == 0)
|
| 214 |
+
#If if not active, work hour is 0
|
| 215 |
for e in employee_types:
|
| 216 |
solver.Add(h[e, s, p, ell, t] == 0)
|
| 217 |
|
| 218 |
# 6.3 Labor -> units (per line/shift/day)
|
| 219 |
+
#Total production unit based on labor productivity cap
|
| 220 |
# If productivity depends on line, swap productivities[e][s][p] with q_line[(e,s,p,ell)] here.
|
| 221 |
+
|
| 222 |
for p in product_list:
|
| 223 |
for ell in line_type_cnt_tuple:
|
| 224 |
for s in shift_list:
|
|
|
|
| 230 |
)
|
| 231 |
|
| 232 |
# 6.4 Per-line throughput cap (units/hour Γ line-hours)
|
| 233 |
+
#per line production cap for each line
|
| 234 |
+
#tline = line operating hour per (line,shift,t-day)
|
|
|
|
| 235 |
for ell in line_type_cnt_tuple:
|
| 236 |
for s in shift_list:
|
| 237 |
for t in days:
|
|
|
|
| 241 |
<= cap_per_line_per_hour[line_type] * tline[ell, s, t]
|
| 242 |
)
|
| 243 |
|
| 244 |
+
# 6.5 Couple line hours & worker-hours (multi-operator lines)
|
| 245 |
+
# Multiple workers can work on a line simultaneously, up to MAX_PARALLEL_WORKERS limit
|
| 246 |
for ell in line_type_cnt_tuple:
|
| 247 |
+
line_type = ell[0] # 6 or 7
|
| 248 |
+
max_workers = self.config.MAX_PARALLEL_WORKERS[line_type]
|
| 249 |
for s in shift_list:
|
| 250 |
for t in days:
|
| 251 |
solver.Add(
|
| 252 |
+
solver.Sum(h[e, s, p, ell, t] for e in employee_types for p in product_list)
|
| 253 |
+
<= max_workers * tline[ell, s, t]
|
| 254 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 255 |
|
| 256 |
# 6.6 Fixed regular hours for type Fixed on shift 1
|
| 257 |
if F_x1_day is not None:
|
|
|
|
| 262 |
== F_x1_day[t]
|
| 263 |
)
|
| 264 |
print("Applied mandatory fixed hours constraint")
|
| 265 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 266 |
else:
|
| 267 |
# No fixed constraint - purely demand-driven (cost-efficient)
|
| 268 |
print("No mandatory fixed hours constraint - using demand-driven scheduling")
|
|
|
|
| 273 |
print("Implementing priority constraints: UNICEF Fixed term used before Humanizer")
|
| 274 |
# Add constraints to prioritize fixed staff usage before temporary staff
|
| 275 |
|
| 276 |
+
# Store unicef_at_capacity variables for later inspection
|
| 277 |
+
unicef_capacity_vars = {}
|
| 278 |
+
|
| 279 |
# Priority constraint: For each day, product, and line,
|
| 280 |
# Humanizer hours can only be used if UNICEF Fixed term is at capacity
|
| 281 |
for t in days:
|
|
|
|
| 283 |
for ell in line_type_cnt_tuple:
|
| 284 |
# Create binary variable to indicate if UNICEF Fixed term is at capacity
|
| 285 |
unicef_at_capacity = solver.IntVar(0, 1, f"unicef_at_capacity_{p}_{ell[0]}{ell[1]}_d{t}")
|
| 286 |
+
unicef_capacity_vars[p, ell, t] = unicef_at_capacity # Store for later
|
| 287 |
|
| 288 |
+
# Calculate maximum possible hours for Humanizer staff this day
|
| 289 |
+
max_humanizer_hours = sum(Hmax_shift[s] * N_day["Humanizer"][t] for s in shift_list)
|
| 290 |
|
| 291 |
# If UNICEF is not at capacity (unicef_at_capacity = 0), then Humanizer hours must be 0
|
| 292 |
+
# If UNICEF is at capacity (unicef_at_capacity = 1), then Humanizer can work up to their limit
|
| 293 |
solver.Add(
|
| 294 |
solver.Sum(h["Humanizer", s, p, ell, t] for s in shift_list)
|
| 295 |
+
<= unicef_at_capacity * max_humanizer_hours # Correct M value
|
| 296 |
)
|
| 297 |
|
| 298 |
+
# Calculate maximum possible hours for UNICEF Fixed term staff this day
|
| 299 |
+
max_unicef_hours = sum(Hmax_shift[s] * N_day["UNICEF Fixed term"][t] for s in shift_list)
|
| 300 |
+
|
| 301 |
+
# Simple logic: unicef_at_capacity = 1 if and only if UNICEF uses ALL available hours
|
| 302 |
+
# This ensures Humanizer is only used when UNICEF is completely maxed out
|
| 303 |
solver.Add(
|
| 304 |
solver.Sum(h["UNICEF Fixed term", s, p, ell, t] for s in shift_list)
|
| 305 |
+
>= unicef_at_capacity * max_unicef_hours # If capacity=1, UNICEF must use max hours
|
| 306 |
)
|
| 307 |
+
|
| 308 |
+
if max_unicef_hours > 0:
|
| 309 |
+
# Upper-bound link with small epsilon so it works with 0.1-hour granularity
|
| 310 |
+
eps = 0.1 # smallest time unit (hours)
|
| 311 |
+
|
| 312 |
+
# If capacity = 0 β UNICEF β€ max_unicef_hours - eps
|
| 313 |
+
# If capacity = 1 β UNICEF β€ max_unicef_hours (tight)
|
| 314 |
+
solver.Add(
|
| 315 |
+
solver.Sum(
|
| 316 |
+
h["UNICEF Fixed term", s, p, ell, t] for s in shift_list
|
| 317 |
+
)
|
| 318 |
+
<= max_unicef_hours - eps + unicef_at_capacity * eps
|
| 319 |
+
)
|
| 320 |
+
else:
|
| 321 |
+
# No UNICEF staff that day β capacity flag must be 0
|
| 322 |
+
solver.Add(unicef_at_capacity == 0)
|
| 323 |
|
| 324 |
# 6.7 Daily hours cap per employee type (14h per person per day)
|
| 325 |
for e in employee_types:
|
|
|
|
| 343 |
)
|
| 344 |
|
| 345 |
# 6.9 Overtime only after usual (per day). Also bound OT hours <= usual hours
|
| 346 |
+
# Binary activation variable for employee type, shift and day
|
| 347 |
for e in employee_types:
|
| 348 |
for t in days:
|
| 349 |
solver.Add(ybin[e, 2, t] <= ybin[e, 1, t])
|
|
|
|
| 371 |
status = solver.Solve()
|
| 372 |
if status != pywraplp.Solver.OPTIMAL:
|
| 373 |
print("No optimal solution. Status:", status)
|
| 374 |
+
return {
|
| 375 |
+
'status': 'failed',
|
| 376 |
+
'solver_status': status,
|
| 377 |
+
'message': f"No optimal solution found. Solver status: {status}"
|
| 378 |
+
}
|
| 379 |
|
| 380 |
# -----------------------------
|
| 381 |
# 8) REPORT
|
| 382 |
# -----------------------------
|
| 383 |
+
total_cost = solver.Objective().Value()
|
| 384 |
+
print("Objective (min cost):", total_cost)
|
| 385 |
|
| 386 |
+
# Collect production results
|
| 387 |
+
production_results = {}
|
| 388 |
print("\n--- Weekly production by product ---")
|
| 389 |
for p in product_list:
|
| 390 |
produced = sum(
|
| 391 |
u[p, ell, s, t].solution_value() for ell in line_type_cnt_tuple for s in shift_list for t in days
|
| 392 |
)
|
| 393 |
+
production_results[p] = {
|
| 394 |
+
'produced': produced,
|
| 395 |
+
'demand': weekly_demand.get(p, 0),
|
| 396 |
+
'fulfillment_rate': (produced / weekly_demand.get(p, 1)) * 100 if weekly_demand.get(p, 0) > 0 else 0
|
| 397 |
+
}
|
| 398 |
print(f"{p}: {produced:.1f} (weekly demand {weekly_demand.get(p,0)})")
|
| 399 |
|
| 400 |
+
# Collect line operating hours
|
| 401 |
+
line_hours = {}
|
| 402 |
print("\n--- Line operating hours by shift/day ---")
|
| 403 |
for ell in line_type_cnt_tuple:
|
| 404 |
+
line_hours[ell] = {}
|
| 405 |
for s in shift_list:
|
| 406 |
hours = [tline[ell, s, t].solution_value() for t in days]
|
| 407 |
+
line_hours[ell][s] = hours
|
| 408 |
if sum(hours) > 1e-6:
|
| 409 |
print(
|
| 410 |
f"Line {ell} Shift {s}: "
|
| 411 |
+ ", ".join([f"days{t}={hours[t-1]:.2f}h" for t in days])
|
| 412 |
)
|
| 413 |
|
| 414 |
+
# Collect employee hours
|
| 415 |
+
employee_hours = {}
|
| 416 |
print("\n--- Hours by employee type / shift / day ---")
|
| 417 |
for e in employee_types:
|
| 418 |
+
employee_hours[e] = {}
|
| 419 |
for s in shift_list:
|
| 420 |
day_hours = [
|
| 421 |
sum(h[e, s, p, ell, t].solution_value() for p in product_list for ell in line_type_cnt_tuple)
|
| 422 |
for t in days
|
| 423 |
]
|
| 424 |
+
employee_hours[e][s] = day_hours
|
| 425 |
if sum(day_hours) > 1e-6:
|
| 426 |
print(
|
| 427 |
f"e={e}, s={s}: "
|
| 428 |
+ ", ".join([f"days{t}={day_hours[t-1]:.2f}h" for t in days])
|
| 429 |
)
|
| 430 |
|
| 431 |
+
# Collect headcount requirements
|
| 432 |
+
headcount_requirements = {}
|
| 433 |
print("\n--- Implied headcount by type / shift / day ---")
|
| 434 |
for e in employee_types:
|
| 435 |
+
headcount_requirements[e] = {}
|
| 436 |
print(e)
|
| 437 |
for s in shift_list:
|
| 438 |
row = []
|
| 439 |
+
daily_headcount = []
|
| 440 |
for t in days:
|
| 441 |
hours = sum(
|
| 442 |
h[e, s, p, ell, t].solution_value() for p in product_list for ell in line_type_cnt_tuple
|
| 443 |
)
|
| 444 |
need = int((hours + Hmax_shift[s] - 1) // Hmax_shift[s]) # ceil
|
| 445 |
+
daily_headcount.append(need)
|
| 446 |
row.append(f"days{t}={need}")
|
| 447 |
|
| 448 |
+
headcount_requirements[e][s] = daily_headcount
|
| 449 |
if any("=0" not in Fixed for Fixed in row):
|
| 450 |
print(f"e={e}, s={s}: " + ", ".join(row))
|
| 451 |
|
| 452 |
+
# Collect priority mode results
|
| 453 |
+
priority_results = None
|
| 454 |
+
if constraint_mode == "priority" and 'unicef_capacity_vars' in locals():
|
| 455 |
+
priority_results = {}
|
| 456 |
+
print("\n--- UNICEF At Capacity Status (Priority Mode) ---")
|
| 457 |
+
for (p, ell, t), var in unicef_capacity_vars.items():
|
| 458 |
+
capacity_value = var.solution_value()
|
| 459 |
+
if capacity_value > 0.5: # Binary variable, so > 0.5 means 1
|
| 460 |
+
priority_results[(p, ell, t)] = capacity_value
|
| 461 |
+
print(f"Product {p}, Line {ell}, Day {t}: UNICEF at capacity = {capacity_value:.0f}")
|
| 462 |
+
|
| 463 |
+
# Summary
|
| 464 |
+
total_capacity_flags = sum(1 for var in unicef_capacity_vars.values() if var.solution_value() > 0.5)
|
| 465 |
+
if total_capacity_flags == 0:
|
| 466 |
+
print("β
All unicef_at_capacity = 0 β UNICEF Fixed term staff sufficient for all demand")
|
| 467 |
+
print(" β Humanizer staff not needed")
|
| 468 |
+
else:
|
| 469 |
+
print(f"β οΈ {total_capacity_flags} cases where UNICEF at capacity β Humanizer staff used")
|
| 470 |
+
|
| 471 |
+
priority_results['summary'] = {
|
| 472 |
+
'total_capacity_flags': total_capacity_flags,
|
| 473 |
+
'unicef_sufficient': total_capacity_flags == 0
|
| 474 |
+
}
|
| 475 |
+
|
| 476 |
+
# Return structured results
|
| 477 |
+
return {
|
| 478 |
+
'status': 'optimal',
|
| 479 |
+
'total_cost': total_cost,
|
| 480 |
+
'production_results': production_results,
|
| 481 |
+
'line_hours': line_hours,
|
| 482 |
+
'employee_hours': employee_hours,
|
| 483 |
+
'headcount_requirements': headcount_requirements,
|
| 484 |
+
'priority_results': priority_results,
|
| 485 |
+
'parameters': {
|
| 486 |
+
'days': days,
|
| 487 |
+
'product_list': product_list,
|
| 488 |
+
'employee_types': employee_types,
|
| 489 |
+
'shift_list': shift_list,
|
| 490 |
+
'line_list': line_list,
|
| 491 |
+
'constraint_mode': constraint_mode,
|
| 492 |
+
'total_demand': sum(weekly_demand.get(p, 0) for p in product_list)
|
| 493 |
+
}
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
|
| 497 |
if __name__ == "__main__":
|
| 498 |
optimizer = OptimizerReal()
|
streamlit_app.py
ADDED
|
@@ -0,0 +1,517 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st

# Page configuration - MUST be the first Streamlit command executed,
# otherwise Streamlit raises StreamlitAPIException at startup.
st.set_page_config(
    page_title="SD Roster Optimization Tool",
    page_icon="📊",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Now import everything else
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import sys
import os
from datetime import datetime, timedelta
import numpy as np

# Make the project's packages importable regardless of the working directory.
# NOTE: the imports below are of the form `src.models...`, so the directory
# that must be on sys.path is the one CONTAINING `src` (the app root) —
# appending only `.../src` (as before) never helped resolve them. The `src`
# entry is kept as well in case inner modules import each other as
# top-level packages.
_APP_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_APP_DIR)
sys.path.append(os.path.join(_APP_DIR, 'src'))

from src.models.optimizer_real import OptimizerReal
from src.config import optimization_config
import src.etl.extract as extract
import src.etl.transform as transform
# Custom CSS for better styling of headers, metric cards and tabs.
_CUSTOM_CSS = """
<style>
    .main-header {
        font-size: 2.5rem;
        font-weight: bold;
        color: #1f77b4;
        margin-bottom: 1rem;
    }
    .section-header {
        font-size: 1.5rem;
        font-weight: bold;
        color: #2c3e50;
        margin: 1rem 0;
    }
    .metric-card {
        background-color: #f8f9fa;
        padding: 1rem;
        border-radius: 0.5rem;
        border-left: 4px solid #1f77b4;
        margin-bottom: 1rem;
    }
    .stTabs [data-baseweb="tab-list"] {
        gap: 2rem;
    }
</style>
"""

# Inject once per page load; raw HTML requires unsafe_allow_html.
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
# Initialize session state keys used across reruns; Streamlit re-executes
# the whole script on every interaction, so guard each assignment.
for _state_key in ("optimization_results", "optimizer", "date_range"):
    if _state_key not in st.session_state:
        st.session_state[_state_key] = None

# Title
st.markdown('<h1 class="main-header">📊 SD Roster Optimization Tool</h1>', unsafe_allow_html=True)
# Create layout: Left sidebar + Main content

def _apply_fallback_date_range():
    """Install the default demo period when real date ranges are unavailable.

    Stores the range in ``st.session_state.date_range`` and returns
    ``(start_date, end_date)`` so callers can keep local copies.
    """
    start = datetime(2025, 3, 24).date()
    end = datetime(2025, 3, 28).date()
    st.session_state.date_range = (start, end)
    return start, end


with st.sidebar:
    st.markdown("## 🎛️ Control Panel")

    # Date Selection Section
    st.markdown("### 📅 Date Range Selection")
    try:
        # Get available date ranges from the released-orders data
        date_ranges = transform.get_date_ranges()
        if date_ranges:
            date_range_options = [
                f"{start.strftime('%Y-%m-%d')} to {end.strftime('%Y-%m-%d')}"
                for start, end in date_ranges
            ]
            selected_range_str = st.selectbox(
                "Select date range:",
                options=date_range_options,
                help="Available date ranges from released orders"
            )

            # Extract selected dates
            selected_index = date_range_options.index(selected_range_str)
            start_date, end_date = date_ranges[selected_index]
            st.session_state.date_range = (start_date, end_date)

            # Display duration (inclusive of both endpoints)
            duration = (end_date - start_date).days + 1
            st.info(f"Duration: {duration} days")

        else:
            st.warning("No date ranges found in data")
            start_date, end_date = _apply_fallback_date_range()

    except Exception as e:
        st.error(f"Error loading dates: {e}")
        start_date, end_date = _apply_fallback_date_range()

    st.markdown("---")

    # Optimization Parameters Section
    st.markdown("### ⚙️ Optimization Parameters")

    # Employee Type Selection — fall back to known defaults if the data
    # source is unreachable. Catch Exception rather than using a bare
    # `except:`, which would also swallow KeyboardInterrupt/SystemExit.
    try:
        employee_df = extract.read_employee_data()
        available_emp_types = employee_df["employment_type"].unique().tolist()
    except Exception:
        available_emp_types = ["UNICEF Fixed term", "Humanizer"]

    selected_emp_types = st.multiselect(
        "Employee Types:",
        available_emp_types,
        default=available_emp_types,
        help="Select employee types to include in optimization"
    )

    # Shift Selection
    try:
        shift_df = extract.get_shift_info()
        available_shifts = shift_df["id"].unique().tolist()
    except Exception:
        available_shifts = [1, 2, 3]

    selected_shifts = st.multiselect(
        "Shifts:",
        available_shifts,
        default=available_shifts,
        help="1=Regular, 2=Overtime, 3=Evening"
    )

    # Line Selection
    try:
        line_df = extract.read_packaging_line_data()
        available_lines = line_df["id"].unique().tolist()
    except Exception:
        available_lines = [6, 7]

    selected_lines = st.multiselect(
        "Production Lines:",
        available_lines,
        default=available_lines,
        help="Select production lines to include"
    )

    # Advanced Parameters
    with st.expander("🔧 Advanced Parameters"):
        constraint_mode = st.selectbox(
            "Fixed Staff Constraint Mode:",
            ["priority", "mandatory", "none"],
            index=0,
            help="priority=Use fixed staff first, mandatory=Force all fixed hours, none=Demand-driven"
        )

        max_hours_per_person = st.number_input(
            "Max hours per person per day:",
            min_value=8,
            max_value=24,
            value=14,
            help="Legal daily limit"
        )

        # Employee availability override
        st.markdown("**Employee Availability Override:**")
        col1, col2 = st.columns(2)
        with col1:
            unicef_count = st.number_input("UNICEF Fixed term:", min_value=0, value=8)
        with col2:
            humanizer_count = st.number_input("Humanizer:", min_value=0, value=6)

    st.markdown("---")

    # Run Optimization Button — True only on the rerun triggered by a click.
    run_optimization = st.button("🚀 Run Optimization", type="primary", use_container_width=True)
# Main content area: narrow metadata column on the left, wide results
# column on the right.
col1, col2 = st.columns([1, 2])

# Left column - Metadata
with col1:
    st.markdown('<h2 class="section-header">📋 Metadata & Overview</h2>', unsafe_allow_html=True)

    # Show the currently selected period (set by the sidebar).
    if st.session_state.date_range:
        start_date, end_date = st.session_state.date_range
        st.markdown(f"**Selected Period:** {start_date} to {end_date}")

    # Demand Information
    with st.expander("📊 Demand Overview", expanded=True):
        try:
            if st.session_state.date_range:
                start_date, end_date = st.session_state.date_range
                demand_df = extract.read_released_orders_data(start_date=start_date, end_date=end_date)

                # Headline demand metrics
                total_orders = len(demand_df)
                total_quantity = demand_df["Order quantity (GMEIN)"].sum()
                unique_products = demand_df["Material Number"].nunique()

                col_d1, col_d2, col_d3 = st.columns(3)
                with col_d1:
                    st.metric("Total Orders", total_orders)
                with col_d2:
                    st.metric("Total Quantity", f"{total_quantity:,.0f}")
                with col_d3:
                    st.metric("Unique Products", unique_products)

                # Top products by total ordered quantity
                top_products = (
                    demand_df.groupby('Material Number')["Order quantity (GMEIN)"]
                    .sum()
                    .sort_values(ascending=False)
                    .head(5)
                )

                if not top_products.empty:
                    st.markdown("**Top 5 Products by Demand:**")
                    for product, quantity in top_products.items():
                        st.markdown(f"• {product}: {quantity:,.0f}")

        except Exception as e:
            st.error(f"Error loading demand data: {e}")

    # Employee Information
    with st.expander("👥 Employee Overview", expanded=True):
        try:
            employee_df = extract.read_employee_data()

            # Headcount summary, overall and broken down by contract type.
            st.metric("Total Employees", len(employee_df))

            st.markdown("**By Employment Type:**")
            for emp_type, count in employee_df.groupby("employment_type").size().items():
                st.markdown(f"• {emp_type}: {count}")

        except Exception as e:
            st.error(f"Error loading employee data: {e}")

    # Production Lines
    with st.expander("🏭 Production Lines", expanded=True):
        try:
            line_df = extract.read_packaging_line_data()

            st.markdown("**Available Lines:**")
            for _, row in line_df.iterrows():
                st.markdown(f"• Line {row['id']}: {row['line_count']} units")

        except Exception as e:
            st.error(f"Error loading line data: {e}")
# Right column - Optimization Results
with col2:
    st.markdown('<h2 class="section-header">🎯 Optimization Results</h2>', unsafe_allow_html=True)

    if run_optimization:
        with st.spinner("Running optimization..."):
            try:
                # Create optimizer instance
                optimizer = OptimizerReal()

                # Run optimization and get structured results.
                # NOTE(review): the sidebar selections (employee types, shifts,
                # lines, constraint mode, max hours) are not passed to the
                # solver here — confirm whether OptimizerReal reads them from
                # config, or wire them through once the API accepts them.
                results = optimizer.solve_option_A_multi_day_generalized()

                if results is None:
                    st.error("❌ Optimization returned no results")
                elif results.get('status') == 'failed':
                    st.error(f"❌ Optimization failed: {results.get('message', 'Unknown error')}")
                else:
                    # Persist results across reruns so tab switches don't
                    # require re-solving.
                    st.session_state.optimization_results = results
                    st.session_state.optimizer = optimizer
                    st.success("✅ Optimization completed successfully!")

            except Exception as e:
                st.error(f"❌ Optimization failed: {e}")
                st.exception(e)

    # Display results if available
    if st.session_state.optimization_results:
        results = st.session_state.optimization_results

        # Create tabs for different result views
        tab1, tab2, tab3, tab4 = st.tabs(["📊 Summary", "🏭 Production", "👷 Labor", "💰 Costs"])

        with tab1:
            st.markdown("### Optimization Summary")

            # Display key metrics
            total_cost = results.get('total_cost', 0)
            st.metric("💰 Total Optimization Cost", f"${total_cost:,.2f}")

            # Additional summary metrics
            params = results.get('parameters', {})
            col_s1, col_s2, col_s3 = st.columns(3)

            with col_s1:
                st.metric("Total Products", len(params.get('product_list', [])))
            with col_s2:
                st.metric("Employee Types", len(params.get('employee_types', [])))
            with col_s3:
                st.metric("Total Demand", f"{params.get('total_demand', 0):,.0f}")

            # Show optimization parameters used
            st.markdown("**Optimization Parameters:**")
            if st.session_state.date_range:
                start_date, end_date = st.session_state.date_range
                duration = (end_date - start_date).days + 1
                st.markdown(f"• Date Range: {start_date} to {end_date} ({duration} days)")
            st.markdown(f"• Employee Types: {', '.join(selected_emp_types)}")
            st.markdown(f"• Shifts: {', '.join(map(str, selected_shifts))}")
            st.markdown(f"• Production Lines: {', '.join(map(str, selected_lines))}")
            st.markdown(f"• Constraint Mode: {params.get('constraint_mode', 'N/A')}")

            # Cost efficiency metrics (guard divisions against zero)
            if st.session_state.date_range:
                start_date, end_date = st.session_state.date_range
                duration = (end_date - start_date).days + 1
                cost_per_day = total_cost / duration if duration > 0 else 0
                cost_per_unit = total_cost / params.get('total_demand', 1) if params.get('total_demand', 0) > 0 else 0

                col_e1, col_e2 = st.columns(2)
                with col_e1:
                    st.metric("Cost per Day", f"${cost_per_day:,.2f}")
                with col_e2:
                    st.metric("Cost per Unit", f"${cost_per_unit:.3f}")

        with tab2:
            st.markdown("### Production Results")

            production_results = results.get('production_results', {})
            if production_results:
                # Create production summary table
                prod_data = []
                for product, data in production_results.items():
                    prod_data.append({
                        'Product': product,
                        'Demand': data['demand'],
                        'Produced': data['produced'],
                        'Fulfillment %': f"{data['fulfillment_rate']:.1f}%"
                    })

                if prod_data:
                    prod_df = pd.DataFrame(prod_data)
                    st.dataframe(prod_df, use_container_width=True)

                    # Production vs demand, grouped per product
                    fig_prod = px.bar(
                        prod_df,
                        x='Product',
                        y=['Demand', 'Produced'],
                        title='Production vs Demand by Product',
                        barmode='group'
                    )
                    st.plotly_chart(fig_prod, use_container_width=True)

                    # Fulfillment rate chart (strip the "%" suffix back off
                    # the display strings to plot numerically)
                    fulfillment_data = [(row['Product'], float(row['Fulfillment %'].rstrip('%'))) for row in prod_data]
                    fulfill_df = pd.DataFrame(fulfillment_data, columns=['Product', 'Fulfillment_Rate'])

                    fig_fulfill = px.bar(
                        fulfill_df,
                        x='Product',
                        y='Fulfillment_Rate',
                        title='Fulfillment Rate by Product (%)',
                        color='Fulfillment_Rate',
                        color_continuous_scale='RdYlGn'
                    )
                    fig_fulfill.update_layout(yaxis_title="Fulfillment Rate (%)")
                    st.plotly_chart(fig_fulfill, use_container_width=True)
            else:
                st.info("No production data available")

        with tab3:
            st.markdown("### Labor Allocation")

            employee_hours = results.get('employee_hours', {})
            headcount_req = results.get('headcount_requirements', {})

            if employee_hours:
                # Flatten {emp_type: {shift: [daily hours]}} into table rows,
                # skipping combinations with no allocated hours.
                labor_data = []
                for emp_type, shifts in employee_hours.items():
                    for shift, daily_hours in shifts.items():
                        total_hours = sum(daily_hours)
                        if total_hours > 0:
                            labor_data.append({
                                'Employee Type': emp_type,
                                'Shift': f"Shift {shift}",
                                'Total Hours': total_hours,
                                'Avg Daily Hours': total_hours / len(daily_hours) if daily_hours else 0
                            })

                if labor_data:
                    labor_df = pd.DataFrame(labor_data)
                    st.dataframe(labor_df, use_container_width=True)

                    # Labor hours chart
                    fig_labor = px.bar(
                        labor_df,
                        x='Employee Type',
                        y='Total Hours',
                        color='Shift',
                        title='Total Labor Hours by Employee Type and Shift',
                        barmode='group'
                    )
                    st.plotly_chart(fig_labor, use_container_width=True)

            # Headcount requirements
            if headcount_req:
                st.markdown("#### Required Headcount")
                headcount_data = []
                for emp_type, shifts in headcount_req.items():
                    for shift, daily_count in shifts.items():
                        max_count = max(daily_count) if daily_count else 0
                        avg_count = sum(daily_count) / len(daily_count) if daily_count else 0
                        if max_count > 0:
                            headcount_data.append({
                                'Employee Type': emp_type,
                                'Shift': f"Shift {shift}",
                                'Max Daily': max_count,
                                'Avg Daily': f"{avg_count:.1f}"
                            })

                if headcount_data:
                    headcount_df = pd.DataFrame(headcount_data)
                    st.dataframe(headcount_df, use_container_width=True)

        with tab4:
            st.markdown("### Cost Analysis")

            total_cost = results.get('total_cost', 0)
            st.metric("Total Optimization Cost", f"${total_cost:,.2f}")

            # Cost breakdown by employee type (estimated from hours × wage)
            employee_hours = results.get('employee_hours', {})
            if employee_hours:
                cost_data = []
                # Use wage data from config
                wage_types = optimization_config.COST_LIST_PER_EMP_SHIFT

                for emp_type, shifts in employee_hours.items():
                    for shift, daily_hours in shifts.items():
                        total_hours = sum(daily_hours)
                        # Only combinations with hours AND a known wage rate
                        # contribute a row (removed a dead per-employee cost
                        # accumulator that was never read).
                        if total_hours > 0 and emp_type in wage_types and shift in wage_types[emp_type]:
                            rate = wage_types[emp_type][shift]
                            cost_data.append({
                                'Employee Type': emp_type,
                                'Shift': f"Shift {shift}",
                                'Hours': total_hours,
                                'Rate': rate,
                                'Cost': total_hours * rate
                            })

                if cost_data:
                    cost_df = pd.DataFrame(cost_data)
                    st.dataframe(cost_df, use_container_width=True)

                    # Cost breakdown chart
                    fig_cost = px.pie(
                        cost_df,
                        values='Cost',
                        names='Employee Type',
                        title='Cost Distribution by Employee Type'
                    )
                    st.plotly_chart(fig_cost, use_container_width=True)

                    # Cost by shift chart
                    fig_shift_cost = px.bar(
                        cost_df,
                        x='Employee Type',
                        y='Cost',
                        color='Shift',
                        title='Cost Breakdown by Employee Type and Shift',
                        barmode='stack'
                    )
                    st.plotly_chart(fig_shift_cost, use_container_width=True)

            # Priority mode results
            priority_results = results.get('priority_results')
            if priority_results and priority_results.get('summary'):
                st.markdown("#### Priority Mode Analysis")
                summary = priority_results['summary']
                if summary['unicef_sufficient']:
                    st.success("✅ UNICEF Fixed term staff sufficient for all demand")
                    st.info("→ Humanizer staff not needed")
                else:
                    st.warning(f"⚠️ {summary['total_capacity_flags']} cases where UNICEF at capacity")
                    st.info("→ Humanizer staff utilized")

    else:
        st.info("👈 Click 'Run Optimization' in the sidebar to see results")

        # Show placeholder content
        st.markdown("""
        ### What you'll see here:

        - **📊 Summary**: Overall optimization results and key metrics
        - **🏭 Production**: Production schedule by product and day
        - **👷 Labor**: Employee allocation and shift assignments
        - **💰 Costs**: Detailed cost breakdown and analysis

        Configure your parameters in the sidebar and click 'Run Optimization' to get started!
        """)
# Footer
st.markdown("---")
_FOOTER_HTML = """
<div style='text-align: center; color: gray; padding: 2rem;'>
    <small>SD Roster Optimization Tool | Built with Streamlit & OR-Tools</small>
</div>
"""
st.markdown(_FOOTER_HTML, unsafe_allow_html=True)
streamlit_app_old.py
ADDED
|
@@ -0,0 +1,517 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st

# Page configuration - MUST be the first Streamlit command executed in the
# script, otherwise Streamlit raises a StreamlitAPIException on rerun.
st.set_page_config(
    page_title="SD Roster Optimization Tool",
    page_icon="π",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Now import everything else (deliberately after set_page_config, see above).
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import sys
import os
from datetime import datetime, timedelta
import numpy as np

# Add src to path so the project-local packages resolve when the app is
# launched from the repository root.
sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))

from src.models.optimizer_real import OptimizerReal
from src.config import optimization_config
import src.etl.extract as extract
import src.etl.transform as transform

# Custom CSS for better styling of headers, metric cards and tabs.
st.markdown("""
<style>
    .main-header {
        font-size: 2.5rem;
        font-weight: bold;
        color: #1f77b4;
        margin-bottom: 1rem;
    }
    .section-header {
        font-size: 1.5rem;
        font-weight: bold;
        color: #2c3e50;
        margin: 1rem 0;
    }
    .metric-card {
        background-color: #f8f9fa;
        padding: 1rem;
        border-radius: 0.5rem;
        border-left: 4px solid #1f77b4;
        margin-bottom: 1rem;
    }
    .stTabs [data-baseweb="tab-list"] {
        gap: 2rem;
    }
</style>
""", unsafe_allow_html=True)

# Initialize session state so values survive Streamlit's script reruns.
if 'optimization_results' not in st.session_state:
    st.session_state.optimization_results = None
if 'optimizer' not in st.session_state:
    st.session_state.optimizer = None
if 'date_range' not in st.session_state:
    st.session_state.date_range = None

# Title
st.markdown('<h1 class="main-header">π SD Roster Optimization Tool</h1>', unsafe_allow_html=True)

# Create layout: Left sidebar (controls) + Main content (metadata + results)
with st.sidebar:
    st.markdown("## ποΈ Control Panel")

    # ---- Date Selection Section -------------------------------------------
    st.markdown("### π Date Range Selection")
    try:
        # Get available date ranges from the released-orders data.
        # NOTE(review): assumes transform.get_date_ranges() returns a list of
        # (start, end) date pairs - confirm against src/etl/transform.
        date_ranges = transform.get_date_ranges()
        if date_ranges:
            date_range_options = [f"{start.strftime('%Y-%m-%d')} to {end.strftime('%Y-%m-%d')}" for start, end in date_ranges]
            selected_range_str = st.selectbox(
                "Select date range:",
                options=date_range_options,
                help="Available date ranges from released orders"
            )

            # Map the selected label back to its (start, end) pair.
            selected_index = date_range_options.index(selected_range_str)
            start_date, end_date = date_ranges[selected_index]
            st.session_state.date_range = (start_date, end_date)

            # Display duration (inclusive of both endpoints).
            duration = (end_date - start_date).days + 1
            st.info(f"Duration: {duration} days")

        else:
            # Fall back to a known-good demo window when the data has no ranges.
            st.warning("No date ranges found in data")
            start_date = datetime(2025, 3, 24).date()
            end_date = datetime(2025, 3, 28).date()
            st.session_state.date_range = (start_date, end_date)

    except Exception as e:
        # Same demo-window fallback when the ETL layer fails entirely.
        st.error(f"Error loading dates: {e}")
        start_date = datetime(2025, 3, 24).date()
        end_date = datetime(2025, 3, 28).date()
        st.session_state.date_range = (start_date, end_date)

    st.markdown("---")

    # ---- Optimization Parameters Section ----------------------------------
    st.markdown("### βοΈ Optimization Parameters")

    # Employee Type Selection (falls back to known defaults if data is missing).
    try:
        employee_df = extract.read_employee_data()
        available_emp_types = employee_df["employment_type"].unique().tolist()
    except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
        available_emp_types = ["UNICEF Fixed term", "Humanizer"]

    selected_emp_types = st.multiselect(
        "Employee Types:",
        available_emp_types,
        default=available_emp_types,
        help="Select employee types to include in optimization"
    )

    # Shift Selection
    try:
        shift_df = extract.get_shift_info()
        available_shifts = shift_df["id"].unique().tolist()
    except Exception:  # narrowed from bare except
        available_shifts = [1, 2, 3]

    selected_shifts = st.multiselect(
        "Shifts:",
        available_shifts,
        default=available_shifts,
        help="1=Regular, 2=Overtime, 3=Evening"
    )

    # Line Selection
    try:
        line_df = extract.read_packaging_line_data()
        available_lines = line_df["id"].unique().tolist()
    except Exception:  # narrowed from bare except
        available_lines = [6, 7]

    selected_lines = st.multiselect(
        "Production Lines:",
        available_lines,
        default=available_lines,
        help="Select production lines to include"
    )

    # Advanced Parameters
    # NOTE(review): these widget values (constraint_mode, max_hours_per_person,
    # unicef_count, humanizer_count) and the selections above are not yet passed
    # to OptimizerReal below - confirm whether that wiring is intended.
    with st.expander("π§ Advanced Parameters"):
        constraint_mode = st.selectbox(
            "Fixed Staff Constraint Mode:",
            ["priority", "mandatory", "none"],
            index=0,
            help="priority=Use fixed staff first, mandatory=Force all fixed hours, none=Demand-driven"
        )

        max_hours_per_person = st.number_input(
            "Max hours per person per day:",
            min_value=8,
            max_value=24,
            value=14,
            help="Legal daily limit"
        )

        # Employee availability override
        st.markdown("**Employee Availability Override:**")
        col1, col2 = st.columns(2)
        with col1:
            unicef_count = st.number_input("UNICEF Fixed term:", min_value=0, value=8)
        with col2:
            humanizer_count = st.number_input("Humanizer:", min_value=0, value=6)

    st.markdown("---")

    # Run Optimization Button
    run_optimization = st.button("π Run Optimization", type="primary", use_container_width=True)

# Main content area: narrow metadata column + wide results column.
col1, col2 = st.columns([1, 2])

# ---- Left column: Metadata ------------------------------------------------
with col1:
    st.markdown('<h2 class="section-header">π Metadata & Overview</h2>', unsafe_allow_html=True)

    # Show current date range
    if st.session_state.date_range:
        start_date, end_date = st.session_state.date_range
        st.markdown(f"**Selected Period:** {start_date} to {end_date}")

    # Demand Information
    with st.expander("π Demand Overview", expanded=True):
        try:
            if st.session_state.date_range:
                start_date, end_date = st.session_state.date_range
                demand_df = extract.read_released_orders_data(start_date=start_date, end_date=end_date)

                # Total demand metrics
                total_orders = len(demand_df)
                total_quantity = demand_df["Order quantity (GMEIN)"].sum()
                unique_products = demand_df["Material Number"].nunique()

                col_d1, col_d2, col_d3 = st.columns(3)
                with col_d1:
                    st.metric("Total Orders", total_orders)
                with col_d2:
                    st.metric("Total Quantity", f"{total_quantity:,.0f}")
                with col_d3:
                    st.metric("Unique Products", unique_products)

                # Top products by demand
                top_products = demand_df.groupby('Material Number')["Order quantity (GMEIN)"].sum().sort_values(ascending=False).head(5)

                if not top_products.empty:
                    st.markdown("**Top 5 Products by Demand:**")
                    for product, quantity in top_products.items():
                        st.markdown(f"β’ {product}: {quantity:,.0f}")

        except Exception as e:
            st.error(f"Error loading demand data: {e}")

    # Employee Information
    with st.expander("π₯ Employee Overview", expanded=True):
        try:
            employee_df = extract.read_employee_data()

            # Employee metrics
            total_employees = len(employee_df)
            emp_by_type = employee_df.groupby("employment_type").size()

            st.metric("Total Employees", total_employees)

            st.markdown("**By Employment Type:**")
            for emp_type, count in emp_by_type.items():
                st.markdown(f"β’ {emp_type}: {count}")

        except Exception as e:
            st.error(f"Error loading employee data: {e}")

    # Production Lines
    with st.expander("π Production Lines", expanded=True):
        try:
            line_df = extract.read_packaging_line_data()

            st.markdown("**Available Lines:**")
            for _, row in line_df.iterrows():
                st.markdown(f"β’ Line {row['id']}: {row['line_count']} units")

        except Exception as e:
            st.error(f"Error loading line data: {e}")

# ---- Right column: Optimization Results -----------------------------------
with col2:
    st.markdown('<h2 class="section-header">π― Optimization Results</h2>', unsafe_allow_html=True)

    if run_optimization:
        with st.spinner("Running optimization..."):
            try:
                # Create optimizer instance
                optimizer = OptimizerReal()

                # Run optimization and get structured results.
                # NOTE(review): solver currently receives no sidebar parameters.
                results = optimizer.solve_option_A_multi_day_generalized()

                if results is None:
                    st.error("β Optimization returned no results")
                elif results.get('status') == 'failed':
                    st.error(f"β Optimization failed: {results.get('message', 'Unknown error')}")
                else:
                    # Persist in session state so results survive reruns.
                    st.session_state.optimization_results = results
                    st.session_state.optimizer = optimizer
                    st.success("β Optimization completed successfully!")

            except Exception as e:
                st.error(f"β Optimization failed: {e}")
                st.exception(e)

    # Display results if available
    if st.session_state.optimization_results:
        results = st.session_state.optimization_results

        # Create tabs for different result views
        tab1, tab2, tab3, tab4 = st.tabs(["π Summary", "π Production", "π· Labor", "π° Costs"])

        with tab1:
            st.markdown("### Optimization Summary")

            # Display key metrics
            total_cost = results.get('total_cost', 0)
            st.metric("π° Total Optimization Cost", f"${total_cost:,.2f}")

            # Additional summary metrics
            params = results.get('parameters', {})
            col_s1, col_s2, col_s3 = st.columns(3)

            with col_s1:
                st.metric("Total Products", len(params.get('product_list', [])))
            with col_s2:
                st.metric("Employee Types", len(params.get('employee_types', [])))
            with col_s3:
                st.metric("Total Demand", f"{params.get('total_demand', 0):,.0f}")

            # Show optimization parameters used
            st.markdown("**Optimization Parameters:**")
            if st.session_state.date_range:
                start_date, end_date = st.session_state.date_range
                duration = (end_date - start_date).days + 1
                st.markdown(f"β’ Date Range: {start_date} to {end_date} ({duration} days)")
            st.markdown(f"β’ Employee Types: {', '.join(selected_emp_types)}")
            st.markdown(f"β’ Shifts: {', '.join(map(str, selected_shifts))}")
            st.markdown(f"β’ Production Lines: {', '.join(map(str, selected_lines))}")
            st.markdown(f"β’ Constraint Mode: {params.get('constraint_mode', 'N/A')}")

            # Cost efficiency metrics (guard against zero-day / zero-demand).
            if st.session_state.date_range:
                start_date, end_date = st.session_state.date_range
                duration = (end_date - start_date).days + 1
                cost_per_day = total_cost / duration if duration > 0 else 0
                cost_per_unit = total_cost / params.get('total_demand', 1) if params.get('total_demand', 0) > 0 else 0

                col_e1, col_e2 = st.columns(2)
                with col_e1:
                    st.metric("Cost per Day", f"${cost_per_day:,.2f}")
                with col_e2:
                    st.metric("Cost per Unit", f"${cost_per_unit:.3f}")

        with tab2:
            st.markdown("### Production Results")

            production_results = results.get('production_results', {})
            if production_results:
                # Create production summary table
                prod_data = []
                for product, data in production_results.items():
                    prod_data.append({
                        'Product': product,
                        'Demand': data['demand'],
                        'Produced': data['produced'],
                        'Fulfillment %': f"{data['fulfillment_rate']:.1f}%"
                    })

                if prod_data:
                    prod_df = pd.DataFrame(prod_data)
                    st.dataframe(prod_df, use_container_width=True)

                    # Production fulfillment chart
                    fig_prod = px.bar(
                        prod_df,
                        x='Product',
                        y=['Demand', 'Produced'],
                        title='Production vs Demand by Product',
                        barmode='group'
                    )
                    st.plotly_chart(fig_prod, use_container_width=True)

                    # Fulfillment rate chart (strip the "%" suffix back to float)
                    fulfillment_data = [(row['Product'], float(row['Fulfillment %'].rstrip('%'))) for row in prod_data]
                    fulfill_df = pd.DataFrame(fulfillment_data, columns=['Product', 'Fulfillment_Rate'])

                    fig_fulfill = px.bar(
                        fulfill_df,
                        x='Product',
                        y='Fulfillment_Rate',
                        title='Fulfillment Rate by Product (%)',
                        color='Fulfillment_Rate',
                        color_continuous_scale='RdYlGn'
                    )
                    fig_fulfill.update_layout(yaxis_title="Fulfillment Rate (%)")
                    st.plotly_chart(fig_fulfill, use_container_width=True)
            else:
                st.info("No production data available")

        with tab3:
            st.markdown("### Labor Allocation")

            employee_hours = results.get('employee_hours', {})
            headcount_req = results.get('headcount_requirements', {})

            if employee_hours:
                # Create labor hours visualization
                # NOTE(review): assumes employee_hours maps
                # emp_type -> shift -> list of daily hours - confirm in optimizer.
                labor_data = []
                for emp_type, shifts in employee_hours.items():
                    for shift, daily_hours in shifts.items():
                        total_hours = sum(daily_hours)
                        if total_hours > 0:
                            labor_data.append({
                                'Employee Type': emp_type,
                                'Shift': f"Shift {shift}",
                                'Total Hours': total_hours,
                                'Avg Daily Hours': total_hours / len(daily_hours) if daily_hours else 0
                            })

                if labor_data:
                    labor_df = pd.DataFrame(labor_data)
                    st.dataframe(labor_df, use_container_width=True)

                    # Labor hours chart
                    fig_labor = px.bar(
                        labor_df,
                        x='Employee Type',
                        y='Total Hours',
                        color='Shift',
                        title='Total Labor Hours by Employee Type and Shift',
                        barmode='group'
                    )
                    st.plotly_chart(fig_labor, use_container_width=True)

                # Headcount requirements
                if headcount_req:
                    st.markdown("#### Required Headcount")
                    headcount_data = []
                    for emp_type, shifts in headcount_req.items():
                        for shift, daily_count in shifts.items():
                            max_count = max(daily_count) if daily_count else 0
                            avg_count = sum(daily_count) / len(daily_count) if daily_count else 0
                            if max_count > 0:
                                headcount_data.append({
                                    'Employee Type': emp_type,
                                    'Shift': f"Shift {shift}",
                                    'Max Daily': max_count,
                                    'Avg Daily': f"{avg_count:.1f}"
                                })

                    if headcount_data:
                        headcount_df = pd.DataFrame(headcount_data)
                        st.dataframe(headcount_df, use_container_width=True)

        with tab4:
            st.markdown("### Cost Analysis")

            total_cost = results.get('total_cost', 0)
            st.metric("Total Optimization Cost", f"${total_cost:,.2f}")

            # Cost breakdown by employee type (estimated from config wage rates)
            employee_hours = results.get('employee_hours', {})
            if employee_hours:
                cost_data = []
                # Use wage data from config
                wage_types = optimization_config.COST_LIST_PER_EMP_SHIFT

                for emp_type, shifts in employee_hours.items():
                    for shift, daily_hours in shifts.items():
                        total_hours = sum(daily_hours)
                        # Only rows with both worked hours and a configured rate.
                        if total_hours > 0 and emp_type in wage_types and shift in wage_types[emp_type]:
                            shift_cost = total_hours * wage_types[emp_type][shift]
                            cost_data.append({
                                'Employee Type': emp_type,
                                'Shift': f"Shift {shift}",
                                'Hours': total_hours,
                                'Rate': wage_types[emp_type][shift],
                                'Cost': shift_cost
                            })

                if cost_data:
                    cost_df = pd.DataFrame(cost_data)
                    st.dataframe(cost_df, use_container_width=True)

                    # Cost breakdown chart
                    fig_cost = px.pie(
                        cost_df,
                        values='Cost',
                        names='Employee Type',
                        title='Cost Distribution by Employee Type'
                    )
                    st.plotly_chart(fig_cost, use_container_width=True)

                    # Cost by shift chart
                    fig_shift_cost = px.bar(
                        cost_df,
                        x='Employee Type',
                        y='Cost',
                        color='Shift',
                        title='Cost Breakdown by Employee Type and Shift',
                        barmode='stack'
                    )
                    st.plotly_chart(fig_shift_cost, use_container_width=True)

            # Priority mode results (only present when constraint mode is "priority")
            priority_results = results.get('priority_results')
            if priority_results and priority_results.get('summary'):
                st.markdown("#### Priority Mode Analysis")
                summary = priority_results['summary']
                if summary['unicef_sufficient']:
                    st.success("β UNICEF Fixed term staff sufficient for all demand")
                    st.info("β Humanizer staff not needed")
                else:
                    st.warning(f"β οΈ {summary['total_capacity_flags']} cases where UNICEF at capacity")
                    st.info("β Humanizer staff utilized")

    else:
        st.info("π Click 'Run Optimization' in the sidebar to see results")

        # Show placeholder content
        st.markdown("""
        ### What you'll see here:

        - **π Summary**: Overall optimization results and key metrics
        - **π Production**: Production schedule by product and day
        - **π· Labor**: Employee allocation and shift assignments
        - **π° Costs**: Detailed cost breakdown and analysis

        Configure your parameters in the sidebar and click 'Run Optimization' to get started!
        """)

# Footer
st.markdown("---")
st.markdown("""
<div style='text-align: center; color: gray; padding: 2rem;'>
    <small>SD Roster Optimization Tool | Built with Streamlit & OR-Tools</small>
</div>
""", unsafe_allow_html=True)