Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,13 +5,7 @@ import plotly.express as px
|
|
| 5 |
import plotly.graph_objects as go
|
| 6 |
from plotly.subplots import make_subplots
|
| 7 |
from datetime import datetime, timedelta
|
| 8 |
-
from sklearn.model_selection import train_test_split
|
| 9 |
-
from sklearn.compose import ColumnTransformer
|
| 10 |
-
from sklearn.preprocessing import OneHotEncoder
|
| 11 |
-
from sklearn.pipeline import Pipeline
|
| 12 |
-
from sklearn.ensemble import RandomForestRegressor
|
| 13 |
from sklearn.linear_model import LinearRegression
|
| 14 |
-
from sklearn.metrics import r2_score, mean_absolute_error
|
| 15 |
import warnings
|
| 16 |
warnings.filterwarnings('ignore')
|
| 17 |
|
|
@@ -76,6 +70,7 @@ st.markdown("""
|
|
| 76 |
|
| 77 |
@st.cache_data(show_spinner=False)
|
| 78 |
def generate_synthetic_data(days=60, seed=42, rows_per_day=600):
|
|
|
|
| 79 |
rng = np.random.default_rng(seed)
|
| 80 |
start_date = datetime.today().date() - timedelta(days=days)
|
| 81 |
dates = pd.date_range(start_date, periods=days, freq="D")
|
|
@@ -90,44 +85,59 @@ def generate_synthetic_data(days=60, seed=42, rows_per_day=600):
|
|
| 90 |
channel_discount_mean = {"Direct Sales": 0.06, "Distribution Partners": 0.12, "E-Commerce": 0.04}
|
| 91 |
channel_discount_std = {"Direct Sales": 0.02, "Distribution Partners": 0.03, "E-Commerce": 0.02}
|
| 92 |
|
|
|
|
| 93 |
seg_epsilon = {}
|
| 94 |
for p in products:
|
| 95 |
for r in regions:
|
| 96 |
for c in channels:
|
| 97 |
-
|
|
|
|
| 98 |
if c == "Distribution Partners":
|
| 99 |
-
base_eps -= rng.uniform(0.
|
| 100 |
if c == "E-Commerce":
|
| 101 |
-
base_eps
|
| 102 |
seg_epsilon[(p, r, c)] = base_eps
|
| 103 |
|
| 104 |
records = []
|
| 105 |
-
for d in dates:
|
| 106 |
dow = d.weekday()
|
| 107 |
-
dow_mult = 1.0 + (0.
|
| 108 |
-
macro = 1.0 + 0.03*np.sin((d.toordinal()%365)/365*2*np.pi)
|
| 109 |
|
| 110 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 111 |
prod = rng.choice(products, size=n, p=[0.35, 0.3, 0.2, 0.15])
|
| 112 |
reg = rng.choice(regions, size=n, p=[0.4, 0.35, 0.25])
|
| 113 |
ch = rng.choice(channels, size=n, p=[0.45, 0.35, 0.20])
|
| 114 |
|
| 115 |
base_p = np.array([base_price[x] for x in prod]) * np.array([region_price_bump[x] for x in reg])
|
| 116 |
-
base_c = np.array([base_cost[x] for x in prod]) * np.array([region_cost_bump[x] for x in reg])
|
| 117 |
|
|
|
|
| 118 |
discount = np.clip(
|
| 119 |
np.array([channel_discount_mean[x] for x in ch]) +
|
| 120 |
-
rng.normal(0, [channel_discount_std[x] for x in ch]),
|
|
|
|
| 121 |
)
|
| 122 |
|
| 123 |
-
list_price = rng.normal(base_p,
|
| 124 |
net_price = np.clip(list_price * (1 - discount), 20, None)
|
| 125 |
-
unit_cost = np.clip(rng.normal(base_c,
|
| 126 |
|
| 127 |
eps = np.array([seg_epsilon[(pp, rr, cc)] for pp, rr, cc in zip(prod, reg, ch)])
|
| 128 |
ref_price = np.array([base_price[x] for x in prod])
|
| 129 |
qty_mu = np.exp(eps * (net_price - ref_price) / np.maximum(ref_price, 1e-6))
|
| 130 |
-
qty = np.maximum(1, rng.poisson(8 * dow_mult *
|
| 131 |
|
| 132 |
revenue = net_price * qty
|
| 133 |
cogs = unit_cost * qty
|
|
@@ -156,14 +166,10 @@ def generate_synthetic_data(days=60, seed=42, rows_per_day=600):
|
|
| 156 |
return df
|
| 157 |
|
| 158 |
def analyze_margin_bridge(df, current_date, prior_date):
|
| 159 |
-
"""
|
| 160 |
-
Professional Price-Volume-Mix (PVM) analysis following FP&A best practices
|
| 161 |
-
Breaks down GM variance into: Price Effect, Volume Effect, Mix Effect, Cost Effect
|
| 162 |
-
"""
|
| 163 |
current_data = df[df["date"] == current_date].copy()
|
| 164 |
prior_data = df[df["date"] == prior_date].copy()
|
| 165 |
|
| 166 |
-
# Calculate totals for both periods
|
| 167 |
current_total_revenue = current_data["revenue"].sum()
|
| 168 |
current_total_cogs = current_data["cogs"].sum()
|
| 169 |
current_total_gm = current_total_revenue - current_total_cogs
|
|
@@ -176,7 +182,6 @@ def analyze_margin_bridge(df, current_date, prior_date):
|
|
| 176 |
|
| 177 |
total_gm_variance = current_total_gm - prior_total_gm
|
| 178 |
|
| 179 |
-
# Aggregate by segment
|
| 180 |
current_seg = current_data.groupby(["product", "region", "channel"]).agg({
|
| 181 |
"revenue": "sum",
|
| 182 |
"cogs": "sum",
|
|
@@ -197,7 +202,6 @@ def analyze_margin_bridge(df, current_date, prior_date):
|
|
| 197 |
prior_seg["gm"] = prior_seg["revenue"] - prior_seg["cogs"]
|
| 198 |
prior_seg["gm_pct"] = prior_seg["gm"] / prior_seg["revenue"]
|
| 199 |
|
| 200 |
-
# Merge segments
|
| 201 |
merged = pd.merge(
|
| 202 |
current_seg,
|
| 203 |
prior_seg,
|
|
@@ -206,17 +210,10 @@ def analyze_margin_bridge(df, current_date, prior_date):
|
|
| 206 |
how="outer"
|
| 207 |
).fillna(0)
|
| 208 |
|
| 209 |
-
#
|
| 210 |
-
# Price Effect: (Current Price - Prior Price) Γ Current Volume
|
| 211 |
merged["price_effect"] = (merged["net_price_curr"] - merged["net_price_prior"]) * merged["qty_curr"]
|
| 212 |
-
|
| 213 |
-
# Volume Effect: (Current Volume - Prior Volume) Γ Prior Price Γ Prior GM%
|
| 214 |
merged["volume_effect"] = (merged["qty_curr"] - merged["qty_prior"]) * merged["net_price_prior"] * merged["gm_pct_prior"]
|
| 215 |
-
|
| 216 |
-
# Cost Effect: -(Current Cost - Prior Cost) Γ Current Volume
|
| 217 |
merged["cost_effect"] = -(merged["unit_cost_curr"] - merged["unit_cost_prior"]) * merged["qty_curr"]
|
| 218 |
-
|
| 219 |
-
# Mix Effect: Residual (actual GM change minus price/volume/cost effects)
|
| 220 |
merged["gm_variance"] = merged["gm_curr"] - merged["gm_prior"]
|
| 221 |
merged["mix_effect"] = merged["gm_variance"] - (merged["price_effect"] + merged["volume_effect"] + merged["cost_effect"])
|
| 222 |
|
|
@@ -233,62 +230,86 @@ def analyze_margin_bridge(df, current_date, prior_date):
|
|
| 233 |
}
|
| 234 |
|
| 235 |
def estimate_segment_elasticity(df, product, region, channel):
|
|
|
|
| 236 |
seg_df = df[(df["product"]==product)&(df["region"]==region)&(df["channel"]==channel)]
|
| 237 |
if len(seg_df) < 100 or seg_df["net_price"].std() < 1e-6 or seg_df["qty"].std() < 1e-6:
|
| 238 |
-
return -
|
| 239 |
try:
|
| 240 |
x = np.log(np.clip(seg_df["net_price"].values, 1e-6, None)).reshape(-1,1)
|
| 241 |
y = np.log(np.clip(seg_df["qty"].values, 1e-6, None))
|
| 242 |
lin = LinearRegression().fit(x, y)
|
| 243 |
-
|
|
|
|
|
|
|
|
|
|
| 244 |
except:
|
| 245 |
-
return -
|
| 246 |
|
| 247 |
-
def
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 263 |
else:
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
"
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 286 |
|
| 287 |
# Main App
|
| 288 |
st.markdown('<h1 class="main-header">π― Daily Profitability Variance Analysis</h1>', unsafe_allow_html=True)
|
| 289 |
st.markdown('<p class="sub-header">Understanding What Drives Daily Margin Changes</p>', unsafe_allow_html=True)
|
| 290 |
|
| 291 |
-
# Generate data
|
| 292 |
with st.spinner("π Loading business data..."):
|
| 293 |
df = generate_synthetic_data(days=60, seed=42, rows_per_day=600)
|
| 294 |
|
|
@@ -350,7 +371,7 @@ with col4:
|
|
| 350 |
delta_color="normal"
|
| 351 |
)
|
| 352 |
|
| 353 |
-
# Trend chart
|
| 354 |
st.markdown("#### π Gross Margin Trend (Last 30 Days)")
|
| 355 |
recent_daily = daily.tail(30)
|
| 356 |
|
|
@@ -365,7 +386,7 @@ fig_trend.add_trace(go.Scatter(
|
|
| 365 |
fillcolor="rgba(31, 119, 180, 0.1)"
|
| 366 |
))
|
| 367 |
fig_trend.add_hline(y=roll7*100, line_dash="dash", line_color="red",
|
| 368 |
-
annotation_text="7-Day
|
| 369 |
fig_trend.update_layout(
|
| 370 |
xaxis_title="Date",
|
| 371 |
yaxis_title="Gross Margin %",
|
|
@@ -380,8 +401,8 @@ st.markdown("---")
|
|
| 380 |
with st.spinner("π¬ Performing Price-Volume-Mix analysis..."):
|
| 381 |
variance_detail, summary = analyze_margin_bridge(df, current_date, prior_date)
|
| 382 |
|
| 383 |
-
# Main
|
| 384 |
-
tab1, tab2, tab3 = st.tabs(["π Margin Bridge (PVM)", "π Segment Deep Dive", "π‘ Pricing
|
| 385 |
|
| 386 |
with tab1:
|
| 387 |
st.markdown(f"### Gross Margin Bridge: {prior_date.strftime('%b %d')} β {current_date.strftime('%b %d')}")
|
|
@@ -394,7 +415,7 @@ with tab1:
|
|
| 394 |
</div>
|
| 395 |
""", unsafe_allow_html=True)
|
| 396 |
|
| 397 |
-
# Waterfall Chart
|
| 398 |
st.markdown("#### Price-Volume-Mix (PVM) Waterfall Analysis")
|
| 399 |
|
| 400 |
waterfall_data = pd.DataFrame({
|
|
@@ -438,23 +459,21 @@ with tab1:
|
|
| 438 |
)
|
| 439 |
st.plotly_chart(fig_waterfall, use_container_width=True)
|
| 440 |
|
| 441 |
-
#
|
| 442 |
col_exp1, col_exp2 = st.columns(2)
|
| 443 |
|
| 444 |
with col_exp1:
|
| 445 |
st.markdown(f"""
|
| 446 |
<div class="insight-box">
|
| 447 |
<b>π° Price Effect:</b> ${summary['price_effect_total']/1000:+.1f}K<br>
|
| 448 |
-
<small>Impact of changes in
|
| 449 |
-
Positive = higher prices captured, Negative = price erosion or higher discounts.</small>
|
| 450 |
</div>
|
| 451 |
""", unsafe_allow_html=True)
|
| 452 |
|
| 453 |
st.markdown(f"""
|
| 454 |
<div class="insight-box">
|
| 455 |
<b>π¦ Volume Effect:</b> ${summary['volume_effect_total']/1000:+.1f}K<br>
|
| 456 |
-
<small>Impact of selling more
|
| 457 |
-
Positive = higher volumes, Negative = volume decline.</small>
|
| 458 |
</div>
|
| 459 |
""", unsafe_allow_html=True)
|
| 460 |
|
|
@@ -462,187 +481,173 @@ with tab1:
|
|
| 462 |
st.markdown(f"""
|
| 463 |
<div class="insight-box">
|
| 464 |
<b>π Cost Effect:</b> ${summary['cost_effect_total']/1000:+.1f}K<br>
|
| 465 |
-
<small>Impact of changes in unit costs
|
| 466 |
-
Positive = cost reduction, Negative = cost inflation.</small>
|
| 467 |
</div>
|
| 468 |
""", unsafe_allow_html=True)
|
| 469 |
|
| 470 |
st.markdown(f"""
|
| 471 |
<div class="insight-box">
|
| 472 |
<b>π Mix Effect:</b> ${summary['mix_effect_total']/1000:+.1f}K<br>
|
| 473 |
-
<small>Impact of
|
| 474 |
-
Reflects selling relatively more/less of high-margin items.</small>
|
| 475 |
</div>
|
| 476 |
""", unsafe_allow_html=True)
|
| 477 |
|
| 478 |
-
# Key Insight
|
| 479 |
-
dominant_effect = max([
|
| 480 |
-
("Price changes", summary['price_effect_total']),
|
| 481 |
-
("Volume changes", summary['volume_effect_total']),
|
| 482 |
-
("Cost changes", summary['cost_effect_total']),
|
| 483 |
-
("Mix shifts", summary['mix_effect_total'])
|
| 484 |
-
], key=lambda x: abs(x[1]))
|
| 485 |
-
|
| 486 |
-
st.markdown(f"""
|
| 487 |
-
<div class="{'insight-box' if gm_variance_dollar > 0 else 'warning-box'}">
|
| 488 |
-
<b>π― Key Takeaway:</b><br>
|
| 489 |
-
The primary driver of today's margin {'improvement' if gm_variance_dollar > 0 else 'decline'} was
|
| 490 |
-
<b>{dominant_effect[0]}</b>, contributing ${dominant_effect[1]/1000:+.1f}K to the overall variance.
|
| 491 |
-
</div>
|
| 492 |
-
""", unsafe_allow_html=True)
|
| 493 |
-
|
| 494 |
with tab2:
|
| 495 |
st.markdown("### Segment-Level Variance Analysis")
|
| 496 |
-
st.markdown("""
|
| 497 |
-
<div class="insight-box">
|
| 498 |
-
<b>π Detailed Breakdown:</b> Which specific product-region-channel combinations drove the margin change?
|
| 499 |
-
</div>
|
| 500 |
-
""", unsafe_allow_html=True)
|
| 501 |
|
| 502 |
-
# Top positive and negative contributors
|
| 503 |
variance_detail_sorted = variance_detail.sort_values("gm_variance", ascending=False)
|
| 504 |
|
| 505 |
col_seg1, col_seg2 = st.columns(2)
|
| 506 |
|
| 507 |
with col_seg1:
|
| 508 |
st.markdown("#### π Top 5 Margin Gainers")
|
| 509 |
-
|
| 510 |
-
|
| 511 |
-
for idx, row in top_gainers.iterrows():
|
| 512 |
if row["gm_variance"] > 0:
|
| 513 |
st.markdown(f"""
|
| 514 |
<div class="recommendation-card" style="border-left: 4px solid #28a745;">
|
| 515 |
<b>{row['product']}</b><br>
|
| 516 |
<small>{row['region']} β’ {row['channel']}</small><br>
|
| 517 |
<span class="positive-impact">+${row['gm_variance']:.2f}</span><br>
|
| 518 |
-
<small>
|
| 519 |
-
β’ Price Effect: ${row['price_effect']:+.2f}<br>
|
| 520 |
-
β’ Volume Effect: ${row['volume_effect']:+.2f}<br>
|
| 521 |
-
β’ Cost Effect: ${row['cost_effect']:+.2f}<br>
|
| 522 |
-
β’ Mix Effect: ${row['mix_effect']:+.2f}
|
| 523 |
-
</small>
|
| 524 |
</div>
|
| 525 |
""", unsafe_allow_html=True)
|
| 526 |
|
| 527 |
with col_seg2:
|
| 528 |
st.markdown("#### π Top 5 Margin Losers")
|
| 529 |
-
|
| 530 |
-
|
| 531 |
-
for idx, row in top_losers.iterrows():
|
| 532 |
if row["gm_variance"] < 0:
|
| 533 |
st.markdown(f"""
|
| 534 |
<div class="recommendation-card" style="border-left: 4px solid #dc3545;">
|
| 535 |
<b>{row['product']}</b><br>
|
| 536 |
<small>{row['region']} β’ {row['channel']}</small><br>
|
| 537 |
<span class="negative-impact">${row['gm_variance']:.2f}</span><br>
|
| 538 |
-
<small>
|
| 539 |
-
β’ Price Effect: ${row['price_effect']:+.2f}<br>
|
| 540 |
-
β’ Volume Effect: ${row['volume_effect']:+.2f}<br>
|
| 541 |
-
β’ Cost Effect: ${row['cost_effect']:+.2f}<br>
|
| 542 |
-
β’ Mix Effect: ${row['mix_effect']:+.2f}
|
| 543 |
-
</small>
|
| 544 |
</div>
|
| 545 |
""", unsafe_allow_html=True)
|
| 546 |
|
| 547 |
-
# Detailed table
|
| 548 |
-
st.markdown("---")
|
| 549 |
-
st.markdown("#### Complete Segment Variance Table")
|
| 550 |
-
|
| 551 |
-
display_variance = variance_detail[[
|
| 552 |
-
"product", "region", "channel", "gm_variance",
|
| 553 |
-
"price_effect", "volume_effect", "cost_effect", "mix_effect"
|
| 554 |
-
]].sort_values("gm_variance", ascending=False)
|
| 555 |
-
|
| 556 |
-
display_variance.columns = [
|
| 557 |
-
"Product", "Region", "Channel", "GM Variance",
|
| 558 |
-
"Price Effect", "Volume Effect", "Cost Effect", "Mix Effect"
|
| 559 |
-
]
|
| 560 |
-
|
| 561 |
-
st.dataframe(display_variance.style.format({
|
| 562 |
-
"GM Variance": "${:,.2f}",
|
| 563 |
-
"Price Effect": "${:,.2f}",
|
| 564 |
-
"Volume Effect": "${:,.2f}",
|
| 565 |
-
"Cost Effect": "${:,.2f}",
|
| 566 |
-
"Mix Effect": "${:,.2f}"
|
| 567 |
-
}).background_gradient(subset=["GM Variance"], cmap="RdYlGn", vmin=-1000, vmax=1000),
|
| 568 |
-
use_container_width=True, height=400)
|
| 569 |
-
|
| 570 |
with tab3:
|
| 571 |
-
st.markdown("### Pricing
|
| 572 |
st.markdown("""
|
| 573 |
<div class="insight-box">
|
| 574 |
-
<b
|
|
|
|
| 575 |
</div>
|
| 576 |
""", unsafe_allow_html=True)
|
| 577 |
|
| 578 |
-
#
|
| 579 |
-
|
| 580 |
-
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
|
|
|
|
| 585 |
|
| 586 |
-
|
| 587 |
-
for _, seg in problem_segments.head(15).iterrows():
|
| 588 |
p, r, c = seg["product"], seg["region"], seg["channel"]
|
| 589 |
hist = df[(df["product"]==p)&(df["region"]==r)&(df["channel"]==c)].sort_values("date")
|
| 590 |
|
| 591 |
-
if hist.empty or len(hist) <
|
| 592 |
continue
|
| 593 |
|
| 594 |
-
|
| 595 |
-
discount_reduction = 2.0 # Standard 2pp reduction
|
| 596 |
-
sim = simulate_pricing_action(hist, eps, discount_reduction)
|
| 597 |
|
| 598 |
-
if
|
| 599 |
-
|
| 600 |
-
|
|
|
|
|
|
|
| 601 |
|
| 602 |
-
|
| 603 |
-
|
|
|
|
| 604 |
"Region": r,
|
| 605 |
"Channel": c,
|
| 606 |
-
"
|
| 607 |
-
"
|
| 608 |
-
"
|
| 609 |
-
"
|
| 610 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 611 |
})
|
| 612 |
|
| 613 |
-
|
| 614 |
|
| 615 |
-
if len(
|
| 616 |
-
st.markdown("#### π Top
|
|
|
|
|
|
|
|
|
|
| 617 |
|
| 618 |
-
for i, (_, rec) in enumerate(recs_df.head(3).iterrows()):
|
| 619 |
st.markdown(f"""
|
| 620 |
-
<div class="recommendation-card">
|
| 621 |
-
<h4>#{i+1}: {rec['
|
| 622 |
-
<p><b>
|
| 623 |
-
<p><b>
|
| 624 |
-
<p><
|
| 625 |
-
<p class="positive-impact">π° Expected
|
| 626 |
-
<p><small
|
| 627 |
</div>
|
| 628 |
""", unsafe_allow_html=True)
|
| 629 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 630 |
st.markdown("---")
|
| 631 |
-
st.markdown("#### Complete
|
| 632 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 633 |
|
| 634 |
st.download_button(
|
| 635 |
-
label="π₯ Download
|
| 636 |
-
data=
|
| 637 |
-
file_name=f"
|
| 638 |
mime="text/csv"
|
| 639 |
)
|
| 640 |
else:
|
| 641 |
-
st.
|
| 642 |
|
| 643 |
st.markdown("---")
|
| 644 |
st.markdown("""
|
| 645 |
<div style="text-align: center; color: #666; padding: 1rem;">
|
| 646 |
-
<small>π Demo Mode: Using synthetic
|
| 647 |
</div>
|
| 648 |
""", unsafe_allow_html=True)
|
|
|
|
| 5 |
import plotly.graph_objects as go
|
| 6 |
from plotly.subplots import make_subplots
|
| 7 |
from datetime import datetime, timedelta
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
from sklearn.linear_model import LinearRegression
|
|
|
|
| 9 |
import warnings
|
| 10 |
warnings.filterwarnings('ignore')
|
| 11 |
|
|
|
|
| 70 |
|
| 71 |
@st.cache_data(show_spinner=False)
|
| 72 |
def generate_synthetic_data(days=60, seed=42, rows_per_day=600):
|
| 73 |
+
"""Generate data with REALISTIC variance patterns"""
|
| 74 |
rng = np.random.default_rng(seed)
|
| 75 |
start_date = datetime.today().date() - timedelta(days=days)
|
| 76 |
dates = pd.date_range(start_date, periods=days, freq="D")
|
|
|
|
| 85 |
channel_discount_mean = {"Direct Sales": 0.06, "Distribution Partners": 0.12, "E-Commerce": 0.04}
|
| 86 |
channel_discount_std = {"Direct Sales": 0.02, "Distribution Partners": 0.03, "E-Commerce": 0.02}
|
| 87 |
|
| 88 |
+
# Elasticity varies by segment
|
| 89 |
seg_epsilon = {}
|
| 90 |
for p in products:
|
| 91 |
for r in regions:
|
| 92 |
for c in channels:
|
| 93 |
+
# More realistic elasticity range: -0.5 to -2.5
|
| 94 |
+
base_eps = rng.uniform(-2.5, -0.5)
|
| 95 |
if c == "Distribution Partners":
|
| 96 |
+
base_eps -= rng.uniform(0.3, 0.8) # More price sensitive
|
| 97 |
if c == "E-Commerce":
|
| 98 |
+
base_eps -= rng.uniform(0.2, 0.5) # Also price sensitive
|
| 99 |
seg_epsilon[(p, r, c)] = base_eps
|
| 100 |
|
| 101 |
records = []
|
| 102 |
+
for idx, d in enumerate(dates):
|
| 103 |
dow = d.weekday()
|
| 104 |
+
dow_mult = 1.0 + (0.08 if dow in (5, 6) else 0)
|
|
|
|
| 105 |
|
| 106 |
+
# Add realistic seasonality and random shocks
|
| 107 |
+
seasonal = 1.0 + 0.05*np.sin((d.toordinal()%365)/365*2*np.pi)
|
| 108 |
+
|
| 109 |
+
# Random market shocks (some days have big changes)
|
| 110 |
+
if rng.random() < 0.15: # 15% of days have shocks
|
| 111 |
+
market_shock = rng.uniform(0.85, 1.15)
|
| 112 |
+
else:
|
| 113 |
+
market_shock = 1.0
|
| 114 |
+
|
| 115 |
+
# Gradual cost trends
|
| 116 |
+
cost_trend = 1.0 + (idx / len(dates)) * 0.03 # 3% cost increase over period
|
| 117 |
+
|
| 118 |
+
n = int(rows_per_day * market_shock * seasonal)
|
| 119 |
prod = rng.choice(products, size=n, p=[0.35, 0.3, 0.2, 0.15])
|
| 120 |
reg = rng.choice(regions, size=n, p=[0.4, 0.35, 0.25])
|
| 121 |
ch = rng.choice(channels, size=n, p=[0.45, 0.35, 0.20])
|
| 122 |
|
| 123 |
base_p = np.array([base_price[x] for x in prod]) * np.array([region_price_bump[x] for x in reg])
|
| 124 |
+
base_c = np.array([base_cost[x] for x in prod]) * np.array([region_cost_bump[x] for x in reg]) * cost_trend
|
| 125 |
|
| 126 |
+
# More variance in discounts
|
| 127 |
discount = np.clip(
|
| 128 |
np.array([channel_discount_mean[x] for x in ch]) +
|
| 129 |
+
rng.normal(0, [channel_discount_std[x] * 2 for x in ch]), # Double the variance
|
| 130 |
+
0, 0.45
|
| 131 |
)
|
| 132 |
|
| 133 |
+
list_price = rng.normal(base_p, 8) # More price variance
|
| 134 |
net_price = np.clip(list_price * (1 - discount), 20, None)
|
| 135 |
+
unit_cost = np.clip(rng.normal(base_c, 6), 10, None)
|
| 136 |
|
| 137 |
eps = np.array([seg_epsilon[(pp, rr, cc)] for pp, rr, cc in zip(prod, reg, ch)])
|
| 138 |
ref_price = np.array([base_price[x] for x in prod])
|
| 139 |
qty_mu = np.exp(eps * (net_price - ref_price) / np.maximum(ref_price, 1e-6))
|
| 140 |
+
qty = np.maximum(1, rng.poisson(8 * dow_mult * seasonal * market_shock * qty_mu))
|
| 141 |
|
| 142 |
revenue = net_price * qty
|
| 143 |
cogs = unit_cost * qty
|
|
|
|
| 166 |
return df
|
| 167 |
|
| 168 |
def analyze_margin_bridge(df, current_date, prior_date):
|
| 169 |
+
"""Professional Price-Volume-Mix (PVM) analysis"""
|
|
|
|
|
|
|
|
|
|
| 170 |
current_data = df[df["date"] == current_date].copy()
|
| 171 |
prior_data = df[df["date"] == prior_date].copy()
|
| 172 |
|
|
|
|
| 173 |
current_total_revenue = current_data["revenue"].sum()
|
| 174 |
current_total_cogs = current_data["cogs"].sum()
|
| 175 |
current_total_gm = current_total_revenue - current_total_cogs
|
|
|
|
| 182 |
|
| 183 |
total_gm_variance = current_total_gm - prior_total_gm
|
| 184 |
|
|
|
|
| 185 |
current_seg = current_data.groupby(["product", "region", "channel"]).agg({
|
| 186 |
"revenue": "sum",
|
| 187 |
"cogs": "sum",
|
|
|
|
| 202 |
prior_seg["gm"] = prior_seg["revenue"] - prior_seg["cogs"]
|
| 203 |
prior_seg["gm_pct"] = prior_seg["gm"] / prior_seg["revenue"]
|
| 204 |
|
|
|
|
| 205 |
merged = pd.merge(
|
| 206 |
current_seg,
|
| 207 |
prior_seg,
|
|
|
|
| 210 |
how="outer"
|
| 211 |
).fillna(0)
|
| 212 |
|
| 213 |
+
# PVM Decomposition
|
|
|
|
| 214 |
merged["price_effect"] = (merged["net_price_curr"] - merged["net_price_prior"]) * merged["qty_curr"]
|
|
|
|
|
|
|
| 215 |
merged["volume_effect"] = (merged["qty_curr"] - merged["qty_prior"]) * merged["net_price_prior"] * merged["gm_pct_prior"]
|
|
|
|
|
|
|
| 216 |
merged["cost_effect"] = -(merged["unit_cost_curr"] - merged["unit_cost_prior"]) * merged["qty_curr"]
|
|
|
|
|
|
|
| 217 |
merged["gm_variance"] = merged["gm_curr"] - merged["gm_prior"]
|
| 218 |
merged["mix_effect"] = merged["gm_variance"] - (merged["price_effect"] + merged["volume_effect"] + merged["cost_effect"])
|
| 219 |
|
|
|
|
| 230 |
}
|
| 231 |
|
| 232 |
def estimate_segment_elasticity(df, product, region, channel):
|
| 233 |
+
"""Estimate price elasticity for a segment"""
|
| 234 |
seg_df = df[(df["product"]==product)&(df["region"]==region)&(df["channel"]==channel)]
|
| 235 |
if len(seg_df) < 100 or seg_df["net_price"].std() < 1e-6 or seg_df["qty"].std() < 1e-6:
|
| 236 |
+
return -1.2, False # Default elasticity
|
| 237 |
try:
|
| 238 |
x = np.log(np.clip(seg_df["net_price"].values, 1e-6, None)).reshape(-1,1)
|
| 239 |
y = np.log(np.clip(seg_df["qty"].values, 1e-6, None))
|
| 240 |
lin = LinearRegression().fit(x, y)
|
| 241 |
+
elasticity = float(lin.coef_[0])
|
| 242 |
+
# Bound elasticity to realistic range
|
| 243 |
+
elasticity = np.clip(elasticity, -5.0, -0.3)
|
| 244 |
+
return elasticity, True
|
| 245 |
except:
|
| 246 |
+
return -1.2, False
|
| 247 |
|
| 248 |
+
def find_optimal_discount(base_data, elasticity, search_range=(-10, 10)):
|
| 249 |
+
"""
|
| 250 |
+
Find profit-maximizing discount using price elasticity of demand
|
| 251 |
+
Can recommend INCREASING or DECREASING discount
|
| 252 |
+
"""
|
| 253 |
+
current_discount = base_data["discount_pct"]
|
| 254 |
+
current_list_price = base_data["list_price"]
|
| 255 |
+
current_price = base_data["net_price"]
|
| 256 |
+
current_cost = base_data["unit_cost"]
|
| 257 |
+
current_qty = base_data["qty"]
|
| 258 |
+
|
| 259 |
+
# Test discount changes from -10pp to +10pp
|
| 260 |
+
discount_changes = np.linspace(search_range[0], search_range[1], 41)
|
| 261 |
+
results = []
|
| 262 |
+
|
| 263 |
+
for disc_change in discount_changes:
|
| 264 |
+
new_discount = np.clip(current_discount + (disc_change/100), 0.0, 0.50)
|
| 265 |
+
new_price = current_list_price * (1 - new_discount)
|
| 266 |
+
|
| 267 |
+
# Apply elasticity
|
| 268 |
+
if current_price > 0:
|
| 269 |
+
price_ratio = new_price / current_price
|
| 270 |
+
new_qty = current_qty * (price_ratio ** elasticity)
|
| 271 |
else:
|
| 272 |
+
new_qty = current_qty
|
| 273 |
+
|
| 274 |
+
new_revenue = new_price * new_qty
|
| 275 |
+
new_cogs = current_cost * new_qty
|
| 276 |
+
new_gm = new_revenue - new_cogs
|
| 277 |
+
|
| 278 |
+
results.append({
|
| 279 |
+
"discount_change": disc_change,
|
| 280 |
+
"new_discount": new_discount * 100,
|
| 281 |
+
"new_price": new_price,
|
| 282 |
+
"new_qty": new_qty,
|
| 283 |
+
"new_gm": new_gm,
|
| 284 |
+
"new_revenue": new_revenue
|
| 285 |
+
})
|
| 286 |
+
|
| 287 |
+
results_df = pd.DataFrame(results)
|
| 288 |
+
optimal_idx = results_df["new_gm"].idxmax()
|
| 289 |
+
optimal = results_df.iloc[optimal_idx]
|
| 290 |
+
|
| 291 |
+
current_gm = current_price * current_qty - current_cost * current_qty
|
| 292 |
+
|
| 293 |
+
return {
|
| 294 |
+
"current_discount": current_discount * 100,
|
| 295 |
+
"optimal_discount": optimal["new_discount"],
|
| 296 |
+
"discount_change": optimal["discount_change"],
|
| 297 |
+
"current_price": current_price,
|
| 298 |
+
"optimal_price": optimal["new_price"],
|
| 299 |
+
"current_qty": current_qty,
|
| 300 |
+
"optimal_qty": optimal["new_qty"],
|
| 301 |
+
"current_gm": current_gm,
|
| 302 |
+
"optimal_gm": optimal["new_gm"],
|
| 303 |
+
"gm_uplift": optimal["new_gm"] - current_gm,
|
| 304 |
+
"elasticity": elasticity,
|
| 305 |
+
"all_scenarios": results_df
|
| 306 |
+
}
|
| 307 |
|
| 308 |
# Main App
|
| 309 |
st.markdown('<h1 class="main-header">π― Daily Profitability Variance Analysis</h1>', unsafe_allow_html=True)
|
| 310 |
st.markdown('<p class="sub-header">Understanding What Drives Daily Margin Changes</p>', unsafe_allow_html=True)
|
| 311 |
|
| 312 |
+
# Generate data with realistic variance
|
| 313 |
with st.spinner("π Loading business data..."):
|
| 314 |
df = generate_synthetic_data(days=60, seed=42, rows_per_day=600)
|
| 315 |
|
|
|
|
| 371 |
delta_color="normal"
|
| 372 |
)
|
| 373 |
|
| 374 |
+
# Trend chart with REAL variance
|
| 375 |
st.markdown("#### π Gross Margin Trend (Last 30 Days)")
|
| 376 |
recent_daily = daily.tail(30)
|
| 377 |
|
|
|
|
| 386 |
fillcolor="rgba(31, 119, 180, 0.1)"
|
| 387 |
))
|
| 388 |
fig_trend.add_hline(y=roll7*100, line_dash="dash", line_color="red",
|
| 389 |
+
annotation_text=f"7-Day Avg: {roll7*100:.2f}%", annotation_position="right")
|
| 390 |
fig_trend.update_layout(
|
| 391 |
xaxis_title="Date",
|
| 392 |
yaxis_title="Gross Margin %",
|
|
|
|
| 401 |
with st.spinner("π¬ Performing Price-Volume-Mix analysis..."):
|
| 402 |
variance_detail, summary = analyze_margin_bridge(df, current_date, prior_date)
|
| 403 |
|
| 404 |
+
# Main Tabs
|
| 405 |
+
tab1, tab2, tab3 = st.tabs(["π Margin Bridge (PVM)", "π Segment Deep Dive", "π‘ Optimal Pricing"])
|
| 406 |
|
| 407 |
with tab1:
|
| 408 |
st.markdown(f"### Gross Margin Bridge: {prior_date.strftime('%b %d')} β {current_date.strftime('%b %d')}")
|
|
|
|
| 415 |
</div>
|
| 416 |
""", unsafe_allow_html=True)
|
| 417 |
|
| 418 |
+
# Waterfall Chart
|
| 419 |
st.markdown("#### Price-Volume-Mix (PVM) Waterfall Analysis")
|
| 420 |
|
| 421 |
waterfall_data = pd.DataFrame({
|
|
|
|
| 459 |
)
|
| 460 |
st.plotly_chart(fig_waterfall, use_container_width=True)
|
| 461 |
|
| 462 |
+
# Explanations
|
| 463 |
col_exp1, col_exp2 = st.columns(2)
|
| 464 |
|
| 465 |
with col_exp1:
|
| 466 |
st.markdown(f"""
|
| 467 |
<div class="insight-box">
|
| 468 |
<b>π° Price Effect:</b> ${summary['price_effect_total']/1000:+.1f}K<br>
|
| 469 |
+
<small>Impact of changes in realized selling prices</small>
|
|
|
|
| 470 |
</div>
|
| 471 |
""", unsafe_allow_html=True)
|
| 472 |
|
| 473 |
st.markdown(f"""
|
| 474 |
<div class="insight-box">
|
| 475 |
<b>π¦ Volume Effect:</b> ${summary['volume_effect_total']/1000:+.1f}K<br>
|
| 476 |
+
<small>Impact of selling more/fewer units</small>
|
|
|
|
| 477 |
</div>
|
| 478 |
""", unsafe_allow_html=True)
|
| 479 |
|
|
|
|
| 481 |
st.markdown(f"""
|
| 482 |
<div class="insight-box">
|
| 483 |
<b>π Cost Effect:</b> ${summary['cost_effect_total']/1000:+.1f}K<br>
|
| 484 |
+
<small>Impact of changes in unit costs</small>
|
|
|
|
| 485 |
</div>
|
| 486 |
""", unsafe_allow_html=True)
|
| 487 |
|
| 488 |
st.markdown(f"""
|
| 489 |
<div class="insight-box">
|
| 490 |
<b>π Mix Effect:</b> ${summary['mix_effect_total']/1000:+.1f}K<br>
|
| 491 |
+
<small>Impact of product/channel mix shifts</small>
|
|
|
|
| 492 |
</div>
|
| 493 |
""", unsafe_allow_html=True)
|
| 494 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 495 |
with tab2:
|
| 496 |
st.markdown("### Segment-Level Variance Analysis")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 497 |
|
|
|
|
| 498 |
variance_detail_sorted = variance_detail.sort_values("gm_variance", ascending=False)
|
| 499 |
|
| 500 |
col_seg1, col_seg2 = st.columns(2)
|
| 501 |
|
| 502 |
with col_seg1:
|
| 503 |
st.markdown("#### π Top 5 Margin Gainers")
|
| 504 |
+
for _, row in variance_detail_sorted.head(5).iterrows():
|
|
|
|
|
|
|
| 505 |
if row["gm_variance"] > 0:
|
| 506 |
st.markdown(f"""
|
| 507 |
<div class="recommendation-card" style="border-left: 4px solid #28a745;">
|
| 508 |
<b>{row['product']}</b><br>
|
| 509 |
<small>{row['region']} β’ {row['channel']}</small><br>
|
| 510 |
<span class="positive-impact">+${row['gm_variance']:.2f}</span><br>
|
| 511 |
+
<small>Price: ${row['price_effect']:+.2f} | Volume: ${row['volume_effect']:+.2f} | Cost: ${row['cost_effect']:+.2f}</small>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 512 |
</div>
|
| 513 |
""", unsafe_allow_html=True)
|
| 514 |
|
| 515 |
with col_seg2:
|
| 516 |
st.markdown("#### π Top 5 Margin Losers")
|
| 517 |
+
for _, row in variance_detail_sorted.tail(5).iterrows():
|
|
|
|
|
|
|
| 518 |
if row["gm_variance"] < 0:
|
| 519 |
st.markdown(f"""
|
| 520 |
<div class="recommendation-card" style="border-left: 4px solid #dc3545;">
|
| 521 |
<b>{row['product']}</b><br>
|
| 522 |
<small>{row['region']} β’ {row['channel']}</small><br>
|
| 523 |
<span class="negative-impact">${row['gm_variance']:.2f}</span><br>
|
| 524 |
+
<small>Price: ${row['price_effect']:+.2f} | Volume: ${row['volume_effect']:+.2f} | Cost: ${row['cost_effect']:+.2f}</small>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 525 |
</div>
|
| 526 |
""", unsafe_allow_html=True)
|
| 527 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 528 |
# --- Tab 3: Optimal Pricing Analysis ---------------------------------------
# For every recent high-volume (product, region, channel) segment, estimate
# price elasticity from its history and search for the discount level that
# maximizes gross margin; surface the biggest opportunities with charts and
# a downloadable plan.
with tab3:
    st.markdown("### Optimal Pricing Analysis")
    st.markdown("""
    <div class="insight-box">
    <b>π― Profit Maximization:</b> Using price elasticity of demand to find the optimal discount level.
    <br>May recommend <b>increasing</b> or <b>decreasing</b> discount depending on elasticity.
    </div>
    """, unsafe_allow_html=True)

    # Get segments with meaningful volume over the trailing 7 days.
    recent_segments = df[df["date"] >= (current_date - timedelta(days=7))].groupby(["product", "region", "channel"]).agg({
        "qty": "sum",
        "gm_value": "sum"
    }).reset_index()
    recent_segments = recent_segments[recent_segments["qty"] > 100]  # Minimum volume threshold

    optimization_results = []

    for _, seg in recent_segments.iterrows():
        p, r, c = seg["product"], seg["region"], seg["channel"]
        hist = df[(df["product"] == p) & (df["region"] == r) & (df["channel"] == c)].sort_values("date")

        # Need a reasonable amount of history for a stable elasticity fit.
        if hist.empty or len(hist) < 100:
            continue

        elasticity, is_valid = estimate_segment_elasticity(hist, p, r, c)

        if not is_valid:
            continue

        current_state = hist.iloc[-1]
        optimal_result = find_optimal_discount(current_state, elasticity)

        if abs(optimal_result["gm_uplift"]) > 5:  # Only show meaningful opportunities
            optimization_results.append({
                "Product": p,
                "Region": r,
                "Channel": c,
                "Current Discount": optimal_result["current_discount"],
                "Optimal Discount": optimal_result["optimal_discount"],
                "Discount Change": optimal_result["discount_change"],
                "Price Elasticity": elasticity,
                "Current GM/Day": optimal_result["current_gm"],
                "Optimal GM/Day": optimal_result["optimal_gm"],
                "Daily GM Uplift": optimal_result["gm_uplift"],
                "Direction": "Increase Discount" if optimal_result["discount_change"] > 0 else "Decrease Discount",
                "all_scenarios": optimal_result["all_scenarios"]
            })

    # BUG FIX: pd.DataFrame([]) has no columns, so calling
    # .sort_values("Daily GM Uplift") on it raised a KeyError whenever no
    # segment passed the filters above — which made the "near optimal"
    # message below unreachable. Sort only when there is something to sort.
    opt_df = pd.DataFrame(optimization_results)
    if not opt_df.empty:
        opt_df = opt_df.sort_values("Daily GM Uplift", ascending=False)

    if len(opt_df) > 0:
        st.markdown("#### π Top 5 Optimization Opportunities")

        for i, (_, rec) in enumerate(opt_df.head(5).iterrows()):
            # Orange card border when raising the discount, blue when cutting it.
            direction_color = "#ff7f0e" if rec["Direction"] == "Increase Discount" else "#1f77b4"

            st.markdown(f"""
            <div class="recommendation-card" style="border-left: 5px solid {direction_color};">
            <h4>#{i+1}: {rec['Product']} β’ {rec['Region']} β’ {rec['Channel']}</h4>
            <p><b>Elasticity:</b> {rec['Price Elasticity']:.2f} ({"Elastic" if rec['Price Elasticity'] < -1.5 else "Inelastic"})</p>
            <p><b>Recommendation:</b> {rec['Direction']} by {abs(rec['Discount Change']):.1f}pp</p>
            <p><small>Current: {rec['Current Discount']:.1f}% β Optimal: {rec['Optimal Discount']:.1f}%</small></p>
            <p class="positive-impact">π° Expected Uplift: ${rec['Daily GM Uplift']:.2f}/day</p>
            <p><small>Annual Impact: ${rec['Daily GM Uplift']*365/1000:.1f}K</small></p>
            </div>
            """, unsafe_allow_html=True)

            # Show elasticity curve for this segment in a collapsible panel.
            with st.expander(f"π View Profit Curve for {rec['Product']} β’ {rec['Region']} β’ {rec['Channel']}"):
                scenario_df = rec["all_scenarios"]

                fig_curve = go.Figure()
                fig_curve.add_trace(go.Scatter(
                    x=scenario_df["new_discount"],
                    y=scenario_df["new_gm"],
                    mode='lines',
                    name='Gross Margin',
                    line=dict(color='#1f77b4', width=3)
                ))

                # Mark where we are vs. where the curve says we should be.
                fig_curve.add_vline(x=rec["Current Discount"], line_dash="dash", line_color="red",
                                    annotation_text=f"Current: {rec['Current Discount']:.1f}%")
                fig_curve.add_vline(x=rec["Optimal Discount"], line_dash="dash", line_color="green",
                                    annotation_text=f"Optimal: {rec['Optimal Discount']:.1f}%")

                fig_curve.update_layout(
                    title=f"Profit Maximization Curve (Elasticity: {rec['Price Elasticity']:.2f})",
                    xaxis_title="Discount Level (%)",
                    yaxis_title="Expected Gross Margin ($)",
                    height=400
                )
                st.plotly_chart(fig_curve, use_container_width=True)

        st.markdown("---")
        st.markdown("#### Complete Optimization List")

        # Drop the per-segment scenario frames for tabular display.
        display_opt = opt_df[[
            "Product", "Region", "Channel", "Current Discount", "Optimal Discount",
            "Discount Change", "Price Elasticity", "Daily GM Uplift", "Direction"
        ]].copy()

        st.dataframe(display_opt.style.format({
            "Current Discount": "{:.1f}%",
            "Optimal Discount": "{:.1f}%",
            "Discount Change": "{:+.1f}pp",
            "Price Elasticity": "{:.2f}",
            "Daily GM Uplift": "${:,.2f}"
        }).background_gradient(subset=["Daily GM Uplift"], cmap="Greens"),
        use_container_width=True, height=400)

        st.download_button(
            label="π₯ Download Optimization Plan (CSV)",
            # "all_scenarios" holds DataFrames and cannot be serialized to CSV.
            data=opt_df.drop(columns=["all_scenarios"]).to_csv(index=False).encode("utf-8"),
            file_name=f"optimal_pricing_plan_{current_date.strftime('%Y%m%d')}.csv",
            mime="text/csv"
        )
    else:
        st.info("All segments are currently near optimal pricing levels.")
|
| 647 |
|
| 648 |
# Page footer: horizontal rule followed by the demo-mode disclaimer banner.
st.markdown("---")
_footer_html = """
<div style="text-align: center; color: #666; padding: 1rem;">
    <small>π Demo Mode: Using synthetic transaction data with realistic variance patterns</small>
</div>
"""
st.markdown(_footer_html, unsafe_allow_html=True)
|