Update app.py
app.py
CHANGED
@@ -8,479 +8,212 @@ import numpy as np
import plotly.express as px
import joblib

- # ZeroGPU hooks (safe on CPU Spaces too)
import spaces
import torch
-
- # Optional micro-model to "polish" text when GPU window is available
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

- #
- #
- #
DATA_DIR = Path("data"); DATA_DIR.mkdir(exist_ok=True)
TS_FMT = "%Y-%m-%d %H:%M:%S"

- # Load your regressor
DT_PATH = "./decision_tree_regressor.joblib"
decision_tree_regressor = joblib.load(DT_PATH)

- #
- GEN_MODEL = "google/flan-t5-small"
_tokenizer = AutoTokenizer.from_pretrained(GEN_MODEL)
_model = AutoModelForSeq2SeqLM.from_pretrained(GEN_MODEL)
_generate_cpu = pipeline("text2text-generation", model=_model, tokenizer=_tokenizer, device=-1)

- #
-
- #
@spaces.GPU
def gpu_warmup() -> str:
    return f"cuda={torch.cuda.is_available()}"

@spaces.GPU
- def polish_on_gpu(text: str, lang: str) -> str:
-     """Polish/translate the already-generated plan inside a GPU window.
-     Falls back to CPU gracefully if needed.
-     """
    try:
        if torch.cuda.is_available():
-             gen = pipeline(
-                 "text2text-generation",
-                 model=_model.to("cuda"),
-                 tokenizer=_tokenizer,
-                 device=0,
-             )
        else:
-             gen = _generate_cpu
-         prompt = (
-             "Rewrite the following fasting plan in a friendly coaching tone, keep markdown structure, "
-             f"and output language '{lang}'. Keep tables and numbered lists concise.\n\n" + text
-         )
-         out = gen(prompt, max_new_tokens=700)
        return out[0]["generated_text"].strip()
    except Exception as e:
-         out = _generate_cpu(prompt, max_new_tokens=700)
-         return out[0]["generated_text"].strip()

try:
    _ = gpu_warmup()
except Exception:
    pass

- #
- #
- #
- ACTIVITY = {
-     "Sedentary": 1.2,
-     "Lightly active": 1.375,
-     "Moderately active": 1.55,
-     "Very active": 1.725,
-     "Athlete": 1.9,
- }
-
-
- def bmi(weight_kg: float, height_cm: float) -> float:
-     return weight_kg / ((height_cm / 100) ** 2)
-
-
- def bmr_mifflin(sex: str, weight_kg: float, height_cm: float, age: float) -> float:
-     s = 5 if sex == "Male" else -161
-     return 10 * weight_kg + 6.25 * height_cm - 5 * age + s
-
-
- def tdee(bmr: float, activity: str) -> float:
-     return bmr * ACTIVITY.get(activity, 1.2)
-
-
- def parse_hhmm(hhmm: str) -> Tuple[int, int]:
-     h, m = hhmm.split(":")
-     h = int(h); m = int(m)
-     if not (0 <= h <= 23 and 0 <= m <= 59):
-         raise ValueError("Time must be HH:MM in 24h format.")
-     return h, m
-
-
- def fmt_hhmm(h: int, m: int) -> str:
-     return f"{h:02d}:{m:02d}"
-
- # ---------------------
- # Plan generator (deterministic, rich)
- # ---------------------
- DIET_STYLES = ["Omnivore", "Mediterranean", "Vegetarian", "Vegan", "Low-carb"]
-
- MEAL_IDEAS = {
-     "Omnivore": [
-         "Greek yogurt + berries + nuts",
-         "Chicken bowl (rice, veggies, olive oil)",
-         "Eggs, avocado, sourdough",
-         "Salmon, quinoa, asparagus",
-         "Lean beef, sweet potato, salad",
-         "Tuna whole-grain wrap",
-         "Cottage cheese + fruit + seeds",
-     ],
-     "Mediterranean": [
-         "Oats with dates, walnuts, olive oil drizzle",
-         "Grilled fish, lentil salad, greens",
-         "Hummus platter, wholegrain pita, veg",
-         "Chickpea tomato stew",
-         "Feta + olive salad, quinoa",
-         "Shakshuka + side salad",
-         "Lentils, roasted veg, tahini",
-     ],
-     "Vegetarian": [
-         "Tofu scramble, toast, avocado",
-         "Paneer tikka bowl",
-         "Bean chili + brown rice",
-         "Halloumi, couscous, veg",
-         "Greek salad + eggs",
-         "Tempeh stir-fry",
-         "Yogurt parfait + granola",
-     ],
-     "Vegan": [
-         "Tofu scramble, avocado toast",
-         "Lentil curry + basmati",
-         "Burrito bowl (beans, corn, salsa)",
-         "Seitan, roasted potatoes, veg",
-         "Tofu poke bowl",
-         "Chickpea pasta + marinara",
-         "Overnight oats + banana + peanut butter",
-     ],
-     "Low-carb": [
-         "Eggs, smoked salmon, salad",
-         "Chicken Caesar (no croutons)",
-         "Beef & greens stir-fry",
-         "Omelette + veg + cheese",
-         "Zoodles + turkey bolognese",
-         "Tofu salad w/ tahini",
-         "Yogurt + nuts (moderate)",
-     ],
- }
-
- WORKOUTS = {
-     "Fat loss": [
-         "3× LISS cardio 30–40min",
-         "2× full‑body strength 45min",
-         "1× intervals 12–16min",
-         "Daily 8–10k steps"
-     ],
-     "Recomp/Maintenance": [
-         "3× full‑body strength 45–60min",
-         "1–2× LISS cardio 30min",
-         "Mobility 10min daily",
-         "8–10k steps"
-     ],
-     "Muscle gain": [
-         "4× strength split 45–60min",
-         "Optional 1× LISS 20–30min",
-         "Mobility 10min",
-         "7–9k steps"
-     ],
- }
-
-
- def feeding_schedule(first_meal_hhmm: str, fasting_hours: float) -> List[Tuple[str, str]]:
-     """Return 7 (start,end) strings for the eating window each day."""
-     h, m = parse_hhmm(first_meal_hhmm)
-     window = max(0.0, 24 - float(fasting_hours))
-     start_minutes = h * 60 + m
-     end_minutes = int((start_minutes + window * 60) % (24 * 60))
-
-     sched = []
-     for _ in range(7):
-         start = fmt_hhmm(h, m)
-         end = fmt_hhmm(end_minutes // 60, end_minutes % 60)
-         sched.append((start, end))
-     return sched
-
-
- def weekly_plan(diet: str, sched: List[Tuple[str, str]], kcal: int, protein_g: int) -> pd.DataFrame:
-     ideas = MEAL_IDEAS[diet]
-     rows = []
-     for i in range(7):
-         day = ["Mon","Tue","Wed","Thu","Fri","Sat","Sun"][i]
-         start, end = sched[i]
-         meal1 = ideas[i % len(ideas)]
-         meal2 = ideas[(i+3) % len(ideas)]
-         snack = "Fruit or nuts (optional)"
-         rows.append({
-             "Day": day,
-             "Feeding window": f"{start}–{end}",
-             "Meal 1": meal1,
-             "Meal 2": meal2,
-             "Protein target": f"≥ {protein_g} g",
-             "Daily kcal": kcal,
-             "Snack": snack,
-         })
-     return pd.DataFrame(rows)
-
-
- def shopping_list(diet: str) -> List[str]:
-     core = [
-         "Leafy greens, mixed veg, berries",
-         "Olive oil, nuts/seeds, herbs & spices",
-         "Coffee/tea, mineral water, electrolytes",
-     ]
-     extras = {
-         "Omnivore": ["Chicken, fish, eggs, yogurt, cottage cheese", "Rice/quinoa/sourdough", "Beans/lentils"],
-         "Mediterranean": ["Fish, feta, olives", "Whole grains (bulgur, farro)", "Chickpeas/lentils"],
-         "Vegetarian": ["Eggs, dairy, paneer", "Legumes", "Tofu/tempeh"],
-         "Vegan": ["Tofu/tempeh/seitan", "Beans/lentils", "Plant yogurt/milk"],
-         "Low-carb": ["Eggs, fish, meat", "Green veg", "Greek yogurt, cheese"],
-     }
-     return core + extras[diet]
-
- # ---------------------
- # Tracker (history)
- # ---------------------
- active_fasts: Dict[str, pd.Timestamp] = {}
-
- def _csv(u: str) -> Path:
-     safe = "".join(ch for ch in (u or "default") if ch.isalnum() or ch in ("_","-"))
-     return DATA_DIR / f"{safe}.csv"
-
- def hist_load(u: str) -> pd.DataFrame:
-     p = _csv(u)
-     if p.exists():
-         d = pd.read_csv(p)
-         for c in ["start_time","end_time"]:
-             if c in d: d[c] = pd.to_datetime(d[c], errors="coerce")
        return d
    return pd.DataFrame(columns=["start_time","end_time","duration_hours","note"])

- def hist_save(u: str, d: pd.DataFrame) -> None:
-     d.to_csv(_csv(u), index=False)

- #
- #
- #

- def predict_and_plan(fasting_duration, meal_timing, weight, age, gender, height,
-                      activity, goal, diet, lang, ai_polish) -> Tuple[Optional[float], str, str, pd.DataFrame, object, str]:
    try:
-         #
-         if fasting_duration < 0 or fasting_duration > 72:
-             raise ValueError("Fasting must be 0–72h.")
-         h, m = parse_hhmm(meal_timing)
-
-         df = pd.DataFrame({
-             "Fasting Duration (hours)": [float(fasting_duration)],
-             "Meal Timing (hour:minute)": [h + m / 60],
-             "Body Weight (kg)": [float(weight)],
-             "Age (years)": [float(age)],
-             "Height (cm)": [float(height)],
-             "Gender_Male": [1 if gender == "Male" else 0],
-             "Gender_Other": [1 if gender == "Other" else 0],
        })
-         score = float(decision_tree_regressor.predict(df)[0])
-
-         bmr = bmr_mifflin(gender, weight, height, age)
-         tdee_kcal = tdee(bmr, activity)
-         target_kcal = int(round(tdee_kcal))
-         protein_g = int(round(max(1.6 * weight, 80)))
-         bmi_val = round(bmi(weight, height), 1)
-
-         sched = feeding_schedule(meal_timing, fasting_duration)
-         plan_df = weekly_plan(diet, sched, target_kcal, protein_g)
-
-         hdr = {
-             "en": "## Your 7-day Intermittent Fasting plan",
-             "es": "## Tu plan de ayuno intermitente de 7 días",
-         }[lang]
-         kpis = (
-             f"**Score:** {score:.1f} • **BMI:** {bmi_val} • **BMR:** {int(bmr)} kcal • **TDEE:** {int(tdee_kcal)} kcal • "
-             f"**Target:** {target_kcal} kcal • **Protein:** ≥ {protein_g} g • **Diet:** {diet}\n"
-         )
-         sched_md = "\n".join([f"- **{d}**: {s} – {e}" for d,(s,e) in zip(["Mon","Tue","Wed","Thu","Fri","Sat","Sun"], sched)])
-         workouts = "\n".join([f"- {w}" for w in WORKOUTS[goal]])
-         shop = "\n".join([f"- {x}" for x in shopping_list(diet)])
-
-         plan_md = f"""
- {hdr}
-
- {kpis}
-
- ### Feeding window (daily)
- {sched_md}
-
- ### Weekly training
- {workouts}
-
- ### Daily meals (example week)
- (See table below for details.)
-
- ### Shopping list
- {shop}
-
- > Hydration & electrolytes during the fast, protein at each meal, whole foods, and 7–9 hours sleep.
- """.strip()
-
-         # Optional AI polish (ZeroGPU window)
-         if ai_polish:
-             try:
-                 plan_md = polish_on_gpu(plan_md, lang)
-             except Exception:
-                 pass
-
-         # Export file path (Markdown)
-         md_path = DATA_DIR / "plan.md"
-         md_path.write_text(plan_md, encoding="utf-8")
-
-         return score, kpis, plan_md, plan_df, fig, str(md_path)
    except Exception as e:
-         return None, "", f"⚠️ {e}", pd.DataFrame(), None, ""

- #
# Tracker actions
- #
-
- def start_fast(user: str, note: str):
-     if not user:
-         return "Enter a username.", note
-     if user in active_fasts:
-         return f"Already fasting since {active_fasts[user].strftime(TS_FMT)}.", note
-     active_fasts[user] = pd.Timestamp.now()
-     return f"✅ Fast started at {active_fasts[user].strftime(TS_FMT)}.", ""
-
-
- def end_fast(user: str):
-     if user not in active_fasts:
-         return "No active fast.", None, None, None
-     start = active_fasts.pop(user)
-     end = pd.Timestamp.now()
-     dur = round((end - start).total_seconds() / 3600, 2)
-     df = hist_load(user)
-     df.loc[len(df)] = [start, end, dur, ""]
-     hist_save(user, df)
-     chart = make_hist_chart(df)
-     return f"✅ Fast ended at {end.strftime(TS_FMT)} • {dur} h", df.tail(12), chart, hist_stats(df)
-
-
- def refresh_hist(user: str):
-     df = hist_load(user)
-     return df.tail(12), make_hist_chart(df), hist_stats(df)
-
-
- def make_hist_chart(df: pd.DataFrame):
-     if df.empty: return None
-     d = df.dropna(subset=["end_time"]).copy()
-     d["date"] = pd.to_datetime(d["end_time"]).dt.date
-     fig = px.bar(d, x="date", y="duration_hours", title="Fasting duration by day (h)")
-     fig.update_layout(height=300, margin=dict(l=10,r=10,t=40,b=10))
-     return fig
-
-
- def hist_stats(df: pd.DataFrame) -> str:
-     if df.empty: return "No history yet."
-     last7 = df.tail(7)
-     avg = last7["duration_hours"].mean()
-     streak = compute_streak(df)
-     return f"Total fasts: {len(df)}\nAvg (last 7): {avg:.2f} h\nCurrent streak: {streak} day(s)"
-
-
- def compute_streak(df: pd.DataFrame) -> int:
-     d = df.dropna(subset=["end_time"]).copy()
-     if d.empty: return 0
-     days = set(pd.to_datetime(d["end_time"]).dt.date)
-     cur = pd.Timestamp.now().date(); streak=0
-     while cur in days:
-         streak+=1; cur = cur - pd.Timedelta(days=1)
-     return streak
-
- # ---------------------
# UI
- #
- with gr.Blocks(
-     theme=gr.themes.Soft(primary_hue=gr.themes.colors.orange, neutral_hue=gr.themes.colors.gray),
- ) as demo:
-     gr.Markdown("""
- # 🥣 Intermittent Fasting — Pro
- Detailed coaching plans + tracker. ZeroGPU‑ready (with CPU fallback). All data stored locally in this Space.
- """)

    with gr.Tabs():
-         # --- Coach tab
        with gr.TabItem("Coach"):
            with gr.Row():
                with gr.Column():
-                     fasting_duration
-                     meal_timing
-                     weight
                with gr.Column():
-                     age
-                     gender
-                     height
            with gr.Row():
-                 activity
-                 goal
-                 diet
-                 lang
-                 ai_polish

-             btn = gr.Button("Predict & Build Plan", variant="primary")
-
-             score_out = gr.Number(label="Predicted score")
-             kpi_out = gr.Markdown()
-             plan_md = gr.Markdown()
-             plan_tbl = gr.Dataframe(headers=["Day","Feeding window","Meal 1","Meal 2","Protein target","Daily kcal","Snack"], interactive=False)
-             fig = gr.Plot()
-             dl = gr.DownloadButton(label="Download plan (.md)")
-
-             btn.click(
-                 predict_and_plan,
-                 inputs=[fasting_duration, meal_timing, weight, age, gender, height, activity, goal, diet, lang, ai_polish],
-                 outputs=[score_out, kpi_out, plan_md, plan_tbl, fig, dl],
-                 api_name="coach_plan"
-             )
-
-         # --- Tracker tab
        with gr.TabItem("Tracker"):
-             hist = gr.Dataframe(interactive=False)
-             hist_fig = gr.Plot()
-             stats = gr.Markdown()
-
-             b1.click(start_fast, inputs=[user, note], outputs=[status, note])
-             b2.click(end_fast, inputs=[user], outputs=[status, hist, hist_fig, stats])
-             b3.click(refresh_hist, inputs=[user], outputs=[hist, hist_fig, stats])
-             demo.load(refresh_hist, inputs=[user], outputs=[hist, hist_fig, stats])
-
-         # --- About tab
-         with gr.TabItem("About"):
-             gr.Markdown("""
- **How it works**
- • Your predictor estimates a health score from inputs.
- • The coach builds a 7‑day schedule matching your fasting window, goal, activity and diet style.
- • Optional AI polish refines wording using a tiny model (ZeroGPU window).
- • Tracker stores CSVs under `/data/` and never sends data elsewhere.
- """)
-
- if __name__ == "__main__":
-     demo.queue().launch()
-
import plotly.express as px
import joblib

import spaces
import torch

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
+ from huggingface_hub import InferenceClient

+ # ------------------------
+ # Config & storage
+ # ------------------------
DATA_DIR = Path("data"); DATA_DIR.mkdir(exist_ok=True)
TS_FMT = "%Y-%m-%d %H:%M:%S"

DT_PATH = "./decision_tree_regressor.joblib"
decision_tree_regressor = joblib.load(DT_PATH)

+ # Local lightweight model for fallback
+ GEN_MODEL = "google/flan-t5-small"
_tokenizer = AutoTokenizer.from_pretrained(GEN_MODEL)
_model = AutoModelForSeq2SeqLM.from_pretrained(GEN_MODEL)
_generate_cpu = pipeline("text2text-generation", model=_model, tokenizer=_tokenizer, device=-1)

+ # SOTA models available via Inference API
+ SOTA_MODELS = [
+     "Qwen/Qwen2.5-72B-Instruct",  # default
+     "meta-llama/Meta-Llama-3.1-70B-Instruct",
+     "mistralai/Mistral-Nemo-Instruct-2407",
+     "Qwen/Qwen2.5-32B-Instruct",
+     "Qwen/Qwen2.5-7B-Instruct"
+ ]
+
+ def _hf_client(model_id: str) -> InferenceClient:
+     token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
+     return InferenceClient(model=model_id, token=token, timeout=90)
+
+ def generate_with_hf_inference(prompt: str, model_id: str, max_new_tokens: int = 700) -> str:
+     try:
+         client = _hf_client(model_id)
+         text = client.text_generation(
+             prompt,
+             max_new_tokens=max_new_tokens,
+             temperature=0.6,
+             top_p=0.9,
+             repetition_penalty=1.05,
+             stop=["</s>"],
+             return_full_text=False,
+         )
+         return text.strip()
+     except Exception as e:
+         return f"(HF Inference error: {e})\n" + generate_on_gpu(prompt, max_new_tokens=max_new_tokens)
+
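# Illustrative sketch, not part of the commit: how the helper above is meant to be
# called. Assumes an HF_TOKEN with Inference API access is set as a Space secret;
# the prompt text is invented for the example.
def _example_sota_call() -> str:
    prompt = "Rewrite in a friendly coaching tone: fast 16 h, eat between 12:30 and 20:30."
    # The smallest listed model keeps the example cheap; any SOTA_MODELS entry works.
    return generate_with_hf_inference(prompt, SOTA_MODELS[-1], max_new_tokens=64)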
+ # ------------------------
+ # ZeroGPU functions
+ # ------------------------
@spaces.GPU
def gpu_warmup() -> str:
    return f"cuda={torch.cuda.is_available()}"

@spaces.GPU
+ def generate_on_gpu(prompt: str, max_new_tokens: int = 600) -> str:
    try:
        if torch.cuda.is_available():
+             gen = pipeline("text2text-generation", model=_model.to("cuda"), tokenizer=_tokenizer, device=0)
+             out = gen(prompt, max_new_tokens=max_new_tokens)
        else:
+             out = _generate_cpu(prompt, max_new_tokens=max_new_tokens)
        return out[0]["generated_text"].strip()
    except Exception as e:
+         out = _generate_cpu(prompt, max_new_tokens=max_new_tokens)
+         return out[0]["generated_text"].strip() + f"\n\n(Note: GPU path failed: {e})"

try:
    _ = gpu_warmup()
except Exception:
    pass

+ # ------------------------
+ # Metrics
+ # ------------------------
+ ACTIVITY = {"Sedentary":1.2,"Lightly active":1.375,"Moderately active":1.55,"Very active":1.725,"Athlete":1.9}
+ GOAL_CAL_ADJ = {"Fat loss":-0.15,"Recomp/Maintenance":0.0,"Muscle gain":0.10}
+
+ def bmi(w,h): return w/((h/100)**2)
+ def bmr_mifflin(sex,w,h,a): return 10*w+6.25*h-5*a+(5 if sex=="Male" else -161)
+ def tdee(bmr,act): return bmr*ACTIVITY.get(act,1.2)
+
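# Worked example, not part of the commit: the Mifflin-St Jeor arithmetic the helpers
# above implement, for an invented 35-year-old male, 70 kg, 175 cm.
assert bmr_mifflin("Male", 70, 175, 35) == 10*70 + 6.25*175 - 5*35 + 5 == 1623.75
assert abs(tdee(1623.75, "Lightly active") - 1623.75 * 1.375) < 1e-9  # about 2232.7 kcal
# With GOAL_CAL_ADJ["Fat loss"] = -0.15 the daily target is about 2232.7 * 0.85, i.e. roughly 1898 kcal.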
+ # ------------------------
+ # Tracker storage
+ # ------------------------
+ active_fasts: Dict[str,pd.Timestamp] = {}
+ def _csv(u): return DATA_DIR/f"{''.join(ch for ch in u if ch.isalnum() or ch in ('_','-'))}.csv"
+
+ def hist_load(u):
+     p=_csv(u)
+     if p.exists():
+         d=pd.read_csv(p)
+         for c in ["start_time","end_time"]:
+             if c in d: d[c]=pd.to_datetime(d[c],errors="coerce")
        return d
    return pd.DataFrame(columns=["start_time","end_time","duration_hours","note"])

+ def hist_save(u,d): d.to_csv(_csv(u),index=False)
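# Illustrative check, not part of the commit: _csv keeps only alphanumerics, "_" and "-"
# from the username, so each user maps to one flat CSV under data/; the name is invented.
assert _csv("ana.maria!") == DATA_DIR / "anamaria.csv"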

+ # ------------------------
+ # Plan generator
+ # ------------------------
+ def predict_and_plan(
+     fasting_duration, meal_timing, weight, age, gender, height,
+     activity, goal, diet, lang,
+     ai_polish, use_sota_model, sota_model_id
+ ) -> Tuple[Optional[float], str, str, pd.DataFrame, object, str]:
    try:
+         # Input check
+         if fasting_duration<0 or fasting_duration>72: raise ValueError("Fasting must be 0–72h.")
+         h,m = map(int,meal_timing.split(":")); assert 0<=h<24 and 0<=m<60
+         if weight<=0 or height<=0 or age<0: raise ValueError("Invalid weight/height/age.")
+
+         # Score
+         df=pd.DataFrame({
+             "Fasting Duration (hours)":[float(fasting_duration)],
+             "Meal Timing (hour:minute)":[h+m/60],
+             "Body Weight (kg)":[float(weight)],
+             "Age (years)":[float(age)],
+             "Height (cm)":[float(height)],
+             "Gender_Male":[1 if gender=="Male" else 0],
+             "Gender_Other":[1 if gender=="Other" else 0],
        })
+         score=float(decision_tree_regressor.predict(df)[0])
+
+         bmr=bmr_mifflin(gender,weight,height,age)
+         tdee_kcal=tdee(bmr,activity)
+         adj=GOAL_CAL_ADJ[goal]; target_kcal=int(round(tdee_kcal*(1+adj)))
+         protein_g=int(round(max(1.6*weight,80)))
+         bmi_val=round(bmi(weight,height),1)
+
+         plan_md=f"""
+ ## Intermittent Fasting Plan
+
+ **Score:** {score:.1f} • **BMI:** {bmi_val} • **BMR:** {int(bmr)} kcal • **TDEE:** {int(tdee_kcal)} kcal • **Target:** {target_kcal} kcal • **Protein:** ≥ {protein_g} g
+
+ - Goal: {goal}
+ - Diet: {diet}
+ - First meal: {meal_timing}
+ - Fasting duration: {fasting_duration} h
+ """
+         # Choose generator
+         if use_sota_model:
+             plan_md = generate_with_hf_inference(plan_md, sota_model_id, max_new_tokens=700)
+         elif ai_polish:
+             plan_md = generate_on_gpu(plan_md, max_new_tokens=600)
+
+         md_path=DATA_DIR/"plan.md"; md_path.write_text(plan_md,encoding="utf-8")
+         return score, f"Target kcal {target_kcal}, protein {protein_g} g", plan_md, pd.DataFrame(), None, str(md_path)
    except Exception as e:
+         return None,"",f"⚠️ {e}",pd.DataFrame(),None,""

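# Illustrative sketch, not part of the commit: the exact single-row frame the regressor
# receives for an invented input (Female, 16 h fast, first meal 12:30). Both one-hot
# gender columns are 0 because "Female" is the implicit reference category.
_example_features = pd.DataFrame({
    "Fasting Duration (hours)": [16.0],
    "Meal Timing (hour:minute)": [12 + 30/60],  # 12.5
    "Body Weight (kg)": [62.0],
    "Age (years)": [29.0],
    "Height (cm)": [168.0],
    "Gender_Male": [0],
    "Gender_Other": [0],
})
# decision_tree_regressor.predict(_example_features) yields the score for this row.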
+ # ------------------------
# Tracker actions
+ # ------------------------
+ def start_fast(user,note):
+     if not user: return "Enter username.",None
+     if user in active_fasts: return f"Already fasting since {active_fasts[user]}.",None
+     active_fasts[user]=pd.Timestamp.now(); return f"✅ Fast started at {active_fasts[user].strftime(TS_FMT)}.",None
+
+ def end_fast(user):
+     if not user: return "Enter username.",None,None,None
+     if user not in active_fasts: return "No active fast.",None,None,None
+     end=pd.Timestamp.now(); start=active_fasts.pop(user)
+     dur=round((end-start).total_seconds()/3600,2)
+     df=hist_load(user); df.loc[len(df)]=[start,end,dur,""]; hist_save(user,df)
+     return f"✅ Fast ended {end.strftime(TS_FMT)} • {dur} h", df.tail(12), None, f"Total fasts {len(df)}"
+
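# Worked example, not part of the commit: the duration arithmetic end_fast applies,
# for an invented fast from 20:00 to 12:30 the next day (16.5 h).
_example_start = pd.Timestamp("2024-01-01 20:00:00")
_example_end = pd.Timestamp("2024-01-02 12:30:00")
assert round((_example_end - _example_start).total_seconds() / 3600, 2) == 16.5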
+ # ------------------------
# UI
+ # ------------------------
+ with gr.Blocks(title="Fasting Coach — SOTA", theme=gr.themes.Soft()) as demo:
+     gr.Markdown("# 🥣 Intermittent Fasting Coach — SOTA\nZeroGPU + HF Inference API")

    with gr.Tabs():
        with gr.TabItem("Coach"):
            with gr.Row():
                with gr.Column():
+                     fasting_duration=gr.Number(label="Fasting Duration (h)",value=16,minimum=0,maximum=72)
+                     meal_timing=gr.Textbox(label="First meal (HH:MM)",value="12:30")
+                     weight=gr.Number(label="Weight (kg)",value=70)
                with gr.Column():
+                     age=gr.Slider(label="Age",minimum=18,maximum=100,value=35)
+                     gender=gr.Radio(["Male","Female","Other"],label="Gender",value="Male")
+                     height=gr.Number(label="Height (cm)",value=175)
            with gr.Row():
+                 activity=gr.Dropdown(list(ACTIVITY.keys()),label="Activity",value="Lightly active")
+                 goal=gr.Dropdown(list(GOAL_CAL_ADJ.keys()),label="Goal",value="Recomp/Maintenance")
+                 diet=gr.Dropdown(["Mediterranean","Omnivore","Vegan"],label="Diet",value="Mediterranean")
+                 lang=gr.Radio(["en","es"],label="Language",value="en")
+                 ai_polish=gr.Checkbox(label="Local polish (tiny model)")
+                 use_sota_model=gr.Checkbox(label="Use SOTA model (HF Inference)",value=True)
+                 sota_model_id=gr.Dropdown(SOTA_MODELS,value=SOTA_MODELS[0],label="HF model")
+             btn=gr.Button("Predict & Build Plan",variant="primary")
+             score_out=gr.Number(label="Score"); kpi_out=gr.Markdown(); plan_md=gr.Markdown(); plan_tbl=gr.Dataframe(); fig=gr.Plot(); dl=gr.DownloadButton(label="Download plan")
+             btn.click(predict_and_plan,inputs=[fasting_duration,meal_timing,weight,age,gender,height,activity,goal,diet,lang,ai_polish,use_sota_model,sota_model_id],outputs=[score_out,kpi_out,plan_md,plan_tbl,fig,dl])

        with gr.TabItem("Tracker"):
+             user=gr.Textbox(label="Username"); note=gr.Textbox(label="Note")
+             b1=gr.Button("Start fast"); b2=gr.Button("End fast")
+             status=gr.Markdown(); hist=gr.Dataframe(); hist_fig=gr.Plot(); stats=gr.Markdown()
+             b1.click(start_fast,inputs=[user,note],outputs=[status,note])
+             b2.click(end_fast,inputs=[user],outputs=[status,hist,hist_fig,stats])
+
+ if __name__=="__main__":
+     demo.queue().launch()