# app.py
# 🎁 GIfty – Smart Gift Recommender (Embeddings + FAISS)
# Dataset: ckandemir/amazon-products (Hugging Face)
# UI: Gradio (English)
#
# Requirements (see requirements.txt):
# gradio, datasets, pandas, numpy, sentence-transformers, faiss-cpu, tabulate
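#
# Pipeline overview (descriptive summary of the code below):
#   1. Load the Amazon products dataset and map it to a small gift schema.
#   2. Embed one text "doc" per product with a sentence-transformers model
#      and index the vectors in FAISS.
#   3. At query time, turn the user profile into a query string, retrieve
#      similar products, apply budget/occasion/age filters, and return the
#      top-3 plus one synthetic gift idea and a short message.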
import os, re, random
from typing import Dict, List, Tuple

import numpy as np
import pandas as pd
import gradio as gr
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
import faiss

# ========================= Config =========================
MAX_ROWS = int(os.getenv("MAX_ROWS", "10000"))  # cap for speed

TITLE = "# 🎁 GIfty – Smart Gift Recommender\n*Top-3 similar picks + 1 generated idea + personalized message*"
OCCASION_OPTIONS = [
    "birthday", "anniversary", "valentines", "graduation",
    "housewarming", "christmas", "hanukkah", "thank_you",
]

AGE_OPTIONS = {
    "any": "any",
    "kid (3–12)": "kids",
    "teen (13–17)": "teens",
    "adult (18–64)": "adult",
    "senior (65+)": "senior",
}

INTEREST_OPTIONS = [
    "reading", "writing", "tech", "travel", "fitness", "cooking", "tea", "coffee",
    "games", "movies", "plants", "music", "design", "stationery", "home", "experience",
    "digital", "aesthetic", "premium", "eco", "practical", "minimalist", "social", "party",
    "photography", "outdoors", "pets", "beauty", "jewelry",
]

MODEL_CHOICES = {
    "MiniLM (384d)": "sentence-transformers/all-MiniLM-L6-v2",
    "MPNet (768d)": "sentence-transformers/all-mpnet-base-v2",
    "E5-base (768d)": "intfloat/e5-base-v2",
}
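# Note: the E5 family (e.g. intfloat/e5-base-v2) is nominally trained with
# "query: " / "passage: " prefixes; this app embeds raw text for all models,
# which is a simplification that may cost E5 a little retrieval quality.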

# ========================= Data loading & schema =========================
def _to_price_usd(x):
    s = str(x).strip().replace("$", "").replace(",", "")
    try:
        return float(s)
    except ValueError:
        return np.nan

def _infer_age_from_category(cat: str) -> str:
    s = (cat or "").lower()
    if any(k in s for k in ["baby", "toddler", "infant"]):
        return "kids"
    if "toys & games" in s or "board games" in s or "toy" in s:
        return "kids"
    if any(k in s for k in ["teen", "young adult", "ya"]):
        return "teens"
    return "any"

def _infer_occasion_tags(cat: str) -> str:
    s = (cat or "").lower()
    tags = set(["birthday"])  # default
    if any(k in s for k in ["home & kitchen", "furniture", "home décor", "home decor", "garden", "tools", "appliance", "cookware", "kitchen"]):
        tags.update(["housewarming", "thank_you"])
    if any(k in s for k in ["beauty", "jewelry", "watch", "fragrance", "cosmetic", "makeup", "skincare"]):
        tags.update(["valentines", "anniversary"])
    if any(k in s for k in ["toys", "board game", "puzzle", "kids", "lego"]):
        tags.update(["hanukkah", "christmas"])
    if any(k in s for k in ["office", "stationery", "notebook", "pen", "planner"]):
        tags.update(["graduation", "thank_you"])
    if any(k in s for k in ["electronics", "camera", "audio", "headphones", "gaming", "computer"]):
        tags.update(["birthday", "christmas"])
    if any(k in s for k in ["book", "novel", "literature"]):
        tags.update(["graduation", "thank_you"])
    if any(k in s for k in ["sports", "fitness", "outdoor", "camping", "hiking", "run", "yoga"]):
        tags.update(["birthday"])
    return ",".join(sorted(tags))

def map_amazon_to_schema(df_raw: pd.DataFrame) -> pd.DataFrame:
    cols = {c.lower().strip(): c for c in df_raw.columns}

    def get(key):
        return df_raw.get(cols.get(key, ""), "")

    out = pd.DataFrame({
        "name": get("product name"),
        "short_desc": get("description"),
        "tags": get("category"),
        "price_usd": get("selling price").map(_to_price_usd) if "selling price" in cols else np.nan,
        "age_range": "",
        "gender_tags": "any",
        "occasion_tags": "",
        "persona_fit": get("category"),
        "image_url": get("image") if "image" in cols else "",
    })
    # clean
    out["name"] = out["name"].astype(str).str.strip().str.slice(0, 120)
    out["short_desc"] = out["short_desc"].astype(str).str.strip().str.slice(0, 500)
    # "|" is a literal separator in the source categories, not a regex alternation
    out["tags"] = out["tags"].astype(str).str.replace("|", ", ", regex=False).str.lower()
    out["persona_fit"] = out["persona_fit"].astype(str).str.lower()
    # infer occasion & age
    out["occasion_tags"] = out["tags"].map(_infer_occasion_tags)
    out["age_range"] = out["tags"].map(_infer_age_from_category).fillna("any")
    return out
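# Target schema per row:
#   name, short_desc, tags, price_usd, age_range, gender_tags,
#   occasion_tags, persona_fit, image_url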

def build_doc(row: pd.Series) -> str:
    parts = [
        str(row.get("name", "")),
        str(row.get("short_desc", "")),
        str(row.get("tags", "")),
        str(row.get("persona_fit", "")),
        str(row.get("occasion_tags", "")),
        str(row.get("age_range", "")),
    ]
    return " | ".join([p for p in parts if p])

def load_catalog() -> pd.DataFrame:
    try:
        ds = load_dataset("ckandemir/amazon-products", split="train")
        raw = ds.to_pandas()
    except Exception:
        # Fallback (keeps the app alive if internet is blocked)
        raw = pd.DataFrame({
            "Product Name": ["Wireless Earbuds", "Coffee Sampler", "Strategy Board Game"],
            "Description": [
                "Compact earbuds with noise isolation and long battery life.",
                "Four single-origin roasts from small roasters.",
                "Modern eurogame for 2–4 players, 45–60 minutes.",
            ],
            "Category": ["Electronics | Audio", "Grocery | Coffee", "Toys & Games | Board Games"],
            "Selling Price": ["$59.00", "$34.00", "$39.00"],
            "Image": ["", "", ""],
        })
    df = map_amazon_to_schema(raw).drop_duplicates(subset=["name", "short_desc"])
    if len(df) > MAX_ROWS:
        df = df.sample(n=MAX_ROWS, random_state=42)
    # Reset the index in every branch so DataFrame labels line up with the
    # positional ids FAISS returns (drop_duplicates/sample leave gaps otherwise).
    df = df.reset_index(drop=True)
    df["doc"] = df.apply(build_doc, axis=1)
    return df


CATALOG = load_catalog()

# ========================= Business filters =========================
def _contains_ci(series: pd.Series, needle: str) -> pd.Series:
    if not needle:
        return pd.Series(True, index=series.index)
    pat = re.escape(needle)
    return series.fillna("").str.contains(pat, case=False, regex=True)

def filter_business(df: pd.DataFrame, budget_min=None, budget_max=None,
                    occasion: str = None, age_range: str = "any") -> pd.DataFrame:
    m = pd.Series(True, index=df.index)
    if budget_min is not None:
        m &= df["price_usd"].fillna(0) >= float(budget_min)
    if budget_max is not None:
        m &= df["price_usd"].fillna(1e9) <= float(budget_max)
    if occasion:
        m &= _contains_ci(df["occasion_tags"], occasion)
    if age_range and age_range != "any":
        m &= df["age_range"].fillna("any").isin([age_range, "any"])
    return df[m]
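# Example call (values taken from the UI defaults): returns the rows of
# CATALOG priced 20–60 USD whose occasion_tags mention "birthday" and whose
# age_range is "adult" or "any":
#   filter_business(CATALOG, budget_min=20, budget_max=60,
#                   occasion="birthday", age_range="adult")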

# ========================= Embeddings + FAISS =========================
class EmbeddingStore:
    def __init__(self, docs: List[str]):
        self.docs = docs
        self.model_cache: Dict[str, SentenceTransformer] = {}
        self.index_cache: Dict[str, faiss.Index] = {}
        self.dim_cache: Dict[str, int] = {}

    def _build(self, model_id: str):
        model = SentenceTransformer(model_id)
        embs = model.encode(self.docs, convert_to_numpy=True, normalize_embeddings=True)
        index = faiss.IndexFlatIP(embs.shape[1])  # inner product == cosine for normalized vectors
        index.add(embs)
        self.model_cache[model_id] = model
        self.index_cache[model_id] = index
        self.dim_cache[model_id] = embs.shape[1]

    def ensure_ready(self, model_id: str):
        if model_id not in self.index_cache:
            self._build(model_id)

    def search(self, model_id: str, query: str, topn: int) -> Tuple[np.ndarray, np.ndarray]:
        self.ensure_ready(model_id)
        model = self.model_cache[model_id]
        index = self.index_cache[model_id]
        qv = model.encode([query], convert_to_numpy=True, normalize_embeddings=True)
        sims, idxs = index.search(qv, topn)
        return sims[0], idxs[0]


EMB_STORE = EmbeddingStore(CATALOG["doc"].tolist())
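# Models and indexes are built lazily, once per model_id, on the first query
# that uses them. A minimal usage sketch (model id and query are illustrative):
#   sims, idxs = EMB_STORE.search(
#       "sentence-transformers/all-MiniLM-L6-v2", "coffee | birthday", topn=3)
#   # sims: cosine similarities, idxs: positional row ids into CATALOG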

def profile_to_query(profile: Dict) -> str:
    """Weighted, doc-aligned query: focuses on interests/occasion/age used in docs."""
    interests = [t.strip().lower() for t in profile.get("interests", []) if t.strip()]
    interests_expanded = interests + interests + interests  # weight *3
    occasion = (profile.get("occasion", "") or "").lower()
    age = profile.get("age_range", "any")
    parts = []
    if interests_expanded:
        parts.append(", ".join(interests_expanded))
    if occasion:
        parts.append(occasion)
    if age and age != "any":
        parts.append(age)
    return " | ".join(parts).strip()

def recommend_topk_embeddings(profile: Dict, model_key: str, k: int = 3) -> pd.DataFrame:
    model_id = MODEL_CHOICES.get(model_key, list(MODEL_CHOICES.values())[0])
    query = profile_to_query(profile)
    # global search on whole catalog
    sims, idxs = EMB_STORE.search(model_id, query, topn=min(max(k * 50, k), len(CATALOG)))
    # filter to business subset
    df_f = filter_business(
        CATALOG,
        budget_min=profile.get("budget_min"),
        budget_max=profile.get("budget_max"),
        occasion=profile.get("occasion"),
        age_range=profile.get("age_range", "any"),
    )
    if df_f.empty:
        df_f = CATALOG
    order = np.argsort(-sims)  # descending similarity
    seen, picks = set(), []
    for gi in idxs[order]:
        gi = int(gi)
        if gi not in df_f.index:
            continue
        nm = CATALOG.loc[gi, "name"]
        if nm in seen:
            continue
        seen.add(nm)
        picks.append(gi)
        if len(picks) >= k:
            break
    if not picks:
        res = df_f.head(k).copy()
        res["similarity"] = np.nan
        return res[["name", "short_desc", "price_usd", "occasion_tags", "persona_fit", "age_range", "image_url", "similarity"]]
    gi_to_sim = {int(i): float(s) for i, s in zip(idxs, sims)}
    res = CATALOG.loc[picks].copy()
    res["similarity"] = [gi_to_sim.get(int(i), np.nan) for i in picks]
    return res[["name", "short_desc", "price_usd", "occasion_tags", "persona_fit", "age_range", "image_url", "similarity"]]
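# Retrieval strategy used above: search the full catalog, over-fetch up to
# k*50 candidates, then keep only candidates that survive the business filter,
# de-duplicating by product name, until k picks are collected. If nothing
# survives, the first k rows of the filtered (or full) catalog are returned
# with similarity = NaN.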

# ========================= Synthetic item + message =========================
def generate_item(profile: Dict) -> Dict:
    interests = profile.get("interests", [])
    occasion = profile.get("occasion", "birthday")
    budget = profile.get("budget_max", profile.get("budget_usd", 50)) or 50
    age = profile.get("age_range", "any")
    core = (interests[0] if interests else "hobby").strip() or "hobby"
    style = random.choice(["personalized", "experience", "bundle"])
    if style == "personalized":
        base_name = f"Custom {core} accessory with initials"
        base_desc = f"Thoughtful personalized {core} accessory tailored to their taste."
    elif style == "experience":
        base_name = f"{core.title()} workshop voucher"
        base_desc = f"A guided intro session to explore {core} in a fun, hands-on way."
    else:
        base_name = f"{core.title()} starter bundle"
        base_desc = f"A curated set to kickstart their {core} passion."
    if age == "kids":
        base_desc += " Suitable for kids with safe, age-appropriate materials."
    elif age == "teens":
        base_desc += " Trendy pick that suits young enthusiasts."
    elif age == "senior":
        base_desc += " Comfortable and easy to use."
    price = float(np.clip(float(budget), 10, 300))
    return {
        "name": f"{base_name} ({occasion})",
        "short_desc": base_desc,
        "price_usd": price,
        "occasion_tags": occasion,
        "persona_fit": ", ".join(interests) or "general",
        "age_range": age,
        "image_url": "",
    }
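# Example output (when random.choice picks "bundle") for interests=["coffee"],
# occasion="birthday", budget_max=40:
#   {"name": "Coffee starter bundle (birthday)",
#    "short_desc": "A curated set to kickstart their coffee passion.",
#    "price_usd": 40.0, ...}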

def generate_message(profile: Dict) -> str:
    name = profile.get("recipient_name", "Friend")
    occasion = profile.get("occasion", "birthday")
    tone = profile.get("tone", "warm and friendly")
    return (f"Dear {name},\n"
            f"Happy {occasion}! Wishing you health, joy, and wonderful memories. "
            f"May your goals come true. With {tone} wishes.")

# ========================= Gradio UI =========================
EXAMPLES = [
    [["tech", "music"], "birthday", 20, 60, "Noa", "adult (18–64)", "MiniLM (384d)", "warm and friendly"],
    [["home", "cooking", "practical"], "housewarming", 25, 45, "Daniel", "adult (18–64)", "MiniLM (384d)", "warm"],
    [["games", "photography"], "birthday", 30, 120, "Omer", "teen (13–17)", "MPNet (768d)", "fun"],
    [["reading", "design", "aesthetic"], "thank_you", 15, 35, "Maya", "any", "E5-base (768d)", "friendly"],
]

def safe_markdown_table(df: pd.DataFrame) -> str:
    try:
        return df.to_markdown(index=False)
    except Exception:
        return df.to_string(index=False)
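# DataFrame.to_markdown relies on the optional "tabulate" package (listed in
# requirements.txt); the except branch falls back to plain text if it is missing.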

def ui_predict(interests_list: List[str], occasion: str, budget_min: float, budget_max: float,
               recipient_name: str, age_label: str, model_key: str, tone: str):
    try:
        # sanity
        if budget_min is None:
            budget_min = 20.0
        if budget_max is None:
            budget_max = 60.0
        if budget_min > budget_max:
            budget_min, budget_max = budget_max, budget_min
        age_range = AGE_OPTIONS.get(age_label, "any")
        profile = {
            "recipient_name": recipient_name or "Friend",
            "interests": interests_list or [],
            "occasion": occasion or "birthday",
            "budget_min": float(budget_min),
            "budget_max": float(budget_max),
            "budget_usd": float(budget_max),
            "age_range": age_range,
            "tone": tone or "warm and friendly",
        }
        recs = recommend_topk_embeddings(profile, model_key, k=3)
        gen = generate_item(profile)
        msg = generate_message(profile)
        top3_md = safe_markdown_table(recs[["name", "short_desc", "price_usd", "age_range", "similarity"]])
        gen_md = f"**{gen['name']}**\n\n{gen['short_desc']}\n\n~${gen['price_usd']:.0f}"
        return top3_md, gen_md, msg
    except Exception as e:
        return f"⚠️ Error: {e}", "", ""

with gr.Blocks() as demo:
    gr.Markdown(TITLE)
    with gr.Row():
        interests = gr.CheckboxGroup(
            label="Interests (select a few)",
            choices=INTEREST_OPTIONS,
            value=["tech", "music"],
            interactive=True,
        )
    with gr.Row():
        occasion = gr.Dropdown(label="Occasion", choices=OCCASION_OPTIONS, value="birthday")
        age = gr.Dropdown(label="Age group", choices=list(AGE_OPTIONS.keys()), value="adult (18–64)")
        model = gr.Dropdown(label="Embedding model", choices=list(MODEL_CHOICES.keys()), value="MiniLM (384d)")
    # Two sliders (for older Gradio versions): min + max budget
    with gr.Row():
        budget_min = gr.Slider(label="Min budget (USD)", minimum=5, maximum=500, step=1, value=20)
        budget_max = gr.Slider(label="Max budget (USD)", minimum=5, maximum=500, step=1, value=60)
    with gr.Row():
        recipient_name = gr.Textbox(label="Recipient name", value="Noa")
        tone = gr.Textbox(label="Message tone", value="warm and friendly")
    go = gr.Button("Get GIfty 🎯")
    out_top3 = gr.Markdown(label="Top-3 recommendations")
    out_gen = gr.Markdown(label="Generated item")
    out_msg = gr.Markdown(label="Personalized message")
    gr.Examples(
        EXAMPLES,
        [interests, occasion, budget_min, budget_max, recipient_name, age, model, tone],
        label="Quick examples",
    )
    go.click(
        ui_predict,
        [interests, occasion, budget_min, budget_max, recipient_name, age, model, tone],
        [out_top3, out_gen, out_msg],
    )

if __name__ == "__main__":
    demo.launch()