import io

import streamlit as st
from gtts import gTTS
from PIL import Image
from transformers import pipeline

st.title("🖼️ → 📖 Image-to-Story Demo")
st.write("Upload an image and watch as it’s captioned, turned into a short story, and even read aloud!")

@st.cache_resource
def load_captioner():
    # "image-to-text" is the transformers pipeline task for captioning models
    # (there is no "image-captioning" task name).
    return pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")

@st.cache_resource
def load_story_gen():
    return pipeline("text-generation", model="gpt2", tokenizer="gpt2")

captioner = load_captioner()
story_gen = load_story_gen()

# 1) Upload
uploaded = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"], key="image")
if uploaded:
    # Reset cached results when a different file is uploaded; otherwise the
    # previous image's caption, story, and audio would persist across reruns.
    if st.session_state.get("last_file") != uploaded.name:
        for key in ("caption", "story", "audio_bytes"):
            st.session_state.pop(key, None)
        st.session_state.last_file = uploaded.name

    img = Image.open(uploaded).convert("RGB")  # drop any alpha channel for the vision model
    st.image(img, use_container_width=True)

    # 2) Caption
    if "caption" not in st.session_state:
        with st.spinner("Generating caption…"):
            st.session_state.caption = captioner(img)[0]["generated_text"]
    st.write("**Caption:**", st.session_state.caption)

    # 3) Story (the caption is the prompt, so it becomes the story's opening line)
    if "story" not in st.session_state:
        with st.spinner("Spinning up a story…"):
            out = story_gen(
                st.session_state.caption,
                max_length=200,
                num_return_sequences=1,
                do_sample=True,
                top_p=0.9,
            )
            st.session_state.story = out[0]["generated_text"]
    st.write("**Story:**", st.session_state.story)

    # 4) Pre-generate raw MP3 bytes so replaying doesn't re-run TTS
    if "audio_bytes" not in st.session_state:
        with st.spinner("Generating audio…"):
            tts = gTTS(text=st.session_state.story, lang="en")
            buf = io.BytesIO()
            tts.write_to_fp(buf)
            st.session_state.audio_bytes = buf.getvalue()

    # 5) Play on demand (st.audio accepts raw bytes directly)
    if st.button("🔊 Play Story Audio"):
        st.audio(st.session_state.audio_bytes, format="audio/mp3")
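
# ----------------------------------------------------------------------
# Running the demo: a minimal sketch, assuming the script is saved as
# app.py (the filename is an assumption, not specified above) and the
# imported packages are installed in the active environment:
#
#   pip install streamlit transformers torch pillow gtts
#   streamlit run app.py
#
# Note: the first run downloads both models from the Hugging Face Hub,
# so expect a delay before captioning and story generation respond.
# ----------------------------------------------------------------------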