Spaces:
Running
Running
Update src/streamlit_app.py
Browse files- src/streamlit_app.py +47 -42
src/streamlit_app.py
CHANGED
|
@@ -1,72 +1,77 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
import os
|
| 3 |
|
| 4 |
-
#
|
| 5 |
VIDEO_FOLDER = "./src/synthda_falling_realreal/"
|
| 6 |
|
|
|
|
| 7 |
st.set_page_config(layout="wide")
|
|
|
|
| 8 |
|
| 9 |
-
# Title and instructions
|
| 10 |
st.markdown("""
|
| 11 |
-
|
| 12 |
-
<p style='text-align: center; font-size: 18px;'>
|
| 13 |
-
AutoSynthDa blends two input motion videos to generate <strong>kinematically coherent, synthetic action sequences</strong>.
|
| 14 |
-
</p>
|
| 15 |
-
<p style='text-align: center; font-size: 16px;'>
|
| 16 |
-
Use the slider to explore how the motion transitions from Input Video 1 (left) to Input Video 2 (right).
|
| 17 |
-
</p>
|
| 18 |
-
<p style='text-align: center;'>
|
| 19 |
-
<a href='https://github.com/nvidia/synthda' target='_blank'>View the code on GitHub</a>
|
| 20 |
-
</p>
|
| 21 |
-
""", unsafe_allow_html=True)
|
| 22 |
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
| 25 |
|
| 26 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
filename_interp = f"videos_generated_{weight:.1f}.mp4"
|
| 28 |
filename_input1 = "videos_generated_0.0.mp4"
|
| 29 |
filename_input2 = "videos_generated_1.0.mp4"
|
| 30 |
|
|
|
|
| 31 |
video_interp = os.path.join(VIDEO_FOLDER, filename_interp)
|
| 32 |
video_input1 = os.path.join(VIDEO_FOLDER, filename_input1)
|
| 33 |
video_input2 = os.path.join(VIDEO_FOLDER, filename_input2)
|
| 34 |
|
|
|
|
| 35 |
exists_interp = os.path.exists(video_interp)
|
| 36 |
exists_1 = os.path.exists(video_input1)
|
| 37 |
exists_2 = os.path.exists(video_input2)
|
| 38 |
|
| 39 |
-
# Interpolation
|
| 40 |
if weight == 0.0:
|
| 41 |
-
|
| 42 |
elif weight == 1.0:
|
| 43 |
-
|
| 44 |
else:
|
| 45 |
w2 = round(1.0 - weight, 1)
|
| 46 |
-
|
|
|
|
|
|
|
| 47 |
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
# Layout with 3 video panels
|
| 51 |
col1, col2, col3 = st.columns(3)
|
| 52 |
|
| 53 |
-
with
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
st.
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
else:
|
| 72 |
-
st.error("Video 2 not found")
|
|
|
|
| 1 |
import streamlit as st
import os

# Directory holding the pre-generated interpolation clips, named
# videos_generated_<weight>.mp4 (weight 0.0..1.0 in 0.1 steps — see the
# filename construction further down).
VIDEO_FOLDER = "./src/synthda_falling_realreal/"

# Streamlit page setup: full browser width so the three video panels
# fit side by side, then the app title.
st.set_page_config(layout="wide")
st.title("AutoSynthDa Pose Interpolation Viewer")
|
| 10 |
|
|
|
|
| 11 |
# Intro copy: what AutoSynthDa does and where the source code lives.
st.markdown("""
### AutoSynthDa Interpolation Viewer

AutoSynthDa blends two input motion videos to **generate kinematically coherent, synthetic action videos**.
Use the slider below to explore how the system interpolates motion from one video to another.
Source: [github.com/nvidia/synthda](https://github.com/nvidia/synthda)
""")

# Slider explanation — raw HTML for centered text, hence unsafe_allow_html.
st.markdown(
    '<p style="text-align:center;"><strong>Use the slider to control the interpolation between Input Video 1 (left) and Input Video 2 (right).</strong></p>',
    unsafe_allow_html=True
)
|
| 24 |
+
|
| 25 |
+
# Interpolation weight: 0.0 shows pure Input Video 1, 1.0 shows pure
# Input Video 2; the default is the midpoint blend.
weight = st.slider(
    "Interpolation Weight",
    min_value=0.0,
    max_value=1.0,
    value=0.5,
    step=0.1,
)
|
| 27 |
+
|
| 28 |
+
# Clip filenames follow the generator's naming scheme:
# videos_generated_<weight>.mp4, one file per slider step.
filename_interp = f"videos_generated_{weight:.1f}.mp4"
filename_input1 = "videos_generated_0.0.mp4"
filename_input2 = "videos_generated_1.0.mp4"

# Resolve each filename to its full path inside VIDEO_FOLDER, then
# record whether the clip is actually present on disk.
video_interp, video_input1, video_input2 = (
    os.path.join(VIDEO_FOLDER, name)
    for name in (filename_interp, filename_input1, filename_input2)
)
exists_interp, exists_1, exists_2 = (
    os.path.exists(path)
    for path in (video_interp, video_input1, video_input2)
)
|
| 42 |
|
| 43 |
+
# Status banner: the slider endpoints show a pure input clip; anything
# in between is a weighted blend of the two inputs.
if weight == 1.0:
    st.success("Showing Input Video 2 (no interpolation)")
elif weight == 0.0:
    st.success("Showing Input Video 1 (no interpolation)")
else:
    # Complementary weight, rounded to one decimal to match the slider step.
    w2 = round(1.0 - weight, 1)
    st.info(f"Generated motion: {weight:.1f} from Input Video 1 + {w2:.1f} from Input Video 2")
|
| 53 |
|
| 54 |
+
# Layout: three equal-width panels — input 1 | interpolated result | input 2.
col1, col2, col3 = st.columns(3)
|
| 56 |
|
| 57 |
+
# Helper to show video with autoplay.
def render_video_column(col, label, video_path, exists):
    """Render one titled, autoplaying, looping video panel inside *col*.

    A raw HTML ``<video>`` tag is used (rather than ``st.video``) so the clip
    autoplays muted in a loop. The browser cannot fetch a server-side file
    path through ``<source src>`` — Streamlit does not serve arbitrary local
    files — so the clip is read here and inlined as a base64 data URI.

    Args:
        col: Streamlit column container to render into.
        label: Panel title shown above the video; also used in error text.
        video_path: Server-side path to the .mp4 clip.
        exists: Pre-computed ``os.path.exists(video_path)`` result.
    """
    import base64  # local import: only this helper needs it

    with col:
        st.markdown(f"<div style='text-align: center; font-weight: bold;'>{label}</div>", unsafe_allow_html=True)
        if not exists:
            st.error(f"{label} not found")
            return
        try:
            with open(video_path, "rb") as fh:
                encoded = base64.b64encode(fh.read()).decode("ascii")
        except OSError:
            # File vanished (or became unreadable) between the exists
            # check and the read — degrade to the same error message.
            st.error(f"{label} not found")
            return
        st.markdown(
            f"""
            <video width="100%" autoplay loop muted playsinline>
                <source src="data:video/mp4;base64,{encoded}" type="video/mp4">
                Your browser does not support the video tag.
            </video>
            """,
            unsafe_allow_html=True
        )
|
| 73 |
|
| 74 |
+
# Render the three panels left-to-right: input 1, blended result, input 2.
_panels = (
    (col1, "Input Video 1", video_input1, exists_1),
    (col2, "Interpolated Video", video_interp, exists_interp),
    (col3, "Input Video 2", video_input2, exists_2),
)
for _col, _label, _path, _found in _panels:
    render_video_column(_col, _label, _path, _found)
|
|
|
|
|
|