Merge pull request #9 from shanks28/test_branch
Files changed:
- .gitignore +12 -1
- __pycache__/model.cpython-310.pyc +0 -0
- frames.py +2 -2
- main.py +16 -8
.gitignore
CHANGED

@@ -3,4 +3,15 @@ output
 SuperSloMo.ckpt
 Test.mp4
 Result_Test
-interpolated_frames
+interpolated_frames
+Test1-15fps.mp4
+Test1.mp4
+interpolated_frames1
+interpolated_frames2
+
+result.mp4
+result2.mp4
+result3.mp4
+result4.mp4
+result5.mp4
+result6.mp4
__pycache__/model.cpython-310.pyc
CHANGED

Binary files a/__pycache__/model.cpython-310.pyc and b/__pycache__/model.cpython-310.pyc differ
frames.py
CHANGED

@@ -20,7 +20,7 @@ def extract_frames(url_path, output_dir) -> int :
         ret, frame = cap.read() # frame is a numpy array
         if not ret:
             break
-        frame_name = f"{frame_count}.png"
+        frame_name = f"frame_{frame_count}.png"
         frame_count += 1
         cv2.imwrite(os.path.join(output_dir, frame_name), frame)
     cap.release()
@@ -34,4 +34,4 @@ def downsample(video_path, output_dir, target_fps):
 
 if __name__ == "__main__": # sets the __name__ variable to __main__ for this script
 
-    extract_frames("Test.mp4", "output")
+    print(extract_frames("Test.mp4", "output"))
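For context, a minimal runnable sketch of how the renamed frames fit into extract_frames. Only the frame_name line and the print(...) call under __main__ come from this diff; the capture loop, the makedirs call, and the returned count are assumptions based on the -> int annotation and standard OpenCV usage.

```python
import os
import cv2

def extract_frames(url_path, output_dir) -> int:
    """Sketch: dump every frame of url_path into output_dir as frame_<n>.png."""
    os.makedirs(output_dir, exist_ok=True)       # assumed; not shown in the diff
    cap = cv2.VideoCapture(url_path)
    frame_count = 0
    while True:
        ret, frame = cap.read()                  # frame is a numpy array
        if not ret:
            break
        frame_name = f"frame_{frame_count}.png"  # naming scheme introduced in this PR
        frame_count += 1
        cv2.imwrite(os.path.join(output_dir, frame_name), frame)
    cap.release()
    return frame_count                           # assumed, given the -> int annotation

if __name__ == "__main__":
    print(extract_frames("Test.mp4", "output"))
```

The frame_ prefix matters downstream: main.py now sorts frames by the integer after the last underscore, so both files have to agree on this naming scheme.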
main.py
CHANGED

@@ -23,8 +23,10 @@ def normalize_frames(tensor):
     return tensor
 def laod_allframes(frame_dir):
     frames_path = sorted(
-        [os.path.join(frame_dir, f) for f in os.listdir(frame_dir) if f.endswith('.png')]
+        [os.path.join(frame_dir, f) for f in os.listdir(frame_dir) if f.endswith('.png')],
+        key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[-1])
     )
+    print(frames_path)
     for frame_path in frames_path:
         yield load_frames(frame_path)
 def load_frames(image_path)->torch.Tensor:
@@ -90,7 +92,6 @@ def interpolate(model_FC, A, B, input_fps, output_fps)-> list[torch.Tensor]:
         generated_frames.append(interpolated_frame)
     return generated_frames
 
-
 def warp_frames(frame, flow):
     b, c, h, w = frame.size()
     i,j,flow_h, flow_w = flow.size()
@@ -110,11 +111,18 @@ def warp_frames(frame, flow):
     warped_frame = F.grid_sample(frame, grid, align_corners=True,mode='bilinear', padding_mode='border')
     return warped_frame
 def frames_to_video(frame_dir,output_video,fps):
-    frame_pattern = os.path.join(frame_dir, "%d.png")
-    subprocess.run([
+    frame_files = sorted(
+        [f for f in os.listdir(frame_dir) if f.endswith('.png')],
+        key=lambda x: int(os.path.splitext(x)[0].split('_')[-1])
+    )
+    print(frame_files)
+    for i, frame in enumerate(frame_files):
+        os.rename(os.path.join(frame_dir, frame), os.path.join(frame_dir, f"frame_{i}.png"))
+    frame_pattern = os.path.join(frame_dir, "frame_%d.png")
+    subprocess.run([ # run shell command
         "ffmpeg", "-framerate", str(fps), "-i", frame_pattern,
         "-c:v", "libx264", "-pix_fmt", "yuv420p", output_video
-    ])
+    ],check=True)
 def solve():
     checkpoint = torch.load("SuperSloMo.ckpt")
     model_FC = UNet(6, 4).to(device) # Initialize flow computation model
@@ -124,11 +132,11 @@ def solve():
     model_AT.load_state_dict(checkpoint["state_dictAT"], strict=False) # Load weights
     model_AT.eval()
     frames_dir="output"
-    input_fps=
+    input_fps=59
     output_fps=120
-    output_dir="
+    output_dir="interpolated_frames2"
     interpolate_video(frames_dir,model_FC,input_fps,output_fps,output_dir)
-    final_video="
+    final_video="result6.mp4"
     frames_to_video(output_dir,final_video,output_fps)
 
 def main():
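The sort key added to laod_allframes (and mirrored in frames_to_video) is there because a plain lexicographic sort puts frame_10.png before frame_2.png. A standalone illustration of the same key, using hypothetical file names:

```python
import os

# Hypothetical frame names, just to show the ordering problem.
frames = ["frame_10.png", "frame_2.png", "frame_1.png"]

def numeric_key(name: str) -> int:
    # Same idea as the diff: the integer after the last underscore in the stem.
    return int(os.path.splitext(name)[0].split("_")[-1])

print(sorted(frames))                   # ['frame_1.png', 'frame_10.png', 'frame_2.png']
print(sorted(frames, key=numeric_key))  # ['frame_1.png', 'frame_2.png', 'frame_10.png']
```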
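frames_to_video now renumbers the sorted frames into a contiguous frame_0.png, frame_1.png, ... sequence before calling ffmpeg, since the -i frame_%d.png image pattern expects consecutive indices with no gaps. A condensed sketch of the same approach; it assumes the ffmpeg binary is on PATH, and the example call at the bottom uses the paths from solve():

```python
import os
import subprocess

def frames_to_video(frame_dir: str, output_video: str, fps: int) -> None:
    # Sort numerically, renumber contiguously, then hand ffmpeg a %d pattern.
    frame_files = sorted(
        (f for f in os.listdir(frame_dir) if f.endswith(".png")),
        key=lambda f: int(os.path.splitext(f)[0].split("_")[-1]),
    )
    for i, name in enumerate(frame_files):
        os.rename(os.path.join(frame_dir, name),
                  os.path.join(frame_dir, f"frame_{i}.png"))
    frame_pattern = os.path.join(frame_dir, "frame_%d.png")
    subprocess.run(
        ["ffmpeg", "-framerate", str(fps), "-i", frame_pattern,
         "-c:v", "libx264", "-pix_fmt", "yuv420p", output_video],
        check=True,  # raise CalledProcessError if ffmpeg exits non-zero
    )

# Mirrors the call in solve():
# frames_to_video("interpolated_frames2", "result6.mp4", 120)
```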