✨ feat: add SlideIn and SlideOut video transition effects and optimize the front-end implementation
This commit is contained in:
@@ -18,6 +18,15 @@ class VideoConcatMode(str, Enum):
|
||||
sequential = "sequential"
|
||||
|
||||
|
||||
class VideoTransitionMode(str, Enum):
    """Transition effect applied to clips when concatenating the final video.

    Member values are the literal strings exchanged with the front end /
    request params (e.g. ``"FadeIn"``).
    """

    # Explicit "None" string. The previous `none = None` only worked because
    # the `str` mixin coerces the value via str(None) -> "None"; spelling the
    # string out keeps the exact same member value without relying on that
    # implicit conversion.
    none = "None"
    # "Shuffle" selects a random transition per clip at render time.
    shuffle = "Shuffle"
    fade_in = "FadeIn"
    fade_out = "FadeOut"
    slide_in = "SlideIn"
    slide_out = "SlideOut"
|
||||
|
||||
|
||||
class VideoAspect(str, Enum):
|
||||
landscape = "16:9"
|
||||
portrait = "9:16"
|
||||
@@ -44,44 +53,6 @@ class MaterialInfo:
|
||||
duration: int = 0
|
||||
|
||||
|
||||
# VoiceNames = [
|
||||
# # zh-CN
|
||||
# "female-zh-CN-XiaoxiaoNeural",
|
||||
# "female-zh-CN-XiaoyiNeural",
|
||||
# "female-zh-CN-liaoning-XiaobeiNeural",
|
||||
# "female-zh-CN-shaanxi-XiaoniNeural",
|
||||
#
|
||||
# "male-zh-CN-YunjianNeural",
|
||||
# "male-zh-CN-YunxiNeural",
|
||||
# "male-zh-CN-YunxiaNeural",
|
||||
# "male-zh-CN-YunyangNeural",
|
||||
#
|
||||
# # "female-zh-HK-HiuGaaiNeural",
|
||||
# # "female-zh-HK-HiuMaanNeural",
|
||||
# # "male-zh-HK-WanLungNeural",
|
||||
# #
|
||||
# # "female-zh-TW-HsiaoChenNeural",
|
||||
# # "female-zh-TW-HsiaoYuNeural",
|
||||
# # "male-zh-TW-YunJheNeural",
|
||||
#
|
||||
# # en-US
|
||||
# "female-en-US-AnaNeural",
|
||||
# "female-en-US-AriaNeural",
|
||||
# "female-en-US-AvaNeural",
|
||||
# "female-en-US-EmmaNeural",
|
||||
# "female-en-US-JennyNeural",
|
||||
# "female-en-US-MichelleNeural",
|
||||
#
|
||||
# "male-en-US-AndrewNeural",
|
||||
# "male-en-US-BrianNeural",
|
||||
# "male-en-US-ChristopherNeural",
|
||||
# "male-en-US-EricNeural",
|
||||
# "male-en-US-GuyNeural",
|
||||
# "male-en-US-RogerNeural",
|
||||
# "male-en-US-SteffanNeural",
|
||||
# ]
|
||||
|
||||
|
||||
class VideoParams(BaseModel):
|
||||
"""
|
||||
{
|
||||
@@ -102,11 +73,14 @@ class VideoParams(BaseModel):
|
||||
video_terms: Optional[str | list] = None # Keywords used to generate the video
|
||||
video_aspect: Optional[VideoAspect] = VideoAspect.portrait.value
|
||||
video_concat_mode: Optional[VideoConcatMode] = VideoConcatMode.random.value
|
||||
video_transition_mode: Optional[VideoTransitionMode] = None
|
||||
video_clip_duration: Optional[int] = 5
|
||||
video_count: Optional[int] = 1
|
||||
|
||||
video_source: Optional[str] = "pexels"
|
||||
video_materials: Optional[List[MaterialInfo]] = None # Materials used to generate the video
|
||||
video_materials: Optional[List[MaterialInfo]] = (
|
||||
None # Materials used to generate the video
|
||||
)
|
||||
|
||||
video_language: Optional[str] = "" # auto detect
|
||||
|
||||
|
||||
@@ -164,6 +164,7 @@ def generate_final_videos(
|
||||
video_concat_mode = (
|
||||
params.video_concat_mode if params.video_count == 1 else VideoConcatMode.random
|
||||
)
|
||||
video_transition_mode = params.video_transition_mode
|
||||
|
||||
_progress = 50
|
||||
for i in range(params.video_count):
|
||||
@@ -178,6 +179,7 @@ def generate_final_videos(
|
||||
audio_file=audio_file,
|
||||
video_aspect=params.video_aspect,
|
||||
video_concat_mode=video_concat_mode,
|
||||
video_transition_mode=video_transition_mode,
|
||||
max_clip_duration=params.video_clip_duration,
|
||||
threads=params.n_threads,
|
||||
)
|
||||
|
||||
@@ -9,3 +9,13 @@ def fadein_transition(clip: Clip, t: float) -> Clip:
|
||||
# FadeOut
def fadeout_transition(clip: Clip, t: float) -> Clip:
    """Return *clip* with a ``vfx.FadeOut`` effect of duration *t* applied."""
    effects = [vfx.FadeOut(t)]
    return clip.with_effects(effects)
|
||||
|
||||
|
||||
# SlideIn
def slidein_transition(clip: Clip, t: float, side: str) -> Clip:
    """Return *clip* with a ``vfx.SlideIn`` effect (duration *t*, from *side*)."""
    effects = [vfx.SlideIn(t, side)]
    return clip.with_effects(effects)
|
||||
|
||||
|
||||
# SlideOut
def slideout_transition(clip: Clip, t: float, side: str) -> Clip:
    """Return *clip* with a ``vfx.SlideOut`` effect (duration *t*, to *side*)."""
    effects = [vfx.SlideOut(t, side)]
    return clip.with_effects(effects)
|
||||
|
||||
@@ -19,7 +19,14 @@ from moviepy.video.tools.subtitles import SubtitlesClip
|
||||
from PIL import ImageFont
|
||||
|
||||
from app.models import const
|
||||
from app.models.schema import MaterialInfo, VideoAspect, VideoConcatMode, VideoParams
|
||||
from app.models.schema import (
|
||||
MaterialInfo,
|
||||
VideoAspect,
|
||||
VideoConcatMode,
|
||||
VideoParams,
|
||||
VideoTransitionMode,
|
||||
)
|
||||
from app.services.utils import video_effects
|
||||
from app.utils import utils
|
||||
|
||||
|
||||
@@ -45,6 +52,7 @@ def combine_videos(
|
||||
audio_file: str,
|
||||
video_aspect: VideoAspect = VideoAspect.portrait,
|
||||
video_concat_mode: VideoConcatMode = VideoConcatMode.random,
|
||||
video_transition_mode: VideoTransitionMode = None,
|
||||
max_clip_duration: int = 5,
|
||||
threads: int = 2,
|
||||
) -> str:
|
||||
@@ -129,12 +137,34 @@ def combine_videos(
|
||||
f"resizing video to {video_width} x {video_height}, clip size: {clip_w} x {clip_h}"
|
||||
)
|
||||
|
||||
shuffle_side = random.choice(["left", "right", "top", "bottom"])
|
||||
logger.info(f"Using transition mode: {video_transition_mode}")
|
||||
if video_transition_mode.value == VideoTransitionMode.none.value:
|
||||
clip = clip
|
||||
elif video_transition_mode.value == VideoTransitionMode.fade_in.value:
|
||||
clip = video_effects.fadein_transition(clip, 1)
|
||||
elif video_transition_mode.value == VideoTransitionMode.fade_out.value:
|
||||
clip = video_effects.fadeout_transition(clip, 1)
|
||||
elif video_transition_mode.value == VideoTransitionMode.slide_in.value:
|
||||
clip = video_effects.slidein_transition(clip, 1, shuffle_side)
|
||||
elif video_transition_mode.value == VideoTransitionMode.slide_out.value:
|
||||
clip = video_effects.slideout_transition(clip, 1, shuffle_side)
|
||||
elif video_transition_mode.value == VideoTransitionMode.shuffle.value:
|
||||
transition_funcs = [
|
||||
lambda c: video_effects.fadein_transition(c, 1),
|
||||
lambda c: video_effects.fadeout_transition(c, 1),
|
||||
lambda c: video_effects.slidein_transition(c, 1, shuffle_side),
|
||||
lambda c: video_effects.slideout_transition(c, 1, shuffle_side),
|
||||
]
|
||||
shuffle_transition = random.choice(transition_funcs)
|
||||
clip = shuffle_transition(clip)
|
||||
|
||||
if clip.duration > max_clip_duration:
|
||||
clip = clip.subclipped(0, max_clip_duration)
|
||||
|
||||
clips.append(clip)
|
||||
video_duration += clip.duration
|
||||
|
||||
clips = [CompositeVideoClip([clip]) for clip in clips]
|
||||
video_clip = concatenate_videoclips(clips)
|
||||
video_clip = video_clip.with_fps(30)
|
||||
logger.info("writing")
|
||||
|
||||
Reference in New Issue
Block a user