- Converted layout from pack() to grid() for better control and responsiveness.
- Set minimum window sizes for MainApplication, ClipSelectionWindow, and ProgressWindow.
- Added dynamic font sizing and text wrapping based on window width.
- Implemented window resize handlers to adjust layouts and element sizes dynamically.
- Enhanced button and frame arrangements for better usability and touch-friendliness.
- Improved scrolling behavior in ShortsGeneratorGUI with a scrollable container.
- Ensured all elements adapt to various screen sizes for a consistent user experience.
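The resize handling described above follows a standard Tkinter pattern: give rows and columns grid() weights so they absorb extra space, then bind a `<Configure>` handler that rescales fonts and wrap lengths from the current window width. The snippet below is a minimal sketch of that pattern, not code from this file; the widget names and scaling factors are invented for the example.

import tkinter as tk
from tkinter import font

root = tk.Tk()
root.minsize(640, 480)             # minimum window size, as the changelog describes
root.columnconfigure(0, weight=1)  # let column 0 absorb extra width
root.rowconfigure(0, weight=1)

body_font = font.Font(family="Arial", size=11)
label = tk.Label(root, text="Resizable content", font=body_font)
label.grid(row=0, column=0, sticky="nsew", padx=10, pady=10)

def on_resize(event):
    # <Configure> also fires for children; react only to the window itself
    if event.widget is root:
        body_font.configure(size=max(9, event.width // 60))  # illustrative factor
        label.configure(wraplength=event.width - 40)

root.bind("<Configure>", on_resize)
root.mainloop()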
import os
import numpy as np
from moviepy import VideoFileClip, TextClip, CompositeVideoClip
from moviepy.video.fx import FadeIn, FadeOut, Resize
from moviepy.audio.fx import MultiplyVolume
from faster_whisper import WhisperModel
import tkinter as tk
from tkinter import filedialog, messagebox, ttk
import threading
import cv2
from scipy import signal
import librosa
import glob
import json
from datetime import datetime
from PIL import Image, ImageTk, ImageDraw, ImageFont
import time

class ToolTip:
    """Create a tooltip for a given widget"""
    def __init__(self, widget, text='widget info', side='right'):
        self.widget = widget
        self.text = text
        self.side = side
        self.widget.bind("<Enter>", self.enter)
        self.widget.bind("<Leave>", self.leave)
        self.tipwindow = None

    def enter(self, event=None):
        self.showtip()

    def leave(self, event=None):
        self.hidetip()

    def showtip(self):
        if self.tipwindow or not self.text:
            return

        # Get widget position
        x = self.widget.winfo_rootx()
        y = self.widget.winfo_rooty()
        w = self.widget.winfo_width()
        h = self.widget.winfo_height()

        # Position tooltip based on side preference
        if self.side == 'right':
            x = x + w + 10  # 10px to the right of widget
            y = y
        else:
            x = x + 25
            y = y + h + 5

        self.tipwindow = tw = tk.Toplevel(self.widget)
        tw.wm_overrideredirect(True)
        tw.wm_geometry("+%d+%d" % (x, y))
        label = tk.Label(tw, text=self.text, justify=tk.LEFT,
                         background="#ffffe0", relief=tk.SOLID, borderwidth=1,
                         font=("Arial", "9", "normal"), wraplength=350)
        label.pack(ipadx=5, ipady=3)

    def hidetip(self):
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()

class ComboboxTooltip:
    """Special tooltip for combobox that shows on listbox hover"""
    def __init__(self, combobox, descriptions):
        self.combobox = combobox
        self.descriptions = descriptions
        self.tipwindow = None
        self.bound_listbox = None

        # Bind to combobox events
        self.combobox.bind("<Button-1>", self.on_click)
        self.combobox.bind("<KeyPress>", self.on_keypress)

    def on_click(self, event):
        # Try to find the listbox when dropdown opens
        self.combobox.after(50, self.bind_listbox)

    def on_keypress(self, event):
        # Handle keyboard navigation
        self.combobox.after(50, self.bind_listbox)

    def bind_listbox(self):
        # Find the listbox widget more reliably
        try:
            # Look through all toplevel windows for the combobox popdown
            for window in self.combobox.winfo_toplevel().winfo_children():
                window_class = window.winfo_class()
                if window_class == 'Toplevel':
                    # Found a toplevel, look for a listbox inside
                    for child in window.winfo_children():
                        if child.winfo_class() == 'Listbox':
                            if self.bound_listbox != child:
                                self.bound_listbox = child
                                child.bind("<Motion>", self.on_listbox_motion)
                                child.bind("<Leave>", self.on_listbox_leave)
                                child.bind("<ButtonRelease-1>", self.on_listbox_leave)
                            return
        except Exception:
            # Fallback method - try to find any listbox
            try:
                # Alternative approach: look for the popdown frame
                for child in self.combobox.tk.call('winfo', 'children', '.'):
                    if 'popdown' in str(child):
                        popdown = self.combobox.nametowidget(child)
                        for subchild in popdown.winfo_children():
                            if subchild.winfo_class() == 'Listbox':
                                if self.bound_listbox != subchild:
                                    self.bound_listbox = subchild
                                    subchild.bind("<Motion>", self.on_listbox_motion)
                                    subchild.bind("<Leave>", self.on_listbox_leave)
                                    subchild.bind("<ButtonRelease-1>", self.on_listbox_leave)
                                return
            except Exception:
                pass

    def on_listbox_motion(self, event):
        try:
            listbox = event.widget
            index = listbox.nearest(event.y)
            if 0 <= index < len(self.combobox['values']):
                selection = self.combobox['values'][index]
                if selection in self.descriptions:
                    self.show_tooltip(event, self.descriptions[selection])
        except Exception:
            pass

    def on_listbox_leave(self, event):
        self.hide_tooltip()

    def show_tooltip(self, event, text):
        self.hide_tooltip()  # Hide any existing tooltip

        try:
            x = event.widget.winfo_rootx() + event.widget.winfo_width() + 10
            y = event.widget.winfo_rooty() + event.y - 20

            self.tipwindow = tw = tk.Toplevel(event.widget)
            tw.wm_overrideredirect(True)
            tw.wm_geometry("+%d+%d" % (x, y))
            label = tk.Label(tw, text=text, justify=tk.LEFT,
                             background="#ffffe0", relief=tk.SOLID, borderwidth=1,
                             font=("Arial", "9", "normal"), wraplength=350)
            label.pack(ipadx=5, ipady=3)
        except Exception:
            pass

    def hide_tooltip(self):
        if self.tipwindow:
            try:
                self.tipwindow.destroy()
            except Exception:
                pass
            self.tipwindow = None

def detect_loud_moments(video_path, chunk_duration=5, threshold_db=10):
    print("🔍 Analyzing audio...")
    clip = VideoFileClip(video_path)
    audio = clip.audio.to_soundarray(fps=44100)
    volume = np.linalg.norm(audio, axis=1)
    chunk_size = int(chunk_duration * 44100)

    loud_chunks = []
    max_db = -float('inf')
    for i in range(0, len(volume), chunk_size):
        chunk = volume[i:i + chunk_size]
        db = 20 * np.log10(np.mean(chunk) + 1e-10)
        max_db = max(max_db, db)
        if db > threshold_db:
            start = i / 44100
            loud_chunks.append((start, min(start + chunk_duration, clip.duration)))

    print(f"🔊 Max volume found: {max_db:.2f} dB, threshold: {threshold_db} dB")
    print(f"📈 Found {len(loud_chunks)} loud moments")
    clip.close()
    return loud_chunks
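# Hypothetical usage sketch (values illustrative): pick 5 s chunks louder than
# -20 dB relative to full scale.
#   moments = detect_loud_moments("input.mp4", chunk_duration=5, threshold_db=-20)
#   # -> e.g. [(35.0, 40.0), (120.0, 125.0)] as (start, end) times in seconds
# For float audio normalized to [-1, 1], 20*log10(mean level) is usually
# negative, so positive thresholds select only extremely loud material.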

def detect_scene_changes(video_path, chunk_duration=5, threshold=0.3):
    """Detect dramatic visual scene changes"""
    print("🎬 Analyzing scene changes...")
    clip = VideoFileClip(video_path)

    # Sample frames at regular intervals
    sample_rate = 2  # Check every 2 seconds
    times = np.arange(0, clip.duration, sample_rate)

    scene_changes = []
    total_frames = len(times) - 1

    for i, t in enumerate(times[:-1]):
        try:
            # Periodic progress output
            if i % 10 == 0:
                print(f"🎬 Processing frame {i+1}/{total_frames}...")

            # Get current and next frame
            frame1 = clip.get_frame(t)
            frame2 = clip.get_frame(times[i + 1])

            # Convert to grayscale and resize for faster processing
            gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
            gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
            gray1 = cv2.resize(gray1, (160, 90))  # Small size for speed
            gray2 = cv2.resize(gray2, (160, 90))

            # Calculate structural similarity difference
            diff = np.mean(np.abs(gray1.astype(float) - gray2.astype(float))) / 255.0

            if diff > threshold:
                start = max(0, t - chunk_duration/2)
                end = min(clip.duration, t + chunk_duration/2)
                scene_changes.append((start, end))

        except Exception as e:
            print(f"⚠️ Frame analysis error at {t:.1f}s: {e}")
            continue

    print(f"🎬 Found {len(scene_changes)} scene changes")
    clip.close()
    return scene_changes
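# The change metric above is the mean absolute pixel difference normalized to
# [0, 1]: identical frames score 0.0 and a full black-to-white cut scores 1.0,
# so the default threshold of 0.3 roughly means "a third of the image changed"
# between two samples taken 2 seconds apart.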

def detect_motion_intensity(video_path, chunk_duration=5, threshold=20000000):
    """Detect high motion/action scenes"""
    print("🏃 Analyzing motion intensity...")
    clip = VideoFileClip(video_path)

    sample_rate = 1  # Check every second
    times = np.arange(0, clip.duration - 1, sample_rate)

    motion_scenes = []
    total_frames = len(times)

    for i, t in enumerate(times):
        try:
            # Periodic progress output
            if i % 20 == 0:
                print(f"🏃 Processing frame {i+1}/{total_frames}...")

            # Get consecutive frames
            frame1 = clip.get_frame(t)
            frame2 = clip.get_frame(t + 1)

            # Convert to grayscale and resize
            gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
            gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
            gray1 = cv2.resize(gray1, (320, 180))
            gray2 = cv2.resize(gray2, (320, 180))

            # Track random sample points between frames and measure their
            # displacement (Lucas-Kanade sparse optical flow)
            prev_pts = np.random.rand(100, 1, 2).astype(np.float32) * np.float32([320, 180])
            next_pts, status, _ = cv2.calcOpticalFlowPyrLK(gray1, gray2, prev_pts, None)

            if next_pts is not None:
                tracked = (next_pts - prev_pts).reshape(-1, 2)[status.ravel() == 1]
                motion_magnitude = np.sum(np.linalg.norm(tracked, axis=1))

                if motion_magnitude > threshold:
                    start = max(0, t - chunk_duration/2)
                    end = min(clip.duration, t + chunk_duration/2)
                    motion_scenes.append((start, end))

        except Exception as e:
            print(f"⚠️ Motion analysis error at {t:.1f}s: {e}")
            continue

    print(f"🏃 Found {len(motion_scenes)} motion scenes")
    clip.close()
    return motion_scenes

def detect_speech_emotion(video_path, chunk_duration=5):
    """Detect emotional speech segments using faster_whisper"""
    print("🗣️ Analyzing speech emotion...")

    try:
        # Load Whisper model for speech detection
        model = WhisperModel("base", device="cpu", compute_type="int8")

        # Extract audio temporarily
        temp_audio = "temp_audio.wav"
        clip = VideoFileClip(video_path)
        audio = clip.audio
        audio.write_audiofile(temp_audio, logger=None)

        # Transcribe with word-level timestamps
        segments, _ = model.transcribe(temp_audio, word_timestamps=True)

        emotional_segments = []

        for segment in segments:
            # Look for emotional indicators in speech patterns
            text = segment.text.lower()

            # Check for emotional keywords and speech patterns
            emotional_words = ['amazing', 'incredible', 'wow', 'unbelievable', 'shocking',
                               'fantastic', 'awesome', 'terrible', 'horrible', 'beautiful']

            has_emotion = any(word in text for word in emotional_words)
            has_exclamation = '!' in segment.text
            is_question = '?' in segment.text

            if has_emotion or has_exclamation or is_question:
                start = max(0, segment.start - chunk_duration/2)
                end = min(clip.duration, segment.end + chunk_duration/2)
                emotional_segments.append((start, end))

        # Clean up
        audio.close()
        clip.close()
        if os.path.exists(temp_audio):
            os.remove(temp_audio)

        print(f"🗣️ Found {len(emotional_segments)} emotional speech segments")
        return emotional_segments

    except Exception as e:
        print(f"⚠️ Speech analysis error: {e}")
        return []

def detect_audio_peaks(video_path, chunk_duration=5):
    """Detect audio frequency peaks and interesting sounds"""
    print("🎵 Analyzing audio peaks...")

    try:
        # Extract audio
        clip = VideoFileClip(video_path)
        audio = clip.audio

        # Convert to numpy array
        temp_audio = "temp_peak_audio.wav"
        audio.write_audiofile(temp_audio, logger=None)

        # Load with librosa
        y, sr = librosa.load(temp_audio)

        # Analyze spectral features
        hop_length = 512
        frame_length = 2048

        # Calculate spectral centroid (brightness)
        spectral_centroids = librosa.feature.spectral_centroid(y=y, sr=sr, hop_length=hop_length)[0]

        # Calculate RMS energy
        rms = librosa.feature.rms(y=y, hop_length=hop_length)[0]

        # Find frames with high spectral activity
        time_frames = librosa.frames_to_time(np.arange(len(spectral_centroids)), sr=sr, hop_length=hop_length)

        peak_segments = []

        # Threshold for interesting audio
        centroid_threshold = np.percentile(spectral_centroids, 85)
        rms_threshold = np.percentile(rms, 80)

        for frame_time, centroid, energy in zip(time_frames, spectral_centroids, rms):
            if centroid > centroid_threshold and energy > rms_threshold:
                start = max(0, frame_time - chunk_duration/2)
                end = min(clip.duration, frame_time + chunk_duration/2)
                peak_segments.append((start, end))

        # Clean up
        audio.close()
        clip.close()
        if os.path.exists(temp_audio):
            os.remove(temp_audio)

        print(f"🎵 Found {len(peak_segments)} audio peak segments")
        return peak_segments

    except Exception as e:
        print(f"⚠️ Audio analysis error: {e}")
        return []

def detect_combined_moments(video_path, chunk_duration=5):
    """Combine multiple detection methods for best results"""
    print("🎯 Running combined analysis...")

    try:
        # Run multiple detection methods
        loud_moments = detect_loud_moments(video_path, chunk_duration)
        scene_changes = detect_scene_changes(video_path, chunk_duration)

        # Combine and deduplicate
        all_moments = loud_moments + scene_changes

        # Simple deduplication by merging overlapping segments
        if not all_moments:
            return []

        # Sort by start time
        all_moments.sort(key=lambda x: x[0])

        # Merge overlapping segments
        merged = [all_moments[0]]
        for start, end in all_moments[1:]:
            last_start, last_end = merged[-1]
            if start <= last_end + 1:  # Allow 1 second gap
                merged[-1] = (last_start, max(last_end, end))
            else:
                merged.append((start, end))

        print(f"🎯 Combined analysis found {len(merged)} interesting moments")
        return merged

    except Exception as e:
        print(f"⚠️ Combined analysis error: {e}")
        return []
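# Merge example (illustrative): with the 1-second gap allowance above,
#   [(10, 15), (14, 20), (21, 25), (40, 45)]  ->  [(10, 25), (40, 45)]
# because 14 <= 15 + 1 and 21 <= 20 + 1, while 40 > 25 + 1.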

def detect_scene_changes_with_progress(video_path, chunk_duration=5, threshold=0.3, progress_callback=None):
    """Detect dramatic visual scene changes with progress updates"""
    print("🎬 Analyzing scene changes...")
    clip = VideoFileClip(video_path)

    # Sample frames at regular intervals
    sample_rate = 2  # Check every 2 seconds
    times = np.arange(0, clip.duration, sample_rate)

    scene_changes = []
    total_frames = len(times) - 1

    for i, t in enumerate(times[:-1]):
        try:
            # Update progress every few frames
            if progress_callback and i % 5 == 0:
                progress = (i / total_frames) * 100
                progress_callback(progress, f"🎬 Analyzing scene changes... Frame {i+1}/{total_frames}")

            # Get current and next frame
            frame1 = clip.get_frame(t)
            frame2 = clip.get_frame(times[i + 1])

            # Convert to grayscale and resize for faster processing
            gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
            gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
            gray1 = cv2.resize(gray1, (160, 90))  # Small size for speed
            gray2 = cv2.resize(gray2, (160, 90))

            # Calculate structural similarity difference
            diff = np.mean(np.abs(gray1.astype(float) - gray2.astype(float))) / 255.0

            if diff > threshold:
                start = max(0, t - chunk_duration/2)
                end = min(clip.duration, t + chunk_duration/2)
                scene_changes.append((start, end))

        except Exception as e:
            print(f"⚠️ Frame analysis error at {t:.1f}s: {e}")
            continue

    if progress_callback:
        progress_callback(100, f"🎬 Found {len(scene_changes)} scene changes")

    print(f"🎬 Found {len(scene_changes)} scene changes")
    clip.close()
    return scene_changes

def detect_motion_intensity_with_progress(video_path, chunk_duration=5, threshold=0.15, progress_callback=None):
    """Detect high motion/action moments with progress updates"""
    print("🏃 Analyzing motion intensity...")
    clip = VideoFileClip(video_path)

    sample_rate = 1  # Check every second
    times = np.arange(0, clip.duration - 1, sample_rate)

    motion_moments = []

    for i, t in enumerate(times):
        try:
            # Update progress every 10 seconds
            if progress_callback and i % 10 == 0:
                progress = (i / len(times)) * 100
                progress_callback(progress, f"🏃 Analyzing motion... {i+1}/{len(times)} seconds")

            # Get two consecutive frames
            frame1 = clip.get_frame(t)
            frame2 = clip.get_frame(t + 0.5)  # Half second later

            # Convert to grayscale and resize
            gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
            gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
            gray1 = cv2.resize(gray1, (160, 90))
            gray2 = cv2.resize(gray2, (160, 90))

            # Track random sample points and measure their mean displacement
            # (Lucas-Kanade sparse optical flow)
            prev_pts = np.random.rand(100, 1, 2).astype(np.float32) * np.float32([160, 90])
            next_pts, status, _ = cv2.calcOpticalFlowPyrLK(gray1, gray2, prev_pts, None)

            if next_pts is not None:
                tracked = (next_pts - prev_pts).reshape(-1, 2)[status.ravel() == 1]
                if len(tracked):
                    motion_magnitude = np.mean(np.linalg.norm(tracked, axis=1))

                    if motion_magnitude > threshold:
                        start = max(0, t - chunk_duration/2)
                        end = min(clip.duration, t + chunk_duration/2)
                        motion_moments.append((start, end))

        except Exception as e:
            print(f"⚠️ Motion analysis error at {t:.1f}s: {e}")
            continue

    if progress_callback:
        progress_callback(100, f"🏃 Found {len(motion_moments)} high-motion moments")

    print(f"🏃 Found {len(motion_moments)} high-motion moments")
    clip.close()
    return motion_moments

def detect_speech_emotion_with_progress(video_path, chunk_duration=5, progress_callback=None):
    """Detect emotional/excited speech patterns with progress updates"""
    print("😄 Analyzing speech emotions...")

    if progress_callback:
        progress_callback(10, "😄 Initializing speech recognition...")

    # Use Whisper to get detailed speech analysis
    model = WhisperModel("base", device="cpu", compute_type="int8")

    if progress_callback:
        progress_callback(30, "😄 Transcribing audio...")

    segments, _ = model.transcribe(video_path, beam_size=5, vad_filter=True, word_timestamps=True)

    emotional_moments = []
    excitement_keywords = ['wow', 'amazing', 'incredible', 'unbelievable', 'awesome', 'fantastic',
                           'omg', 'what', 'no way', 'crazy', 'insane', 'perfect', 'yes', 'exactly']

    segments_list = list(segments)

    if progress_callback:
        progress_callback(50, f"😄 Processing {len(segments_list)} speech segments...")

    for i, segment in enumerate(segments_list):
        if progress_callback and i % 10 == 0:
            progress = 50 + (i / len(segments_list)) * 50
            progress_callback(progress, f"😄 Analyzing speech... {i+1}/{len(segments_list)} segments")

        text = segment.text.lower()

        # Check for excitement keywords
        has_keywords = any(keyword in text for keyword in excitement_keywords)

        # Check for multiple exclamation-worthy patterns
        has_caps = any(word.isupper() for word in segment.text.split())
        has_punctuation = '!' in segment.text or '?' in segment.text
        is_short_excited = len(text.split()) <= 5 and (has_keywords or has_caps)

        if has_keywords or has_punctuation or is_short_excited:
            start = max(0, segment.start - chunk_duration/2)
            end = segment.end + chunk_duration/2  # not clamped to clip duration here
            emotional_moments.append((start, end))

    if progress_callback:
        progress_callback(100, f"😄 Found {len(emotional_moments)} emotional speech moments")

    print(f"😄 Found {len(emotional_moments)} emotional speech moments")
    return emotional_moments

def detect_audio_peaks_with_progress(video_path, chunk_duration=5, progress_callback=None):
    """Detect sudden audio peaks with progress updates"""
    print("🎵 Analyzing audio peaks...")

    if progress_callback:
        progress_callback(10, "🎵 Loading audio...")

    clip = VideoFileClip(video_path)
    audio = clip.audio.to_soundarray(fps=22050)  # Lower sample rate for speed

    # Convert to mono if stereo
    if len(audio.shape) > 1:
        audio = np.mean(audio, axis=1)

    if progress_callback:
        progress_callback(40, "🎵 Finding audio peaks...")

    # Find spectral peaks (bass, treble spikes)
    peaks, _ = signal.find_peaks(np.abs(audio), height=np.percentile(np.abs(audio), 95))

    peak_moments = []
    prev_peak = 0

    if progress_callback:
        progress_callback(70, f"🎵 Processing {len(peaks)} peaks...")

    for i, peak in enumerate(peaks):
        if progress_callback and i % 1000 == 0:
            progress = 70 + (i / len(peaks)) * 30
            progress_callback(progress, f"🎵 Processing peaks... {i}/{len(peaks)}")

        peak_time = peak / 22050

        # Avoid too close peaks
        if peak_time - prev_peak > chunk_duration:
            start = max(0, peak_time - chunk_duration/2)
            end = min(clip.duration, peak_time + chunk_duration/2)
            peak_moments.append((start, end))
            prev_peak = peak_time

    if progress_callback:
        progress_callback(100, f"🎵 Found {len(peak_moments)} audio peak moments")

    print(f"🎵 Found {len(peak_moments)} audio peak moments")
    clip.close()
    return peak_moments
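# Scale note: peak indices returned by find_peaks are sample offsets at
# 22050 Hz, so peak / 22050 converts to seconds, and the 95th-percentile
# height keeps only the loudest ~5% of samples as candidates before the
# minimum-spacing filter above thins them out.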

def detect_combined_intensity_with_progress(video_path, chunk_duration=5, weights=None, progress_callback=None):
    """Combine multiple detection methods with progress updates"""
    print("🎯 Running comprehensive moment analysis...")

    if weights is None:
        weights = {'loud': 0.3, 'scene': 0.2, 'motion': 0.2, 'speech': 0.2, 'peaks': 0.1}

    # Sub-progress callback for each method
    def sub_progress(method_weight, base_percent):
        def callback(percent, status):
            if progress_callback:
                total_percent = base_percent + (percent / 100) * method_weight
                progress_callback(total_percent, f"🎯 {status}")
        return callback

    # Get all detection results with progress
    if progress_callback:
        progress_callback(5, "🎯 Analyzing loud moments...")
    loud_moments = detect_loud_moments(video_path, chunk_duration, threshold_db=5)

    if progress_callback:
        progress_callback(15, "🎯 Analyzing scene changes...")
    scene_moments = detect_scene_changes_with_progress(video_path, chunk_duration, progress_callback=sub_progress(20, 15))

    if progress_callback:
        progress_callback(35, "🎯 Analyzing motion...")
    motion_moments = detect_motion_intensity_with_progress(video_path, chunk_duration, progress_callback=sub_progress(20, 35))

    if progress_callback:
        progress_callback(55, "🎯 Analyzing speech...")
    speech_moments = detect_speech_emotion_with_progress(video_path, chunk_duration, progress_callback=sub_progress(20, 55))

    if progress_callback:
        progress_callback(75, "🎯 Analyzing audio peaks...")
    peak_moments = detect_audio_peaks_with_progress(video_path, chunk_duration, progress_callback=sub_progress(15, 75))

    if progress_callback:
        progress_callback(90, "🎯 Combining results...")

    # Create time-based scoring
    clip = VideoFileClip(video_path)
    duration = clip.duration
    clip.close()

    # Score each second of the video
    time_scores = {}

    for moments, weight in [(loud_moments, weights['loud']),
                            (scene_moments, weights['scene']),
                            (motion_moments, weights['motion']),
                            (speech_moments, weights['speech']),
                            (peak_moments, weights['peaks'])]:
        for start, end in moments:
            for t in range(int(start), int(end) + 1):
                if t not in time_scores:
                    time_scores[t] = 0
                time_scores[t] += weight

    # Find the highest scoring segments
    if not time_scores:
        if progress_callback:
            progress_callback(100, "🎯 No moments found, using loud moments fallback")
        return loud_moments  # Fallback to loud moments

    # Get top scoring time periods
    sorted_times = sorted(time_scores.items(), key=lambda x: x[1], reverse=True)

    combined_moments = []
    used_times = set()

    for time_sec, score in sorted_times:
        if time_sec not in used_times and score > 0.3:  # Minimum threshold
            start = max(0, time_sec - chunk_duration/2)
            end = min(duration, time_sec + chunk_duration/2)
            combined_moments.append((start, end))

            # Mark nearby times as used to avoid overlap
            for t in range(max(0, time_sec - chunk_duration),
                           min(int(duration), time_sec + chunk_duration)):
                used_times.add(t)

    if progress_callback:
        progress_callback(100, f"🎯 Found {len(combined_moments)} high-intensity combined moments")

    print(f"🎯 Found {len(combined_moments)} high-intensity combined moments")
    return combined_moments
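# Scoring example (illustrative): with the default weights, a second covered by
# both a loud moment (0.3) and a speech moment (0.2) scores 0.5 and passes the
# 0.3 cutoff; a second covered only by an audio peak (0.1) is discarded.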

# NOTE: the three functions below redefine earlier versions of the same names;
# since they are defined later in the module, these are the implementations
# actually bound at runtime.
def detect_motion_intensity(video_path, chunk_duration=5, threshold=0.15):
    """Detect high motion/action moments"""
    print("🏃 Analyzing motion intensity...")
    clip = VideoFileClip(video_path)

    sample_rate = 1  # Check every second
    times = np.arange(0, clip.duration - 1, sample_rate)

    motion_moments = []

    for i, t in enumerate(times):
        try:
            # Periodic UI update to prevent freezing
            if i % 20 == 0:  # Every 20 seconds
                print(f"🏃 Processing motion at {t:.1f}s ({i+1}/{len(times)})...")

            # Get two consecutive frames
            frame1 = clip.get_frame(t)
            frame2 = clip.get_frame(t + 0.5)  # Half second later

            # Convert to grayscale and resize
            gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
            gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
            gray1 = cv2.resize(gray1, (160, 90))
            gray2 = cv2.resize(gray2, (160, 90))

            # Track random sample points and measure their mean displacement
            # (Lucas-Kanade sparse optical flow)
            prev_pts = np.random.rand(100, 1, 2).astype(np.float32) * np.float32([160, 90])
            next_pts, status, _ = cv2.calcOpticalFlowPyrLK(gray1, gray2, prev_pts, None)

            if next_pts is not None:
                tracked = (next_pts - prev_pts).reshape(-1, 2)[status.ravel() == 1]
                if len(tracked):
                    motion_magnitude = np.mean(np.linalg.norm(tracked, axis=1))

                    if motion_magnitude > threshold:
                        start = max(0, t - chunk_duration/2)
                        end = min(clip.duration, t + chunk_duration/2)
                        motion_moments.append((start, end))

        except Exception as e:
            print(f"⚠️ Motion analysis error at {t:.1f}s: {e}")
            continue

    print(f"🏃 Found {len(motion_moments)} high-motion moments")
    clip.close()
    return motion_moments

def detect_speech_emotion(video_path, chunk_duration=5):
    """Detect emotional/excited speech patterns"""
    print("😄 Analyzing speech emotions...")

    print("😄 Initializing speech recognition...")
    # Use Whisper to get detailed speech analysis
    model = WhisperModel("base", device="cpu", compute_type="int8")

    print("😄 Transcribing audio...")
    segments, _ = model.transcribe(video_path, beam_size=5, vad_filter=True, word_timestamps=True)

    emotional_moments = []
    excitement_keywords = ['wow', 'amazing', 'incredible', 'unbelievable', 'awesome', 'fantastic',
                           'omg', 'what', 'no way', 'crazy', 'insane', 'perfect', 'yes', 'exactly']

    segments_list = list(segments)
    print(f"😄 Processing {len(segments_list)} speech segments...")

    for i, segment in enumerate(segments_list):
        if i % 10 == 0:  # Every 10 segments
            print(f"😄 Processing segment {i+1}/{len(segments_list)}...")

        text = segment.text.lower()

        # Check for excitement keywords
        has_keywords = any(keyword in text for keyword in excitement_keywords)

        # Check for multiple exclamation-worthy patterns
        has_caps = any(word.isupper() for word in segment.text.split())
        has_punctuation = '!' in segment.text or '?' in segment.text
        is_short_excited = len(text.split()) <= 5 and (has_keywords or has_caps)

        if has_keywords or has_punctuation or is_short_excited:
            start = max(0, segment.start - chunk_duration/2)
            end = segment.end + chunk_duration/2  # not clamped to clip duration here
            emotional_moments.append((start, end))

    print(f"😄 Found {len(emotional_moments)} emotional speech moments")
    return emotional_moments

def detect_audio_peaks(video_path, chunk_duration=5):
    """Detect sudden audio peaks (bass drops, beats, impacts)"""
    print("🎵 Analyzing audio peaks...")

    print("🎵 Loading audio...")
    clip = VideoFileClip(video_path)
    audio = clip.audio.to_soundarray(fps=22050)  # Lower sample rate for speed

    # Convert to mono if stereo
    if len(audio.shape) > 1:
        audio = np.mean(audio, axis=1)

    print("🎵 Finding audio peaks...")
    # Find spectral peaks (bass, treble spikes)
    peaks, _ = signal.find_peaks(np.abs(audio), height=np.percentile(np.abs(audio), 95))

    peak_moments = []
    prev_peak = 0

    for i, peak in enumerate(peaks):
        if i % 1000 == 0:  # Every 1000 peaks
            print(f"🎵 Processing peaks... {i}/{len(peaks)}")

        peak_time = peak / 22050

        # Avoid too close peaks
        if peak_time - prev_peak > chunk_duration:
            start = max(0, peak_time - chunk_duration/2)
            end = min(clip.duration, peak_time + chunk_duration/2)
            peak_moments.append((start, end))
            prev_peak = peak_time

    print(f"🎵 Found {len(peak_moments)} audio peak moments")
    clip.close()
    return peak_moments

def detect_combined_intensity(video_path, chunk_duration=5, weights=None):
    """Combine multiple detection methods for best moments"""
    print("🎯 Running comprehensive moment analysis...")

    if weights is None:
        weights = {'loud': 0.3, 'scene': 0.2, 'motion': 0.2, 'speech': 0.2, 'peaks': 0.1}

    # Get all detection results with progress updates
    print("🎯 Analyzing loud moments...")
    loud_moments = detect_loud_moments(video_path, chunk_duration, threshold_db=5)  # Lower threshold

    print("🎯 Analyzing scene changes...")
    scene_moments = detect_scene_changes(video_path, chunk_duration)

    print("🎯 Analyzing motion...")
    motion_moments = detect_motion_intensity(video_path, chunk_duration)

    print("🎯 Analyzing speech...")
    speech_moments = detect_speech_emotion(video_path, chunk_duration)

    print("🎯 Analyzing audio peaks...")
    peak_moments = detect_audio_peaks(video_path, chunk_duration)

    print("🎯 Combining results...")

    # Create time-based scoring
    clip = VideoFileClip(video_path)
    duration = clip.duration
    clip.close()

    # Score each second of the video
    time_scores = {}

    for moments, weight in [(loud_moments, weights['loud']),
                            (scene_moments, weights['scene']),
                            (motion_moments, weights['motion']),
                            (speech_moments, weights['speech']),
                            (peak_moments, weights['peaks'])]:
        for start, end in moments:
            for t in range(int(start), int(end) + 1):
                if t not in time_scores:
                    time_scores[t] = 0
                time_scores[t] += weight

    # Find the highest scoring segments
    if not time_scores:
        print("🎯 No moments found, using loud moments fallback")
        return loud_moments  # Fallback to loud moments

    # Get top scoring time periods
    sorted_times = sorted(time_scores.items(), key=lambda x: x[1], reverse=True)

    combined_moments = []
    used_times = set()

    for time_sec, score in sorted_times:
        if time_sec not in used_times and score > 0.3:  # Minimum threshold
            start = max(0, time_sec - chunk_duration/2)
            end = min(duration, time_sec + chunk_duration/2)
            combined_moments.append((start, end))

            # Mark nearby times as used to avoid overlap
            for t in range(max(0, time_sec - chunk_duration),
                           min(int(duration), time_sec + chunk_duration)):
                used_times.add(t)

    print(f"🎯 Found {len(combined_moments)} high-intensity combined moments")
    return combined_moments

def transcribe_and_extract_subtitles(video_path, start, end):
    print(f"🗣️ Transcribing audio from {start:.2f}s to {end:.2f}s...")
    model = WhisperModel("base", device="cpu", compute_type="int8")
    segments, _ = model.transcribe(video_path, beam_size=5, language="en", vad_filter=True)

    subtitles = []
    for segment in segments:
        if start <= segment.start <= end:
            subtitles.append((segment.start - start, segment.end - start, segment.text))
    return subtitles
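# Hypothetical usage: timestamps come back relative to the clip, e.g. for a
# clip spanning 60-65 s, a segment spoken at 61.2-63.0 s is returned as
#   (1.2, 3.0, " spoken text")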

def create_short_clip(video_path, start, end, subtitles, output_path):
    print(f"🎬 Creating short: {output_path}")
    clip = VideoFileClip(video_path).subclipped(start, end)
    video_duration = clip.duration
    print(f"📏 Video clip duration: {video_duration:.2f}s")

    # Scale to 1920px tall, then center-crop the resized frame to 1080px wide
    vertical_clip = clip.resized(height=1920)
    vertical_clip = vertical_clip.cropped(width=1080, x_center=vertical_clip.w / 2)
    clips = [vertical_clip]

    subtitle_y_px = 1550  # Fixed Y position for subtitles

    for (s, e, text) in subtitles:
        try:
            subtitle_start = max(0, s)
            subtitle_end = min(e, video_duration)

            if subtitle_start >= video_duration or subtitle_end <= subtitle_start:
                print(f"⚠️ Skipping subtitle outside video duration: {text[:30]}...")
                continue

            words = text.strip().split()
            if not words:
                continue

            # Split into small readable chunks (at most 2 words or ~25 characters)
            chunks = []
            current_chunk = []
            for word in words:
                current_chunk.append(word)
                if len(current_chunk) >= 2 or len(' '.join(current_chunk)) > 25:
                    chunks.append(' '.join(current_chunk))
                    current_chunk = []
            if current_chunk:
                chunks.append(' '.join(current_chunk))

            chunk_duration = (subtitle_end - subtitle_start) / len(chunks)

            for chunk_idx, chunk_text in enumerate(chunks):
                chunk_start = subtitle_start + (chunk_idx * chunk_duration)
                chunk_end = min(chunk_start + chunk_duration, subtitle_end)

                chunk_words = chunk_text.split()

                # Base subtitle
                base_subtitle = TextClip(
                    text=chunk_text.upper(),
                    font_size=65,
                    color='white',
                    stroke_color='black',
                    stroke_width=5
                )
                text_width, _ = base_subtitle.size
                base_subtitle = base_subtitle.with_start(chunk_start).with_end(chunk_end).with_position(('center', subtitle_y_px))
                clips.append(base_subtitle)

                # Highlighted words (perfectly aligned)
                word_duration = chunk_duration / len(chunk_words)
                current_x = 540 - (text_width / 2)  # 540 is center X of 1080px width

                for i, word in enumerate(chunk_words):
                    word_start = chunk_start + (i * word_duration)
                    word_end = min(word_start + word_duration * 0.8, chunk_end)

                    highlighted_word = TextClip(
                        text=word.upper(),
                        font_size=68,
                        color='#FFD700',
                        stroke_color='#FF6B35',
                        stroke_width=5
                    )
                    word_width, _ = highlighted_word.size

                    word_x = current_x + (word_width / 2)
                    highlighted_word = highlighted_word.with_start(word_start).with_end(word_end).with_position((word_x - 125, subtitle_y_px))
                    clips.append(highlighted_word)

                    current_x += word_width + 20  # Add spacing between words

            print(f"✅ Added Opus-style subtitle ({subtitle_start:.1f}s-{subtitle_end:.1f}s): {text[:30]}...")
        except Exception as e:
            print(f"⚠️ Subtitle error: {e}, skipping subtitle: {text[:50]}...")
            continue

    final = CompositeVideoClip(clips, size=(1080, 1920))
    final.write_videofile(output_path, codec="libx264", audio_codec="aac", threads=1)

    clip.reader.close()
    if clip.audio:
        clip.audio.reader.close()
    final.close()
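# Timing sketch for the subtitle math above: a 4 s subtitle split into 4
# two-word chunks gives each chunk 1 s on screen, and within a chunk each word
# is highlighted for 80% of its share (word_duration * 0.8) so a highlight
# never bleeds into the next word.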

def validate_video(video_path, min_duration=30):
    """Validate video file and return duration"""
    try:
        clip = VideoFileClip(video_path)
        duration = clip.duration
        clip.close()
    except Exception as e:
        if "No such file" in str(e):
            raise FileNotFoundError(f"Video file not found: {video_path}")
        elif "could not open" in str(e).lower():
            raise ValueError(f"Invalid or corrupted video file: {video_path}")
        else:
            raise ValueError(f"Error reading video: {str(e)}")

    # Checked outside the try block so this error is not rewrapped above
    if duration < min_duration:
        raise ValueError(f"Video is too short ({duration:.1f}s). Minimum {min_duration}s required.")

    return duration

def generate_shorts(video_path, max_clips=3, output_folder="shorts", progress_callback=None,
                    detection_progress_callback=None, threshold_db=-30, clip_duration=5, detection_mode="loud"):
    os.makedirs(output_folder, exist_ok=True)

    # Validate video first
    try:
        video_duration = validate_video(video_path, min_duration=clip_duration * 2)
        if progress_callback:
            progress_callback(f"✅ Video validated ({video_duration:.1f}s)", 5)
    except Exception:
        if progress_callback:
            progress_callback("❌ Video validation failed", 0)
        raise

    # Choose detection method based on mode
    if detection_mode == "loud":
        if progress_callback:
            progress_callback("🔍 Analyzing audio for loud moments...", 10)
        best_moments = detect_loud_moments(video_path, chunk_duration=clip_duration, threshold_db=threshold_db)
        if progress_callback:
            progress_callback("🔍 Loud moments analysis complete", 35)
    elif detection_mode == "scene":
        if progress_callback:
            progress_callback("🎬 Starting scene analysis...", 10)
        best_moments = detect_scene_changes_with_progress(video_path, chunk_duration=clip_duration,
                                                          progress_callback=detection_progress_callback)
        if progress_callback:
            progress_callback("🎬 Scene analysis complete", 35)
    elif detection_mode == "motion":
        if progress_callback:
            progress_callback("🏃 Starting motion analysis...", 10)
        best_moments = detect_motion_intensity_with_progress(video_path, chunk_duration=clip_duration,
                                                             progress_callback=detection_progress_callback)
        if progress_callback:
            progress_callback("🏃 Motion analysis complete", 35)
    elif detection_mode == "speech":
        if progress_callback:
            progress_callback("😄 Starting speech analysis...", 10)
        best_moments = detect_speech_emotion_with_progress(video_path, chunk_duration=clip_duration,
                                                           progress_callback=detection_progress_callback)
        if progress_callback:
            progress_callback("😄 Speech analysis complete", 35)
    elif detection_mode == "peaks":
        if progress_callback:
            progress_callback("🎵 Starting audio peak analysis...", 10)
        best_moments = detect_audio_peaks_with_progress(video_path, chunk_duration=clip_duration,
                                                        progress_callback=detection_progress_callback)
        if progress_callback:
            progress_callback("🎵 Audio peak analysis complete", 35)
    elif detection_mode == "combined":
        if progress_callback:
            progress_callback("🎯 Starting comprehensive analysis...", 10)
        best_moments = detect_combined_intensity_with_progress(video_path, chunk_duration=clip_duration,
                                                               progress_callback=detection_progress_callback)
        if progress_callback:
            progress_callback("🎯 Comprehensive analysis complete", 35)
    else:
        best_moments = detect_loud_moments(video_path, chunk_duration=clip_duration, threshold_db=threshold_db)
        if progress_callback:
            progress_callback("🔍 Analysis complete", 35)

    selected = best_moments[:max_clips]

    if not selected:
        mode_name = {
            "loud": "loud moments", "scene": "scene changes", "motion": "motion intensity",
            "speech": "emotional speech", "peaks": "audio peaks", "combined": "interesting moments"
        }.get(detection_mode, "moments")
        raise ValueError(f"No {mode_name} found. Try a different detection mode or adjust settings.")

    if progress_callback:
        # Keep the reported percentage monotonic after the 35% analysis mark
        progress_callback(f"📊 Found {len(selected)} clips to generate", 40)

    for i, (start, end) in enumerate(selected):
        if progress_callback:
            progress_callback(f"🗣️ Transcribing clip {i+1}/{len(selected)}", 40 + (i * 20))

        subtitles = transcribe_and_extract_subtitles(video_path, start, end)
        out_path = os.path.join(output_folder, f"short_{i+1}.mp4")

        if progress_callback:
            progress_callback(f"🎬 Creating video {i+1}/{len(selected)}", 50 + (i * 20))

        create_short_clip(video_path, start, end, subtitles, out_path)

    if progress_callback:
        progress_callback("✅ All shorts generated successfully!", 100)

# Video Editing Tools
class VideoEditor:
    """Professional video editing tools for generated shorts"""

    def __init__(self, video_path=None):
        """Initialize video editor with optional video file"""
        self.original_video_path = video_path
        self.video_clip = None
        self.effects = []

        if video_path:
            self.load_video(video_path)

    def load_video(self, video_path):
        """Load a video file for editing"""
        if self.video_clip:
            self.video_clip.close()

        self.original_video_path = video_path
        self.video_clip = VideoFileClip(video_path)
        self.effects = []
        print(f"📺 Loaded video: {os.path.basename(video_path)}")

    def reset(self):
        """Reset to original video, removing all effects"""
        if self.original_video_path:
            self.load_video(self.original_video_path)
            print("🔄 Video reset to original state")

    def export(self, output_path, quality="medium", progress_callback=None):
        """Export the final edited video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        # Quality settings
        quality_settings = {
            "low": {"bitrate": "500k", "audio_bitrate": "128k"},
            "medium": {"bitrate": "1M", "audio_bitrate": "192k"},
            "high": {"bitrate": "2M", "audio_bitrate": "320k"}
        }

        settings = quality_settings.get(quality, quality_settings["medium"])

        # Export; silence the built-in logger when a progress callback is used
        if progress_callback:
            self.video_clip.write_videofile(
                output_path,
                codec="libx264",
                audio_codec="aac",
                bitrate=settings["bitrate"],
                audio_bitrate=settings["audio_bitrate"],
                logger=None
            )
        else:
            self.video_clip.write_videofile(
                output_path,
                codec="libx264",
                audio_codec="aac",
                bitrate=settings["bitrate"],
                audio_bitrate=settings["audio_bitrate"]
            )
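    # Hypothetical editing session (paths illustrative):
    #   editor = VideoEditor("shorts/short_1.mp4")
    #   editor.apply_trim(0, 12.5)
    #   editor.add_color_effect("vintage")
    #   editor.export("shorts/short_1_edited.mp4", quality="high")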

    @staticmethod
    def trim_video(video_path, start_time, end_time, output_path):
        """Trim video to specific time range"""
        clip = VideoFileClip(video_path)
        trimmed = clip.subclipped(start_time, end_time)
        trimmed.write_videofile(output_path, codec="libx264", audio_codec="aac")
        clip.close()
        trimmed.close()

    @staticmethod
    def adjust_speed(video_path, speed_factor, output_path):
        """Change video playback speed (0.5 = half speed, 2.0 = double speed)"""
        clip = VideoFileClip(video_path)
        # Rescale the clip's timeline so video and audio both follow the new speed
        speeded = clip.with_speed_scaled(speed_factor)
        speeded.write_videofile(output_path, codec="libx264", audio_codec="aac")
        clip.close()
        speeded.close()

    @staticmethod
    def add_fade_effects(video_path, fade_in_duration=1.0, fade_out_duration=1.0, output_path=None):
        """Add fade in/out effects"""
        clip = VideoFileClip(video_path)

        # Apply fade effects
        final_clip = clip
        if fade_in_duration > 0:
            final_clip = final_clip.with_effects([FadeIn(fade_in_duration)])
        if fade_out_duration > 0:
            final_clip = final_clip.with_effects([FadeOut(fade_out_duration)])

        if not output_path:
            output_path = video_path.replace('.mp4', '_faded.mp4')

        final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac")
        clip.close()
        final_clip.close()
        return output_path

    @staticmethod
    def adjust_volume(video_path, volume_factor, output_path=None):
        """Adjust audio volume (1.0 = normal, 0.5 = half volume, 2.0 = double volume)"""
        clip = VideoFileClip(video_path)

        if clip.audio:
            audio_adjusted = clip.audio.with_effects([MultiplyVolume(volume_factor)])
            final_clip = clip.with_audio(audio_adjusted)
        else:
            final_clip = clip

        if not output_path:
            output_path = video_path.replace('.mp4', '_volume_adjusted.mp4')

        final_clip.write_videofile(output_path, codec="libx264", audio_codec="aac")
        clip.close()
        final_clip.close()
        return output_path

    @staticmethod
    def resize_video(video_path, width, height, output_path=None):
        """Resize video to specific dimensions"""
        clip = VideoFileClip(video_path)
        resized = clip.resized((width, height))

        if not output_path:
            output_path = video_path.replace('.mp4', f'_resized_{width}x{height}.mp4')

        resized.write_videofile(output_path, codec="libx264", audio_codec="aac")
        clip.close()
        resized.close()
        return output_path

    @staticmethod
    def crop_video(video_path, x1, y1, x2, y2, output_path=None):
        """Crop video to specific coordinates"""
        clip = VideoFileClip(video_path)
        cropped = clip.cropped(x1=x1, y1=y1, x2=x2, y2=y2)

        if not output_path:
            output_path = video_path.replace('.mp4', '_cropped.mp4')

        cropped.write_videofile(output_path, codec="libx264", audio_codec="aac")
        clip.close()
        cropped.close()
        return output_path

    @staticmethod
    def add_text_overlay(video_path, text, position=('center', 'bottom'),
                         duration=None, start_time=0, font_size=50, color='white', output_path=None):
        """Add text overlay to video (optimized for speed)"""
        print(f"🎬 Adding text overlay: '{text}'...")

        clip = VideoFileClip(video_path)

        if duration is None:
            duration = clip.duration - start_time

        # Optimize text creation - use smaller cache and faster rendering
        try:
            # Try using a more efficient text creation method
            text_clip = TextClip(
                text=text,
                font_size=font_size,
                color=color,
                stroke_color='black',
                stroke_width=2,
                method='caption',  # Faster rendering method
                size=(int(clip.w * 0.8), None)  # Limit width to prevent huge text
            )

            print("📝 Text clip created successfully...")

        except Exception as e:
            print(f"⚠️ Using fallback text method: {e}")
            # Fallback to basic text creation
            text_clip = TextClip(
                text=text,
                font_size=font_size,
                color=color,
                stroke_color='black',
                stroke_width=2
            )

        # Set timing and position
        text_clip = text_clip.with_start(start_time).with_end(start_time + duration).with_position(position)

        print("⏱️ Compositing video with text overlay...")

        # Optimize composition with reduced quality for faster processing
        final_video = CompositeVideoClip([clip, text_clip])

        if not output_path:
            output_path = video_path.replace('.mp4', '_with_text.mp4')

        print(f"💾 Saving video to: {output_path}")

        # Optimize output settings for faster processing
        try:
            # Try with all optimization parameters (older MoviePy accepts verbose)
            final_video.write_videofile(
                output_path,
                codec="libx264",
                audio_codec="aac",
                temp_audiofile='temp-audio.m4a',
                remove_temp=True,
                verbose=False,  # Reduce console output
                logger=None,  # Disable logging for speed
                preset='ultrafast',  # Fastest encoding preset
                threads=4  # Use multiple threads
            )
        except TypeError:
            # Fallback for MoviePy versions without the verbose parameter
            final_video.write_videofile(
                output_path,
                codec="libx264",
                audio_codec="aac",
                temp_audiofile='temp-audio.m4a',
                remove_temp=True,
                preset='ultrafast',  # Fastest encoding preset
                threads=4  # Use multiple threads
            )

        # Clean up
        clip.close()
        text_clip.close()
        final_video.close()

    def add_blur_effect(self, blur_strength=2.0):
        """Add blur effect to current video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        def blur_frame(get_frame, t):
            frame = get_frame(t)
            # Convert to uint8 if needed
            if frame.dtype != np.uint8:
                frame = (frame * 255).astype(np.uint8)
            blurred = cv2.GaussianBlur(frame, (15, 15), blur_strength)
            return blurred

        self.video_clip = self.video_clip.transform(blur_frame)
        self.effects.append(f"blur({blur_strength})")
        print(f"🌫️ Applied blur effect (strength: {blur_strength})")

    def add_color_effect(self, effect_type="sepia"):
        """Add color effects: sepia, grayscale, vintage, etc."""
        if not self.video_clip:
            raise Exception("No video loaded!")

        def apply_color_effect(get_frame, t):
            frame = get_frame(t)
            if frame.dtype != np.uint8:
                frame = (frame * 255).astype(np.uint8)

            if effect_type == "grayscale":
                gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
                return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
            elif effect_type == "sepia":
                # Sepia transformation matrix
                sepia_filter = np.array([[0.393, 0.769, 0.189],
                                         [0.349, 0.686, 0.168],
                                         [0.272, 0.534, 0.131]])
                sepia_img = frame.dot(sepia_filter.T)
                sepia_img = np.clip(sepia_img, 0, 255)
                return sepia_img.astype(np.uint8)
            elif effect_type == "vintage":
                # Vintage effect (warm + slight vignette)
                frame = frame.astype(np.float32)
                frame[:, :, 0] *= 1.2  # Increase red
                frame[:, :, 1] *= 1.1  # Slightly increase green
                frame[:, :, 2] *= 0.9  # Decrease blue
                return np.clip(frame, 0, 255).astype(np.uint8)
            elif effect_type == "cool":
                # Cool effect (more blue)
                frame = frame.astype(np.float32)
                frame[:, :, 0] *= 0.9  # Decrease red
                frame[:, :, 1] *= 1.0  # Keep green
                frame[:, :, 2] *= 1.3  # Increase blue
                return np.clip(frame, 0, 255).astype(np.uint8)
            return frame

        self.video_clip = self.video_clip.transform(apply_color_effect)
        self.effects.append(f"color({effect_type})")
        print(f"🎨 Applied color effect: {effect_type}")

    def add_zoom_effect(self, zoom_factor=1.5, zoom_type="zoom_in"):
        """Add zoom in/out effect"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        def zoom_frame(get_frame, t):
            frame = get_frame(t)
            h, w = frame.shape[:2]

            if zoom_type == "zoom_in":
                progress = t / self.video_clip.duration
                current_zoom = 1.0 + (zoom_factor - 1.0) * progress
            elif zoom_type == "zoom_out":
                progress = t / self.video_clip.duration
                current_zoom = zoom_factor - (zoom_factor - 1.0) * progress
            else:  # static zoom
                current_zoom = zoom_factor

            # Calculate crop dimensions for zoom
            new_h, new_w = int(h / current_zoom), int(w / current_zoom)
            start_x = (w - new_w) // 2
            start_y = (h - new_h) // 2

            # Crop and resize
            cropped = frame[start_y:start_y + new_h, start_x:start_x + new_w]
            zoomed = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_CUBIC)

            return zoomed

        self.video_clip = self.video_clip.transform(zoom_frame)
        self.effects.append(f"zoom({zoom_type}, {zoom_factor})")
        print(f"🔍 Applied zoom effect: {zoom_type} (factor: {zoom_factor})")

    def add_rotation_effect(self, angle=0, rotation_type="static"):
        """Add rotation effect"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        def rotate_frame(get_frame, t):
            frame = get_frame(t)
            h, w = frame.shape[:2]

            if rotation_type == "spinning":
                # Continuous rotation
                current_angle = (angle * t * 360 / self.video_clip.duration) % 360
            else:  # static rotation
                current_angle = angle

            # Rotation matrix
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, current_angle, 1.0)
            rotated = cv2.warpAffine(frame, matrix, (w, h), borderMode=cv2.BORDER_REFLECT)

            return rotated

        self.video_clip = self.video_clip.transform(rotate_frame)
        self.effects.append(f"rotation({rotation_type}, {angle})")
        print(f"🔄 Applied rotation effect: {rotation_type} (angle: {angle}°)")

    def apply_trim(self, start_time, end_time):
        """Apply trim to current video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        if start_time >= end_time:
            raise Exception("Start time must be less than end time!")

        if end_time > self.video_clip.duration:
            raise Exception(f"End time cannot exceed video duration ({self.video_clip.duration:.1f}s)!")

        self.video_clip = self.video_clip.subclipped(start_time, end_time)
        self.effects.append(f"trim({start_time:.1f}s-{end_time:.1f}s)")
        print(f"✂️ Applied trim: {start_time:.1f}s to {end_time:.1f}s")

    def apply_speed(self, speed_factor):
        """Apply speed change to current video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        if speed_factor <= 0:
            raise Exception("Speed factor must be greater than 0!")

        # Rescale the timeline so playback (and audio) follow the new speed
        self.video_clip = self.video_clip.with_speed_scaled(speed_factor)

        self.effects.append(f"speed({speed_factor:.1f}x)")
        print(f"⚡ Applied speed change: {speed_factor:.1f}x")

    def apply_fade_effects(self, fade_in_duration=1.0, fade_out_duration=1.0):
        """Apply fade in/out effects to current video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        if fade_in_duration > 0:
            self.video_clip = self.video_clip.with_effects([FadeIn(fade_in_duration)])

        if fade_out_duration > 0:
            self.video_clip = self.video_clip.with_effects([FadeOut(fade_out_duration)])

        self.effects.append(f"fade(in:{fade_in_duration:.1f}s, out:{fade_out_duration:.1f}s)")
        print(f"🌅 Applied fade effects: in {fade_in_duration:.1f}s, out {fade_out_duration:.1f}s")

    def apply_volume(self, volume_factor):
        """Apply volume adjustment to current video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        if not self.video_clip.audio:
            raise Exception("Video has no audio track!")

        self.video_clip = self.video_clip.with_effects([MultiplyVolume(volume_factor)])
        self.effects.append(f"volume({volume_factor:.1f}x)")
        print(f"🔊 Applied volume adjustment: {volume_factor:.1f}x")

    def apply_resize(self, width, height):
        """Apply resize to current video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        if width < 1 or height < 1:
            raise Exception("Width and height must be positive!")

        self.video_clip = self.video_clip.with_effects([Resize((width, height))])
        self.effects.append(f"resize({width}x{height})")
        print(f"📐 Applied resize: {width}x{height}")

    def apply_text_overlay_to_current(self, text, position=('center', 'bottom'), font_size=50, color='white', method='fast'):
        """Apply text overlay to current video"""
        if not self.video_clip:
            raise Exception("No video loaded!")

        if method == 'fast':
            # Use the fast PIL-based method
            self._apply_text_overlay_fast_to_current(text, position, font_size, color)
        else:
            # Use MoviePy method for higher quality
            self._apply_text_overlay_quality_to_current(text, position, font_size, color)

        self.effects.append(f"text('{text[:20]}...', {position}, {font_size}px)")
        print(f"📝 Applied text overlay: '{text[:30]}...'")

    def _apply_text_overlay_fast_to_current(self, text, position, font_size, color):
        """Fast PIL-based text overlay to current video"""
        def add_text_to_frame(get_frame, t):
            frame = get_frame(t)
            # Convert to PIL Image
            pil_image = Image.fromarray(frame)
            draw = ImageDraw.Draw(pil_image)

            # Calculate position
            w, h = pil_image.size
            x_pos, y_pos = self._calculate_text_position(position, w, h, text, font_size)

            # Draw text with outline for better visibility
            try:
                font = ImageFont.truetype("arial.ttf", font_size)
            except OSError:
                font = ImageFont.load_default()

            # Draw outline
            for adj in range(-2, 3):
                for adj2 in range(-2, 3):
                    draw.text((x_pos + adj, y_pos + adj2), text, font=font, fill='black')

            # Draw main text
            draw.text((x_pos, y_pos), text, font=font, fill=color)

            return np.array(pil_image)

        self.video_clip = self.video_clip.transform(add_text_to_frame)
|
||
    def _apply_text_overlay_quality_to_current(self, text, position, font_size, color):
        """High quality MoviePy-based text overlay to current video"""
        # TextClip and CompositeVideoClip are imported at module level;
        # MoviePy 2.x removed the moviepy.editor module and renamed the
        # fontsize parameter to font_size. Pass font=<path to a .ttf file>
        # here if the default font is not suitable.
        text_clip = TextClip(text=text, font_size=font_size, color=color)
        text_clip = text_clip.with_duration(self.video_clip.duration)

        # Set position ('center' alone centers on both axes; otherwise the
        # (x, y) tuple is passed through unchanged)
        if position == ('center', 'center'):
            text_clip = text_clip.with_position('center')
        else:
            text_clip = text_clip.with_position(position)

        self.video_clip = CompositeVideoClip([self.video_clip, text_clip])

    def _calculate_text_position(self, position, width, height, text, font_size):
        """Calculate text position based on position tuple"""
        # Estimate text dimensions (rough calculation)
        text_width = len(text) * font_size * 0.6
        text_height = font_size

        x_pos, y_pos = position

        if x_pos == 'center':
            x_pos = (width - text_width) // 2
        elif x_pos == 'left':
            x_pos = 50
        elif x_pos == 'right':
            x_pos = width - text_width - 50

        if y_pos == 'center':
            y_pos = (height - text_height) // 2
        elif y_pos == 'top':
            y_pos = 50
        elif y_pos == 'bottom':
            y_pos = height - text_height - 50

        return int(x_pos), int(y_pos)

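    # Worked example (sketch) of the estimate above: "Hello World" is 11
    # characters, so at font_size=50 on a 1080x1920 frame
    # text_width ≈ 11 * 50 * 0.6 = 330 px, and ('center', 'bottom') resolves
    # to x = (1080 - 330) // 2 = 375, y = 1920 - 50 - 50 = 1820. The 0.6
    # glyph-width factor is a rough average, so some horizontal drift is
    # expected for unusually wide or narrow fonts.
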
    @staticmethod
    def add_text_overlay_fast(video_path, text, position=('center', 'bottom'),
                              font_size=50, color='white', output_path=None):
        """Ultra-fast text overlay using PIL (for simple text only)"""
        try:
            from PIL import Image, ImageDraw, ImageFont
            import cv2

            print(f"🚀 Using fast text overlay method...")

            # Read video with OpenCV for faster processing
            cap = cv2.VideoCapture(video_path)
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            if not output_path:
                output_path = video_path.replace('.mp4', '_with_text_fast.mp4')

            # Set up video writer
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            frame_count = 0
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            # Calculate text position
            if position == ('center', 'bottom'):
                text_x, text_y = width // 2, height - 100
            elif position == ('center', 'top'):
                text_x, text_y = width // 2, 100
            elif position == ('center', 'center'):
                text_x, text_y = width // 2, height // 2
            else:
                text_x, text_y = width // 2, height - 100  # Default

            # Try the common system fonts once, falling back to the default;
            # loading the font per frame would slow the loop down considerably
            font = None
            for font_name in ("arial.ttf", "calibri.ttf", "tahoma.ttf"):
                try:
                    font = ImageFont.truetype(font_name, font_size)
                    break
                except OSError:
                    continue
            if font is None:
                font = ImageFont.load_default()
                print(f"📝 Using default font (system fonts not found)")

            print(f"📹 Processing {total_frames} frames...")

            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Convert BGR to RGB for PIL
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                pil_image = Image.fromarray(frame_rgb)
                draw = ImageDraw.Draw(pil_image)

                # Add text with outline effect (centered text)
                text_bbox = draw.textbbox((0, 0), text, font=font)
                text_width = text_bbox[2] - text_bbox[0]
                text_height = text_bbox[3] - text_bbox[1]

                # Center the text properly
                centered_x = text_x - (text_width // 2)
                centered_y = text_y - (text_height // 2)

                outline_width = 2
                for adj_x in range(-outline_width, outline_width + 1):
                    for adj_y in range(-outline_width, outline_width + 1):
                        draw.text((centered_x + adj_x, centered_y + adj_y), text, font=font, fill='black')

                # Add main text
                draw.text((centered_x, centered_y), text, font=font, fill=color)

                # Convert back to BGR for OpenCV
                frame_with_text = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
                out.write(frame_with_text)

                frame_count += 1
                if frame_count % 30 == 0:  # Progress every 30 frames
                    progress = (frame_count / total_frames) * 100
                    print(f"🎬 Progress: {progress:.1f}%")

            cap.release()
            out.release()

            # Add audio back using MoviePy (faster than re-encoding everything)
            print(f"🔊 Adding audio track...")
            video_with_audio = VideoFileClip(video_path)
            video_with_text = VideoFileClip(output_path)
            final_video = video_with_text.with_audio(video_with_audio.audio)

            temp_output = output_path.replace('.mp4', '_temp.mp4')
            try:
                # Older MoviePy (1.x) accepts the verbose/logger parameters
                final_video.write_videofile(temp_output, codec="libx264", audio_codec="aac",
                                            verbose=False, logger=None)
            except TypeError:
                # MoviePy 2.x removed the verbose parameter
                final_video.write_videofile(temp_output, codec="libx264", audio_codec="aac")

            # Replace original with final version (os is imported at module level)
            os.remove(output_path)
            os.rename(temp_output, output_path)

            video_with_audio.close()
            video_with_text.close()
            final_video.close()

            print(f"✅ Fast text overlay completed!")
            return output_path

        except ImportError:
            print(f"⚠️ PIL not available, falling back to MoviePy method...")
            return VideoEditor.add_text_overlay(video_path, text, position,
                                                font_size=font_size, color=color, output_path=output_path)
        except Exception as e:
            print(f"⚠️ Fast method failed ({e}), falling back to MoviePy...")
            return VideoEditor.add_text_overlay(video_path, text, position,
                                                font_size=font_size, color=color, output_path=output_path)

    @staticmethod
    def get_video_info(video_path):
        """Get basic video information"""
        clip = VideoFileClip(video_path)
        info = {
            'duration': clip.duration,
            'fps': clip.fps,
            'size': clip.size,
            'has_audio': clip.audio is not None
        }
        clip.close()
        return info

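# Example (sketch): chaining VideoEditor edits in memory before export.
# Assumes the VideoEditor constructor takes a source path (as the GUI below
# uses it); "shorts/clip_01.mp4" is an illustrative filename, not a fixed
# name in this project.
#
#   editor = VideoEditor("shorts/clip_01.mp4")
#   editor.apply_speed(1.5)                                  # 1.5x faster
#   editor.apply_fade_effects(0.5, 0.5)                      # 0.5s fades
#   editor.apply_text_overlay_to_current("Nice!", ('center', 'bottom'), 60)
#   print(VideoEditor.get_video_info("shorts/clip_01.mp4"))  # original file info
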
# Post-Generation Editing Interface
class ShortsEditorGUI:
    """Interface for editing generated shorts"""

    def __init__(self, parent, shorts_folder="shorts"):
        self.parent = parent
        self.shorts_folder = shorts_folder
        self.current_video = None
        self.video_info = None
        self.editor_window = None

    def open_editor(self):
        """Open the shorts editing interface"""
        # Find available shorts
        shorts_files = glob.glob(os.path.join(self.shorts_folder, "*.mp4"))

        if not shorts_files:
            messagebox.showinfo("No Shorts Found",
                                f"No video files found in '{self.shorts_folder}' folder.\nGenerate some shorts first!")
            return

        # Create editor window
        self.editor_window = tk.Toplevel(self.parent)
        self.editor_window.title("🎬 Shorts Editor - Professional Video Editing")
        self.editor_window.geometry("800x700")
        self.editor_window.minsize(600, 500)  # Set minimum size
        self.editor_window.resizable(True, True)
        self.editor_window.transient(self.parent)

        # Make window responsive
        self.editor_window.rowconfigure(1, weight=1)
        self.editor_window.columnconfigure(0, weight=1)

        # Bind resize event
        self.editor_window.bind('<Configure>', self.on_editor_resize)

        self.create_editor_interface(shorts_files)

    def on_editor_resize(self, event):
        """Handle editor window resize events"""
        if event.widget == self.editor_window:
            # Get current window width
            width = self.editor_window.winfo_width()

            # For very narrow windows a stacked (vertical) layout would be
            # preferable; that rework is not implemented yet, so below the
            # 700px breakpoint we rely on the grid weights to keep the UI usable.
            if width < 700:
                pass

    def create_editor_interface(self, shorts_files):
        """Create the main editor interface with video player"""
        # Title
        title_frame = tk.Frame(self.editor_window)
        title_frame.grid(row=0, column=0, padx=20, pady=10, sticky="ew")

        tk.Label(title_frame, text="🎬 Professional Shorts Editor",
                 font=("Arial", 16, "bold")).pack()
        tk.Label(title_frame, text="Select and edit your generated shorts with professional tools + Real-time Preview",
                 font=("Arial", 10), fg="gray").pack()

        # Main content frame
        main_frame = tk.Frame(self.editor_window)
        main_frame.grid(row=1, column=0, padx=20, pady=10, sticky="nsew")
        main_frame.rowconfigure(0, weight=1)
        main_frame.columnconfigure(1, weight=1)

        # Left panel - Video selection and info
        left_panel = tk.Frame(main_frame)
        left_panel.grid(row=0, column=0, sticky="nsew", padx=(0, 10))
        left_panel.rowconfigure(1, weight=1)

        # Video selection frame
        selection_frame = tk.LabelFrame(left_panel, text="📁 Select Short to Edit", padx=10, pady=10)
        selection_frame.grid(row=0, column=0, pady=(0, 10), sticky="ew")

        # Video list with preview info
        list_frame = tk.Frame(selection_frame)
        list_frame.pack(fill="x")

        tk.Label(list_frame, text="Available Shorts:", font=("Arial", 10, "bold")).pack(anchor="w")

        # Listbox with scrollbar
        list_container = tk.Frame(list_frame)
        list_container.pack(fill="x", pady=5)

        self.video_listbox = tk.Listbox(list_container, height=4, font=("Courier", 9), width=50)
        scrollbar = tk.Scrollbar(list_container, orient="vertical")
        self.video_listbox.config(yscrollcommand=scrollbar.set)
        scrollbar.config(command=self.video_listbox.yview)

        self.video_listbox.pack(side="left", fill="both", expand=True)
        scrollbar.pack(side="right", fill="y")

        # Populate video list with file info
        self.video_files = []
        for video_file in sorted(shorts_files):
            try:
                info = VideoEditor.get_video_info(video_file)
                filename = os.path.basename(video_file)
                size_mb = os.path.getsize(video_file) / (1024 * 1024)
                display_text = f"{filename:<20} │ {info['duration']:.1f}s │ {info['size'][0]}x{info['size'][1]} │ {size_mb:.1f}MB"
                self.video_listbox.insert(tk.END, display_text)
                self.video_files.append(video_file)
            except Exception as e:
                print(f"Error reading {video_file}: {e}")

        # Video player frame (center) - gridded like its siblings, since
        # pack() and grid() must not be mixed inside the same container
        player_frame = tk.Frame(main_frame)
        player_frame.grid(row=0, column=1, sticky="nsew", padx=10)

        # Video player
        self.create_video_player(player_frame)

        # Video selection handler
        def on_video_select(event):
            selection = self.video_listbox.curselection()
            if selection:
                self.current_video = self.video_files[selection[0]]
                self.video_info = VideoEditor.get_video_info(self.current_video)
                self.update_video_info()
                self.enable_editing_tools()
                self.load_video_in_player()

        self.video_listbox.bind("<<ListboxSelect>>", on_video_select)

        # Current video info
        self.info_frame = tk.LabelFrame(left_panel, text="📊 Video Information", padx=10, pady=10)
        self.info_frame.grid(row=1, column=0, sticky="ew", pady=(0, 10))

        self.info_label = tk.Label(self.info_frame, text="Select a video to see details",
                                   font=("Courier", 9), justify="left")
        self.info_label.pack(anchor="w")

        # Editing tools frame (right panel)
        self.tools_frame = tk.LabelFrame(main_frame, text="🛠️ Professional Editing Tools", padx=10, pady=10)
        self.tools_frame.grid(row=0, column=2, sticky="ns", padx=(10, 0))

        self.create_editing_tools()

        # Output and action buttons
        action_frame = tk.Frame(self.editor_window)
        action_frame.grid(row=2, column=0, padx=20, pady=10, sticky="ew")

        # Output folder selection
        output_folder_frame = tk.Frame(action_frame)
        output_folder_frame.pack(fill="x", pady=5)

        tk.Label(output_folder_frame, text="Output Folder:", font=("Arial", 9, "bold")).pack(side="left")
        self.output_folder = tk.StringVar(value=os.path.join(self.shorts_folder, "edited"))
        output_entry = tk.Entry(output_folder_frame, textvariable=self.output_folder, width=40)
        output_entry.pack(side="left", padx=(10, 5))

        tk.Button(output_folder_frame, text="Browse",
                  command=self.select_output_folder).pack(side="left")

        # Action buttons
        button_frame = tk.Frame(action_frame)
        button_frame.pack(fill="x", pady=10)

        tk.Button(button_frame, text="🔄 Refresh List",
                  command=self.refresh_video_list, bg="#2196F3", fg="white").pack(side="left", padx=5)

        tk.Button(button_frame, text="📂 Open Shorts Folder",
                  command=self.open_shorts_folder, bg="#FF9800", fg="white").pack(side="left", padx=5)

        tk.Button(button_frame, text="❌ Close Editor",
                  command=self.close_editor, bg="#F44336", fg="white").pack(side="right", padx=5)

    def create_video_player(self, parent_frame):
        """Create the video player with timeline controls"""
        player_label_frame = tk.LabelFrame(parent_frame, text="🎥 Real-time Video Player", padx=10, pady=10)
        player_label_frame.pack(fill="both", expand=True)

        # Video display canvas
        self.video_canvas = tk.Canvas(player_label_frame, width=400, height=300, bg="black", relief="sunken", bd=2)
        self.video_canvas.pack(pady=10)

        # Player controls frame
        controls_frame = tk.Frame(player_label_frame)
        controls_frame.pack(fill="x", pady=5)

        # Timeline slider
        timeline_frame = tk.Frame(controls_frame)
        timeline_frame.pack(fill="x", pady=5)

        tk.Label(timeline_frame, text="Timeline:", font=("Arial", 9, "bold")).pack(anchor="w")

        self.timeline_var = tk.DoubleVar()
        self.timeline_slider = tk.Scale(timeline_frame, from_=0, to=100, orient="horizontal",
                                        variable=self.timeline_var, command=self.on_timeline_change,
                                        length=380, resolution=0.1)
        self.timeline_slider.pack(fill="x")

        # Play controls
        play_controls_frame = tk.Frame(controls_frame)
        play_controls_frame.pack(pady=5)

        self.play_button = tk.Button(play_controls_frame, text="▶️ Play", command=self.toggle_play,
                                     font=("Arial", 10, "bold"), bg="#4CAF50", fg="white")
        self.play_button.pack(side="left", padx=5)

        tk.Button(play_controls_frame, text="⏹️ Stop", command=self.stop_video,
                  font=("Arial", 10, "bold"), bg="#F44336", fg="white").pack(side="left", padx=5)

        tk.Button(play_controls_frame, text="⏪ -5s", command=lambda: self.seek_relative(-5),
                  font=("Arial", 9), bg="#FF9800", fg="white").pack(side="left", padx=2)

        tk.Button(play_controls_frame, text="⏩ +5s", command=lambda: self.seek_relative(5),
                  font=("Arial", 9), bg="#FF9800", fg="white").pack(side="left", padx=2)

        # Time display
        self.time_label = tk.Label(controls_frame, text="00:00 / 00:00", font=("Arial", 10, "bold"))
        self.time_label.pack(pady=5)

        # Player state variables
        self.current_clip = None
        self.is_playing = False
        self.current_time = 0.0
        self.video_duration = 0.0
        self.play_thread = None
        self.last_frame_time = 0

    def load_video_in_player(self):
        """Load the selected video in the player"""
        if not self.current_video:
            return

        try:
            # Close previous clip
            if self.current_clip:
                self.current_clip.close()

            print(f"🎥 Loading video in player: {os.path.basename(self.current_video)}")
            self.current_clip = VideoFileClip(self.current_video)
            self.video_duration = self.current_clip.duration

            # Update timeline
            self.timeline_slider.config(to=self.video_duration)
            self.timeline_var.set(0)
            self.current_time = 0.0

            # Display first frame
            self.display_frame_at_time(0.0)
            self.update_time_display()

            print(f"✅ Video loaded successfully ({self.video_duration:.1f}s)")

        except Exception as e:
            print(f"❌ Error loading video: {e}")
            messagebox.showerror("Video Error", f"Failed to load video:\n{str(e)}")

    def display_frame_at_time(self, time_seconds):
        """Display video frame at specific time"""
        if not self.current_clip:
            return

        # Canvas dimensions are needed in every branch, so compute them once
        canvas_width = self.video_canvas.winfo_width() or 400
        canvas_height = self.video_canvas.winfo_height() or 300

        try:
            # Get frame at specified time (clamped just inside the clip)
            frame = self.current_clip.get_frame(min(time_seconds, self.video_duration - 0.01))

            # Convert frame to proper format for PIL
            if frame.dtype != np.uint8:
                # Convert float frames to uint8
                frame = (frame * 255).astype(np.uint8)

            # Ensure frame is in correct shape (handle edge cases)
            if len(frame.shape) == 3 and frame.shape[2] == 3:
                # Normal RGB frame
                pil_image = Image.fromarray(frame)
            else:
                # Handle other formats or corrupted frames
                print(f"⚠️ Unusual frame shape: {frame.shape}, dtype: {frame.dtype}")
                # Create a black frame as fallback
                frame = np.zeros((canvas_height, canvas_width, 3), dtype=np.uint8)
                pil_image = Image.fromarray(frame)

            # Resize to fit canvas while maintaining aspect ratio
            pil_image.thumbnail((canvas_width - 20, canvas_height - 20), Image.Resampling.LANCZOS)

            # Convert to Tkinter format (keep a reference on self so the
            # image is not garbage-collected while displayed)
            self.current_tk_image = ImageTk.PhotoImage(pil_image)

            # Clear canvas and display image
            self.video_canvas.delete("all")
            self.video_canvas.create_image(canvas_width // 2, canvas_height // 2,
                                           image=self.current_tk_image)

        except Exception as e:
            print(f"⚠️ Error displaying frame: {e}")
            # Show a black frame on error
            try:
                black_frame = np.zeros((canvas_height - 20, canvas_width - 20, 3), dtype=np.uint8)
                pil_image = Image.fromarray(black_frame)
                self.current_tk_image = ImageTk.PhotoImage(pil_image)
                self.video_canvas.delete("all")
                self.video_canvas.create_image(canvas_width // 2, canvas_height // 2,
                                               image=self.current_tk_image)
            except Exception:
                pass

    def on_timeline_change(self, value):
        """Handle timeline slider changes"""
        if not self.current_clip:
            return

        self.current_time = float(value)
        self.display_frame_at_time(self.current_time)
        self.update_time_display()

    def toggle_play(self):
        """Toggle play/pause"""
        if not self.current_clip:
            return

        if self.is_playing:
            self.pause_video()
        else:
            self.play_video()

    def play_video(self):
        """Start video playback"""
        if not self.current_clip or self.is_playing:
            return

        self.is_playing = True
        self.play_button.config(text="⏸️ Pause", bg="#FF9800")

        def play_thread():
            # NOTE: this worker updates Tk widgets directly; Tkinter is not
            # strictly thread-safe, so a more robust variant would marshal
            # these updates onto the main loop with editor_window.after(...).
            start_time = time.time()
            start_video_time = self.current_time

            while self.is_playing and self.current_time < self.video_duration:
                try:
                    # Derive the video time from wall-clock elapsed time
                    elapsed = time.time() - start_time
                    self.current_time = start_video_time + elapsed

                    if self.current_time >= self.video_duration:
                        self.current_time = self.video_duration
                        self.is_playing = False
                        break

                    # Update timeline and display
                    self.timeline_var.set(self.current_time)
                    self.display_frame_at_time(self.current_time)
                    self.update_time_display()

                    # Frame rate control (approximately 30 FPS)
                    time.sleep(1 / 30)

                except Exception as e:
                    print(f"⚠️ Playback error: {e}")
                    break

            # Playback finished
            self.is_playing = False
            self.play_button.config(text="▶️ Play", bg="#4CAF50")

        self.play_thread = threading.Thread(target=play_thread, daemon=True)
        self.play_thread.start()

    def pause_video(self):
        """Pause video playback"""
        self.is_playing = False
        self.play_button.config(text="▶️ Play", bg="#4CAF50")

    def stop_video(self):
        """Stop video and return to beginning"""
        self.is_playing = False
        self.current_time = 0.0
        self.timeline_var.set(0)
        self.display_frame_at_time(0.0)
        self.update_time_display()
        self.play_button.config(text="▶️ Play", bg="#4CAF50")

    def seek_relative(self, seconds):
        """Seek relative to current position, clamped to the clip bounds"""
        if not self.current_clip:
            return

        new_time = max(0, min(self.current_time + seconds, self.video_duration))
        self.current_time = new_time
        self.timeline_var.set(new_time)
        self.display_frame_at_time(new_time)
        self.update_time_display()

    def update_time_display(self):
        """Update the time display label"""
        current_mins = int(self.current_time // 60)
        current_secs = int(self.current_time % 60)
        total_mins = int(self.video_duration // 60)
        total_secs = int(self.video_duration % 60)

        time_text = f"{current_mins:02d}:{current_secs:02d} / {total_mins:02d}:{total_secs:02d}"
        self.time_label.config(text=time_text)

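    # Example: current_time=75.4s in a 125s clip renders as "01:15 / 02:05"
    # (floor division gives the minutes, modulo the leftover seconds).
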
    def close_editor(self):
        """Clean up and close editor"""
        self.is_playing = False
        if self.current_clip:
            self.current_clip.close()
        self.editor_window.destroy()

    def create_editing_tools(self):
        """Create the professional editing tools interface"""
        # Create notebook for organized tools
        notebook = ttk.Notebook(self.tools_frame)
        notebook.pack(fill="both", expand=True)

        # Basic Editing Tab
        basic_frame = ttk.Frame(notebook)
        notebook.add(basic_frame, text="✂️ Basic Editing")

        # Trim Tool
        trim_frame = tk.LabelFrame(basic_frame, text="✂️ Trim Video", padx=10, pady=5)
        trim_frame.pack(fill="x", padx=10, pady=5)

        trim_controls = tk.Frame(trim_frame)
        trim_controls.pack(fill="x")

        tk.Label(trim_controls, text="Start:").pack(side="left")
        self.trim_start = tk.DoubleVar(value=0.0)
        tk.Spinbox(trim_controls, from_=0, to=120, increment=0.1, width=8,
                   textvariable=self.trim_start, format="%.1f").pack(side="left", padx=5)

        tk.Label(trim_controls, text="End:").pack(side="left", padx=(10, 0))
        self.trim_end = tk.DoubleVar(value=5.0)
        tk.Spinbox(trim_controls, from_=0, to=120, increment=0.1, width=8,
                   textvariable=self.trim_end, format="%.1f").pack(side="left", padx=5)

        tk.Button(trim_controls, text="✂️ Trim Video",
                  command=self.trim_video, bg="#4CAF50", fg="white").pack(side="right", padx=10)

        # Speed Tool
        speed_frame = tk.LabelFrame(basic_frame, text="⚡ Speed Control", padx=10, pady=5)
        speed_frame.pack(fill="x", padx=10, pady=5)

        speed_controls = tk.Frame(speed_frame)
        speed_controls.pack(fill="x")

        tk.Label(speed_controls, text="Speed:").pack(side="left")
        self.speed_factor = tk.DoubleVar(value=1.0)
        speed_spinbox = tk.Spinbox(speed_controls, from_=0.1, to=5.0, increment=0.1, width=8,
                                   textvariable=self.speed_factor, format="%.1f")
        speed_spinbox.pack(side="left", padx=5)

        tk.Label(speed_controls, text="(0.5=slow, 1.0=normal, 2.0=fast)").pack(side="left", padx=5)
        tk.Button(speed_controls, text="⚡ Apply Speed",
                  command=self.adjust_speed, bg="#FF9800", fg="white").pack(side="right", padx=10)

        # Effects Tab
        effects_frame = ttk.Frame(notebook)
        notebook.add(effects_frame, text="✨ Effects")

        # Fade Effects
        fade_frame = tk.LabelFrame(effects_frame, text="🌅 Fade Effects", padx=10, pady=5)
        fade_frame.pack(fill="x", padx=10, pady=5)

        fade_controls = tk.Frame(fade_frame)
        fade_controls.pack(fill="x")

        tk.Label(fade_controls, text="Fade In:").pack(side="left")
        self.fade_in = tk.DoubleVar(value=0.5)
        tk.Spinbox(fade_controls, from_=0, to=5, increment=0.1, width=6,
                   textvariable=self.fade_in, format="%.1f").pack(side="left", padx=5)

        tk.Label(fade_controls, text="Fade Out:").pack(side="left", padx=(10, 0))
        self.fade_out = tk.DoubleVar(value=0.5)
        tk.Spinbox(fade_controls, from_=0, to=5, increment=0.1, width=6,
                   textvariable=self.fade_out, format="%.1f").pack(side="left", padx=5)

        tk.Button(fade_controls, text="🌅 Add Fades",
                  command=self.add_fades, bg="#9C27B0", fg="white").pack(side="right", padx=10)

        # Volume Control
        volume_frame = tk.LabelFrame(effects_frame, text="🔊 Volume Control", padx=10, pady=5)
        volume_frame.pack(fill="x", padx=10, pady=5)

        volume_controls = tk.Frame(volume_frame)
        volume_controls.pack(fill="x")

        tk.Label(volume_controls, text="Volume:").pack(side="left")
        self.volume_factor = tk.DoubleVar(value=1.0)
        tk.Spinbox(volume_controls, from_=0, to=3, increment=0.1, width=6,
                   textvariable=self.volume_factor, format="%.1f").pack(side="left", padx=5)

        tk.Label(volume_controls, text="(0.0=mute, 1.0=normal, 2.0=loud)").pack(side="left", padx=5)
        tk.Button(volume_controls, text="🔊 Adjust Volume",
                  command=self.adjust_volume, bg="#3F51B5", fg="white").pack(side="right", padx=10)

        # Transform Tab
        transform_frame = ttk.Frame(notebook)
        notebook.add(transform_frame, text="🔄 Transform")

        # Resize Tool
        resize_frame = tk.LabelFrame(transform_frame, text="📐 Resize Video", padx=10, pady=5)
        resize_frame.pack(fill="x", padx=10, pady=5)

        resize_controls = tk.Frame(resize_frame)
        resize_controls.pack(fill="x")

        tk.Label(resize_controls, text="Width:").pack(side="left")
        self.resize_width = tk.IntVar(value=1080)
        tk.Spinbox(resize_controls, from_=240, to=4320, increment=120, width=6,
                   textvariable=self.resize_width).pack(side="left", padx=5)

        tk.Label(resize_controls, text="Height:").pack(side="left", padx=(10, 0))
        self.resize_height = tk.IntVar(value=1920)
        tk.Spinbox(resize_controls, from_=240, to=4320, increment=120, width=6,
                   textvariable=self.resize_height).pack(side="left", padx=5)

        tk.Button(resize_controls, text="📐 Resize",
                  command=self.resize_video, bg="#607D8B", fg="white").pack(side="right", padx=10)

        # Text Overlay Tab
        text_frame = ttk.Frame(notebook)
        notebook.add(text_frame, text="📝 Text Overlay")

        text_overlay_frame = tk.LabelFrame(text_frame, text="📝 Add Text Overlay", padx=10, pady=5)
        text_overlay_frame.pack(fill="x", padx=10, pady=5)

        # Text input
        text_input_frame = tk.Frame(text_overlay_frame)
        text_input_frame.pack(fill="x", pady=5)

        tk.Label(text_input_frame, text="Text:").pack(side="left")
        self.overlay_text = tk.StringVar(value="Your Text Here")
        tk.Entry(text_input_frame, textvariable=self.overlay_text, width=30).pack(side="left", padx=5)

        # Text settings
        text_settings_frame = tk.Frame(text_overlay_frame)
        text_settings_frame.pack(fill="x", pady=5)

        tk.Label(text_settings_frame, text="Size:").pack(side="left")
        self.text_size = tk.IntVar(value=50)
        tk.Spinbox(text_settings_frame, from_=20, to=150, width=6,
                   textvariable=self.text_size).pack(side="left", padx=5)

        tk.Label(text_settings_frame, text="Position:").pack(side="left", padx=(10, 0))
        self.text_position = tk.StringVar(value="center,bottom")
        position_combo = ttk.Combobox(text_settings_frame, textvariable=self.text_position, width=15,
                                      values=["center,top", "center,center", "center,bottom",
                                              "left,top", "right,top", "left,bottom", "right,bottom"],
                                      state="readonly")
        position_combo.pack(side="left", padx=5)

        # Speed/Quality options (named method_row to avoid shadowing the
        # Speed Control frame above)
        method_row = tk.Frame(text_overlay_frame)
        method_row.pack(fill="x", pady=5)

        tk.Label(method_row, text="Processing Method:", font=("Arial", 9, "bold")).pack(side="left")
        self.text_method = tk.StringVar(value="fast")

        method_frame = tk.Frame(method_row)
        method_frame.pack(side="left", padx=10)

        tk.Radiobutton(method_frame, text="🚀 Fast (PIL)", variable=self.text_method,
                       value="fast", font=("Arial", 8)).pack(side="left")
        tk.Radiobutton(method_frame, text="🎬 High Quality (MoviePy)", variable=self.text_method,
                       value="quality", font=("Arial", 8)).pack(side="left", padx=(10, 0))

        # Info label for method explanation
        method_info = tk.Label(method_row, text="Fast: 3-5x faster, basic text | Quality: Slower, advanced effects",
                               font=("Arial", 7), fg="gray")
        method_info.pack(side="right")

        # Button frame
        button_frame = tk.Frame(text_overlay_frame)
        button_frame.pack(fill="x", pady=5)

        tk.Button(button_frame, text="📝 Add Text Overlay",
                  command=self.add_text_overlay, bg="#795548", fg="white",
                  font=("Arial", 10, "bold")).pack(side="right", padx=10)

        # Video Effects Tab
        effects_advanced_frame = ttk.Frame(notebook)
        notebook.add(effects_advanced_frame, text="🎨 Video Effects")

        # Blur Effect
        blur_frame = tk.LabelFrame(effects_advanced_frame, text="🌫️ Blur Effect", padx=10, pady=5)
        blur_frame.pack(fill="x", padx=10, pady=5)

        blur_controls = tk.Frame(blur_frame)
        blur_controls.pack(fill="x")

        tk.Label(blur_controls, text="Strength:").pack(side="left")
        self.blur_strength = tk.DoubleVar(value=2.0)
        tk.Scale(blur_controls, from_=0.1, to=10.0, resolution=0.1, orient="horizontal",
                 variable=self.blur_strength, length=150).pack(side="left", padx=5)

        tk.Button(blur_controls, text="🌫️ Apply Blur",
                  command=self.apply_blur_effect, bg="#795548", fg="white").pack(side="right", padx=10)

        # Color Effects
        color_frame = tk.LabelFrame(effects_advanced_frame, text="🎨 Color Effects", padx=10, pady=5)
        color_frame.pack(fill="x", padx=10, pady=5)

        color_controls = tk.Frame(color_frame)
        color_controls.pack(fill="x")

        tk.Label(color_controls, text="Effect:").pack(side="left")
        self.color_effect_var = tk.StringVar(value="sepia")
        color_combo = ttk.Combobox(color_controls, textvariable=self.color_effect_var,
                                   values=["sepia", "grayscale", "vintage", "cool"], width=12, state="readonly")
        color_combo.pack(side="left", padx=5)

        tk.Button(color_controls, text="🎨 Apply Color Effect",
                  command=self.apply_color_effect, bg="#E91E63", fg="white").pack(side="right", padx=10)

        # Zoom Effects
        zoom_frame = tk.LabelFrame(effects_advanced_frame, text="🔍 Zoom Effects", padx=10, pady=5)
        zoom_frame.pack(fill="x", padx=10, pady=5)

        zoom_controls = tk.Frame(zoom_frame)
        zoom_controls.pack(fill="x")

        tk.Label(zoom_controls, text="Type:").pack(side="left")
        self.zoom_effect_var = tk.StringVar(value="zoom_in")
        zoom_combo = ttk.Combobox(zoom_controls, textvariable=self.zoom_effect_var,
                                  values=["zoom_in", "zoom_out", "static"], width=10, state="readonly")
        zoom_combo.pack(side="left", padx=5)

        tk.Label(zoom_controls, text="Factor:").pack(side="left", padx=(10, 0))
        self.zoom_factor = tk.DoubleVar(value=1.5)
        tk.Scale(zoom_controls, from_=1.0, to=3.0, resolution=0.1, orient="horizontal",
                 variable=self.zoom_factor, length=100).pack(side="left", padx=5)

        tk.Button(zoom_controls, text="🔍 Apply Zoom",
                  command=self.apply_zoom_effect, bg="#3F51B5", fg="white").pack(side="right", padx=10)

        # Rotation Effects
        rotation_frame = tk.LabelFrame(effects_advanced_frame, text="🔄 Rotation Effects", padx=10, pady=5)
        rotation_frame.pack(fill="x", padx=10, pady=5)

        rotation_controls = tk.Frame(rotation_frame)
        rotation_controls.pack(fill="x")

        tk.Label(rotation_controls, text="Type:").pack(side="left")
        self.rotation_type_var = tk.StringVar(value="static")
        rotation_combo = ttk.Combobox(rotation_controls, textvariable=self.rotation_type_var,
                                      values=["static", "spinning"], width=10, state="readonly")
        rotation_combo.pack(side="left", padx=5)

        tk.Label(rotation_controls, text="Angle:").pack(side="left", padx=(10, 0))
        self.rotation_angle = tk.DoubleVar(value=0.0)
        tk.Scale(rotation_controls, from_=-180, to=180, resolution=5, orient="horizontal",
                 variable=self.rotation_angle, length=120).pack(side="left", padx=5)

        tk.Button(rotation_controls, text="🔄 Apply Rotation",
                  command=self.apply_rotation_effect, bg="#FF5722", fg="white").pack(side="right", padx=10)

        # Export Tab
        export_frame = ttk.Frame(notebook)
        notebook.add(export_frame, text="💾 Export")

        export_controls_frame = tk.LabelFrame(export_frame, text="💾 Export Final Video", padx=10, pady=5)
        export_controls_frame.pack(fill="x", padx=10, pady=5)

        # Output filename
        filename_frame = tk.Frame(export_controls_frame)
        filename_frame.pack(fill="x", pady=5)

        tk.Label(filename_frame, text="Filename:").pack(side="left")
        self.output_filename = tk.StringVar(value="edited_video.mp4")
        tk.Entry(filename_frame, textvariable=self.output_filename, width=25).pack(side="left", padx=5)

        # Quality settings
        quality_frame = tk.Frame(export_controls_frame)
        quality_frame.pack(fill="x", pady=5)

        tk.Label(quality_frame, text="Quality:").pack(side="left")
        self.export_quality = tk.StringVar(value="medium")
        quality_combo = ttk.Combobox(quality_frame, textvariable=self.export_quality,
                                     values=["low", "medium", "high"], width=10, state="readonly")
        quality_combo.pack(side="left", padx=5)

        # Export button
        export_button_frame = tk.Frame(export_controls_frame)
        export_button_frame.pack(fill="x", pady=10)

        self.export_button = tk.Button(export_button_frame, text="💾 Export Final Video",
                                       command=self.export_edited_video, bg="#4CAF50", fg="white",
                                       font=("Arial", 12, "bold"))
        self.export_button.pack(pady=5)

        # Progress bar and label (created now, packed only while an export runs)
        self.progress_var = tk.DoubleVar()
        self.progress_bar = ttk.Progressbar(export_button_frame, variable=self.progress_var, maximum=100)
        self.progress_label = tk.Label(export_button_frame, text="", font=("Arial", 9))

        # Reset button
        tk.Button(export_button_frame, text="🔄 Reset All Changes", command=self.reset_edited_video,
                  bg="#F44336", fg="white", font=("Arial", 10)).pack(pady=5)

        # Initially disable all tools
        self.disable_editing_tools()

    def disable_editing_tools(self):
        """Disable all editing tools until a video is selected"""
        for widget in self.tools_frame.winfo_children():
            self.set_widget_state(widget, "disabled")

    def enable_editing_tools(self):
        """Enable editing tools when a video is selected"""
        for widget in self.tools_frame.winfo_children():
            self.set_widget_state(widget, "normal")

        # Initialize video editor for current video
        try:
            self.video_editor = VideoEditor(self.current_video)
            print(f"✅ Video editor initialized for: {os.path.basename(self.current_video)}")
        except Exception as e:
            print(f"❌ Error initializing video editor: {e}")
            self.video_editor = None

        # Update trim end time to the video duration (capped at 30s)
        if self.video_info:
            self.trim_end.set(min(self.video_info['duration'], 30.0))

    def apply_blur_effect(self):
        """Apply blur effect to video"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showerror("Error", "Please select a video first!")
            return

        strength = self.blur_strength.get()
        print(f"🌫️ Applying blur effect (strength: {strength})")

        try:
            self.video_editor.add_blur_effect(strength)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Blur effect applied with strength {strength}")
        except Exception as e:
            print(f"❌ Error applying blur effect: {e}")
            messagebox.showerror("Blur Error", f"Failed to apply blur effect:\n{str(e)}")

    def apply_color_effect(self):
        """Apply color effect to video"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showerror("Error", "Please select a video first!")
            return

        effect_type = self.color_effect_var.get()
        print(f"🎨 Applying color effect: {effect_type}")

        try:
            self.video_editor.add_color_effect(effect_type)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Color effect '{effect_type}' applied successfully")
        except Exception as e:
            print(f"❌ Error applying color effect: {e}")
            messagebox.showerror("Color Effect Error", f"Failed to apply color effect:\n{str(e)}")

    def apply_zoom_effect(self):
        """Apply zoom effect to video"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showerror("Error", "Please select a video first!")
            return

        zoom_type = self.zoom_effect_var.get()
        zoom_factor = self.zoom_factor.get()
        print(f"🔍 Applying zoom effect: {zoom_type} (factor: {zoom_factor})")

        try:
            self.video_editor.add_zoom_effect(zoom_factor, zoom_type)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Zoom effect '{zoom_type}' applied successfully")
        except Exception as e:
            print(f"❌ Error applying zoom effect: {e}")
            messagebox.showerror("Zoom Effect Error", f"Failed to apply zoom effect:\n{str(e)}")

    def apply_rotation_effect(self):
        """Apply rotation effect to video"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showerror("Error", "Please select a video first!")
            return

        rotation_type = self.rotation_type_var.get()
        angle = self.rotation_angle.get()
        print(f"🔄 Applying rotation effect: {rotation_type} (angle: {angle}°)")

        try:
            self.video_editor.add_rotation_effect(angle, rotation_type)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Rotation effect '{rotation_type}' applied successfully")
        except Exception as e:
            print(f"❌ Error applying rotation effect: {e}")
            messagebox.showerror("Rotation Effect Error", f"Failed to apply rotation effect:\n{str(e)}")

    def refresh_video_preview(self):
        """Refresh the video preview after applying effects"""
        if hasattr(self, 'current_time') and hasattr(self, 'video_editor') and self.video_editor:
            try:
                # Update the current clip reference to include effects
                if self.video_editor.video_clip:
                    self.current_clip = self.video_editor.video_clip

                # Update video duration in case it changed (speed/trim effects)
                old_duration = self.video_duration
                self.video_duration = self.current_clip.duration

                # Update timeline if duration changed
                if abs(old_duration - self.video_duration) > 0.1:
                    self.timeline_slider.config(to=self.video_duration)
                    # Adjust current time if it's beyond new duration
                    if self.current_time > self.video_duration:
                        self.current_time = max(0, self.video_duration - 0.1)
                        self.timeline_var.set(self.current_time)
                    print(f"📏 Updated timeline duration: {old_duration:.1f}s → {self.video_duration:.1f}s")

                self.display_frame_at_time(self.current_time)
                self.update_time_display()
                print("🔄 Video preview refreshed with effects")
            except Exception as e:
                print(f"⚠️ Error refreshing preview: {e}")

    def export_edited_video(self):
        """Export the final edited video"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showerror("Error", "No video selected for editing!")
            return

        filename = self.output_filename.get()
        if not filename.endswith('.mp4'):
            filename += '.mp4'

        output_path = os.path.join(self.output_folder.get(), filename)
        quality = self.export_quality.get()

        print(f"💾 Exporting edited video to: {output_path}")

        def export_thread():
            try:
                # Show progress bar
                self.progress_bar.pack(pady=5)
                self.progress_label.pack()
                self.export_button.config(state="disabled", text="Exporting...")

                # Export with progress updates
                def progress_callback(progress):
                    self.progress_var.set(progress * 100)
                    self.progress_label.config(text=f"Exporting... {progress*100:.1f}%")
                    self.editor_window.update_idletasks()

                # Create output directory if needed
                os.makedirs(os.path.dirname(output_path), exist_ok=True)

                # Export the video
                self.video_editor.export(output_path, quality, progress_callback)

                # Hide progress bar
                self.progress_bar.pack_forget()
                self.progress_label.pack_forget()
                self.export_button.config(state="normal", text="💾 Export Final Video")

                messagebox.showinfo("Success", f"Video exported successfully to:\n{output_path}")
                print(f"✅ Video exported successfully: {output_path}")

            except Exception as e:
                print(f"❌ Export error: {e}")
                self.progress_bar.pack_forget()
                self.progress_label.pack_forget()
                self.export_button.config(state="normal", text="💾 Export Final Video")
                messagebox.showerror("Export Error", f"Failed to export video:\n{str(e)}")

        # Run export in background thread
        threading.Thread(target=export_thread, daemon=True).start()

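    # Example: the export progress callback receives a fraction in [0, 1];
    # progress_callback(0.42) sets the bar to 42% and repaints the window.
    # (VideoEditor.export, defined elsewhere in this module, is assumed to
    # invoke it periodically while writing the file.)
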
    def reset_edited_video(self):
        """Reset all edits and reload original video"""
        if hasattr(self, 'video_editor') and self.video_editor:
            self.video_editor.reset()
            self.refresh_video_preview()
            messagebox.showinfo("Reset", "All edits have been reset to original video")
            print("🔄 Video reset to original state")
        else:
            messagebox.showwarning("No Video", "No video loaded to reset!")

    def set_widget_state(self, widget, state):
        """Recursively set widget state"""
        try:
            widget.config(state=state)
        except tk.TclError:
            # Containers such as Frame/LabelFrame have no 'state' option
            pass
        for child in widget.winfo_children():
            self.set_widget_state(child, state)

    def update_video_info(self):
        """Update the video information display"""
        if self.video_info and self.current_video:
            filename = os.path.basename(self.current_video)
            info_text = f"""📁 File: {filename}
⏱️ Duration: {self.video_info['duration']:.2f} seconds
📐 Resolution: {self.video_info['size'][0]} x {self.video_info['size'][1]}
🎬 FPS: {self.video_info['fps']:.1f}
🔊 Audio: {'Yes' if self.video_info['has_audio'] else 'No'}
💾 Size: {os.path.getsize(self.current_video) / (1024*1024):.1f} MB"""
            self.info_label.config(text=info_text)

    def select_output_folder(self):
        """Select output folder for edited videos"""
        folder = filedialog.askdirectory(title="Select Output Folder")
        if folder:
            self.output_folder.set(folder)

    def refresh_video_list(self):
        """Refresh the list of available videos"""
        self.video_listbox.delete(0, tk.END)
        self.video_files.clear()

        shorts_files = glob.glob(os.path.join(self.shorts_folder, "*.mp4"))
        for video_file in sorted(shorts_files):
            try:
                info = VideoEditor.get_video_info(video_file)
                filename = os.path.basename(video_file)
                size_mb = os.path.getsize(video_file) / (1024 * 1024)
                display_text = f"{filename:<20} │ {info['duration']:.1f}s │ {info['size'][0]}x{info['size'][1]} │ {size_mb:.1f}MB"
                self.video_listbox.insert(tk.END, display_text)
                self.video_files.append(video_file)
            except Exception as e:
                print(f"Error reading {video_file}: {e}")

    def open_shorts_folder(self):
        """Open the shorts folder in file explorer"""
        import subprocess
        try:
            # 'explorer' is Windows-specific; on other platforms (or on
            # failure) fall back to simply showing the path
            subprocess.run(['explorer', os.path.abspath(self.shorts_folder)], check=True)
        except Exception:
            messagebox.showinfo("Folder Location", f"Shorts folder: {os.path.abspath(self.shorts_folder)}")

    def get_output_path(self, suffix):
        """Generate output path with timestamp"""
        if not self.current_video:
            return None

        os.makedirs(self.output_folder.get(), exist_ok=True)
        base_name = os.path.splitext(os.path.basename(self.current_video))[0]
        timestamp = datetime.now().strftime("%H%M%S")
        return os.path.join(self.output_folder.get(), f"{base_name}_{suffix}_{timestamp}.mp4")

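    # Example: editing "clip_01.mp4" with suffix "trim" at 14:30:22 yields
    # <output folder>/clip_01_trim_143022.mp4 (the filename here is
    # illustrative; the folder comes from the Output Folder field).
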
    def show_progress_dialog(self, title, operation_func):
        """Show progress dialog for editing operations"""
        progress_window = tk.Toplevel(self.editor_window)
        progress_window.title(title)
        progress_window.geometry("400x120")
        progress_window.transient(self.editor_window)
        progress_window.grab_set()

        tk.Label(progress_window, text=f"🎬 {title}", font=("Arial", 12, "bold")).pack(pady=10)

        progress_label = tk.Label(progress_window, text="Processing video...")
        progress_label.pack(pady=5)

        progress_bar = ttk.Progressbar(progress_window, mode="indeterminate")
        progress_bar.pack(fill="x", padx=20, pady=10)
        progress_bar.start()

        def run_operation():
            try:
                result = operation_func()
                progress_window.after(0, lambda r=result: self.operation_complete(progress_window, r, title))
            except Exception as error:
                progress_window.after(0, lambda err=str(error): self.operation_error(progress_window, err))

        threading.Thread(target=run_operation, daemon=True).start()

    def operation_complete(self, progress_window, result, operation_name):
        """Handle successful operation completion"""
        progress_window.destroy()
        if result:
            messagebox.showinfo("Success",
                                f"✅ {operation_name} completed successfully!\n\n"
                                f"Output saved to:\n{result}")
            self.refresh_video_list()

    def operation_error(self, progress_window, error_msg):
        """Handle operation error"""
        progress_window.destroy()
        messagebox.showerror("Error", f"❌ Operation failed:\n{error_msg}")

    # Editing tool methods
    def trim_video(self):
        """Apply trim to the current video editor"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showwarning("No Video", "Please select a video first!")
            return

        start = self.trim_start.get()
        end = self.trim_end.get()

        if start >= end:
            messagebox.showwarning("Invalid Range", "Start time must be less than end time!")
            return

        try:
            self.video_editor.apply_trim(start, end)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Video trimmed from {start:.1f}s to {end:.1f}s")
        except Exception as e:
            print(f"❌ Error applying trim: {e}")
            messagebox.showerror("Trim Error", f"Failed to trim video:\n{str(e)}")

    def adjust_speed(self):
        """Apply speed adjustment to the current video editor"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showwarning("No Video", "Please select a video first!")
            return

        speed = self.speed_factor.get()
        if speed <= 0:
            messagebox.showwarning("Invalid Speed", "Speed must be greater than 0!")
            return

        try:
            self.video_editor.apply_speed(speed)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Speed adjusted to {speed:.1f}x")
        except Exception as e:
            print(f"❌ Error applying speed: {e}")
            messagebox.showerror("Speed Error", f"Failed to adjust speed:\n{str(e)}")

    def add_fades(self):
        """Apply fade effects to the current video editor"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showwarning("No Video", "Please select a video first!")
            return

        fade_in = self.fade_in.get()
        fade_out = self.fade_out.get()

        try:
            self.video_editor.apply_fade_effects(fade_in, fade_out)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Fade effects applied: in {fade_in:.1f}s, out {fade_out:.1f}s")
        except Exception as e:
            print(f"❌ Error applying fades: {e}")
            messagebox.showerror("Fade Error", f"Failed to apply fade effects:\n{str(e)}")

    def adjust_volume(self):
        """Apply volume adjustment to the current video editor"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showwarning("No Video", "Please select a video first!")
            return

        volume = self.volume_factor.get()

        try:
            self.video_editor.apply_volume(volume)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Volume adjusted to {volume:.1f}x")
        except Exception as e:
            print(f"❌ Error applying volume: {e}")
            messagebox.showerror("Volume Error", f"Failed to adjust volume:\n{str(e)}")

    def resize_video(self):
        """Apply resize to the current video editor"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showwarning("No Video", "Please select a video first!")
            return

        width = self.resize_width.get()
        height = self.resize_height.get()

        if width < 1 or height < 1:
            messagebox.showwarning("Invalid Size", "Width and height must be positive!")
            return

        try:
            self.video_editor.apply_resize(width, height)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Video resized to {width}x{height}")
        except Exception as e:
            print(f"❌ Error applying resize: {e}")
            messagebox.showerror("Resize Error", f"Failed to resize video:\n{str(e)}")

    def add_text_overlay(self):
        """Apply text overlay to the current video editor"""
        if not hasattr(self, 'video_editor') or not self.video_editor:
            messagebox.showwarning("No Video", "Please select a video first!")
            return

        text = self.overlay_text.get().strip()
        if not text:
            messagebox.showwarning("No Text", "Please enter text to overlay!")
            return

        # "center,bottom" becomes the tuple ('center', 'bottom')
        position_str = self.text_position.get()
        position = tuple(position_str.split(','))
        size = self.text_size.get()
        method = self.text_method.get()

        try:
            self.video_editor.apply_text_overlay_to_current(text, position, size, 'white', method)
            self.refresh_video_preview()
            messagebox.showinfo("Success", f"Text '{text[:30]}' added successfully")
        except Exception as e:
            print(f"❌ Error applying text overlay: {e}")
            messagebox.showerror("Text Error", f"Failed to add text overlay:\n{str(e)}")

# GUI Components
class ShortsGeneratorGUI:
    def __init__(self, root):
        self.root = root
        self.root.title("🎬 AI Shorts Generator - Advanced Video Moment Detection")
        self.root.geometry("650x650")  # Reduced height to eliminate empty space
        self.root.minsize(500, 500)  # Set minimum size for responsiveness

        # Make window responsive
        self.root.rowconfigure(0, weight=1)
        self.root.columnconfigure(0, weight=1)

        self.video_path = None
        self.output_folder = "shorts"
        self.max_clips = 3
        self.threshold_db = -30
        self.clip_duration = 5

        # Bind resize event
        self.root.bind('<Configure>', self.on_window_resize)

        self.create_widgets()

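    # Example (sketch): launching this window. Assumes the module is run as
    # a script; an equivalent main guard may already exist elsewhere in the
    # file.
    #
    #   if __name__ == "__main__":
    #       root = tk.Tk()
    #       app = ShortsGeneratorGUI(root)
    #       root.mainloop()
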
def create_widgets(self):
|
||
# Create main scrollable container
|
||
main_container = tk.Frame(self.root)
|
||
main_container.pack(fill="both", expand=True, padx=10, pady=10)
|
||
main_container.rowconfigure(0, weight=1)
|
||
main_container.columnconfigure(0, weight=1)
|
||
|
||
# Create canvas and scrollbar for scrolling
|
||
canvas = tk.Canvas(main_container)
|
||
scrollbar = ttk.Scrollbar(main_container, orient="vertical", command=canvas.yview)
|
||
scrollable_frame = tk.Frame(canvas)
|
||
|
||
scrollable_frame.bind(
|
||
"<Configure>",
|
||
lambda e: canvas.configure(scrollregion=canvas.bbox("all"))
|
||
)
|
||
|
||
canvas.create_window((0, 0), window=scrollable_frame, anchor="nw")
|
||
canvas.configure(yscrollcommand=scrollbar.set)
|
||
|
||
# Make scrollable frame responsive
|
||
scrollable_frame.columnconfigure(0, weight=1)
|
||
|
||
# Title
|
||
title_label = tk.Label(scrollable_frame, text="🎬 AI Shorts Generator", font=("Arial", 16, "bold"))
|
||
title_label.grid(row=0, column=0, pady=10, sticky="ew")
|
||
|
||
# Video selection
|
||
video_frame = tk.Frame(scrollable_frame)
|
||
video_frame.grid(row=1, column=0, pady=10, sticky="ew")
|
||
video_frame.columnconfigure(0, weight=1)
|
||
|
||
tk.Label(video_frame, text="Select Video File:").grid(row=0, column=0, sticky="w")
|
||
video_select_frame = tk.Frame(video_frame)
|
||
video_select_frame.grid(row=1, column=0, pady=5, sticky="ew")
|
||
video_select_frame.columnconfigure(0, weight=1)
|
||
|
||
self.video_label = tk.Label(video_select_frame, text="No video selected", bg="white", relief="sunken")
|
||
self.video_label.grid(row=0, column=0, sticky="ew", padx=(0, 5))
|
||
|
||
tk.Button(video_select_frame, text="Browse", command=self.select_video).grid(row=0, column=1)
|
||
|
||
# Output folder selection
|
||
output_frame = tk.Frame(scrollable_frame)
|
||
output_frame.grid(row=2, column=0, pady=10, sticky="ew")
|
||
output_frame.columnconfigure(0, weight=1)
|
||
|
||
tk.Label(output_frame, text="Output Folder:").grid(row=0, column=0, sticky="w")
|
||
output_select_frame = tk.Frame(output_frame)
|
||
output_select_frame.grid(row=1, column=0, pady=5, sticky="ew")
|
||
output_select_frame.columnconfigure(0, weight=1)
|
||
|
||
self.output_label = tk.Label(output_select_frame, text="shorts/", bg="white", relief="sunken")
|
||
self.output_label.grid(row=0, column=0, sticky="ew", padx=(0, 5))
|
||
|
||
tk.Button(output_select_frame, text="Browse", command=self.select_output_folder).grid(row=0, column=1)
|
||
|
||
# Settings frame
|
||
settings_frame = tk.LabelFrame(scrollable_frame, text="Settings", padx=10, pady=10)
|
||
settings_frame.grid(row=3, column=0, pady=10, sticky="ew")
|
||
settings_frame.columnconfigure(0, weight=1)
|
||
|
||
# Max clips with on/off toggle
|
||
clips_frame = tk.Frame(settings_frame)
|
||
clips_frame.grid(row=0, column=0, pady=5, sticky="ew")
|
||
clips_frame.columnconfigure(1, weight=1)
|
||
|
||
self.use_max_clips = tk.BooleanVar(value=True)
|
||
clips_checkbox = tk.Checkbutton(clips_frame, variable=self.use_max_clips, text="Max Clips to Generate:")
|
||
clips_checkbox.grid(row=0, column=0, sticky="w")
|
||
|
||
self.clips_var = tk.IntVar(value=3)
|
||
self.clips_spinbox = tk.Spinbox(clips_frame, from_=1, to=10, width=5, textvariable=self.clips_var)
|
||
self.clips_spinbox.grid(row=0, column=2, sticky="e")
|
||
|
||
# Bind checkbox to enable/disable spinbox
|
||
def toggle_clips_limit():
|
||
if self.use_max_clips.get():
|
||
self.clips_spinbox.config(state="normal")
|
||
else:
|
||
self.clips_spinbox.config(state="disabled")
|
||
|
||
self.use_max_clips.trace("w", lambda *args: toggle_clips_limit())
|
||
clips_checkbox.config(command=toggle_clips_limit)
|
||
|
||
# Add tooltip for max clips setting
|
||
clips_tooltip_text = """Max Clips Control:
|
||
|
||
• Checked: Limit the number of clips generated
|
||
• Unchecked: Generate all detected moments
|
||
• 1-3 clips: Quick highlights for social media
|
||
• 4-6 clips: Good variety pack
|
||
• 7-10 clips: Comprehensive highlight reel
|
||
|
||
Tip: Start with 3 clips, then increase if you want more content"""
|
||
ToolTip(self.clips_spinbox, clips_tooltip_text, side='right')
|
||
ToolTip(clips_checkbox, clips_tooltip_text, side='right')
|
||
|
||
        # Detection Mode Selection
        detection_frame = tk.Frame(settings_frame)
        detection_frame.grid(row=1, column=0, pady=5, sticky="ew")
        detection_frame.columnconfigure(1, weight=1)

        tk.Label(detection_frame, text="Detection Mode:", font=("Arial", 9, "bold")).grid(row=0, column=0, sticky="w")

        self.detection_mode_var = tk.StringVar(value="loud")
        self.detection_display_var = tk.StringVar(value="🔊 Loud Moments")

        detection_dropdown = ttk.Combobox(detection_frame, textvariable=self.detection_display_var,
                                          values=["🔊 Loud Moments", "🎬 Scene Changes", "🏃 Motion Intensity",
                                                  "😄 Emotional Speech", "🎵 Audio Peaks", "🎯 Smart Combined"],
                                          state="readonly", width=22)
        detection_dropdown.grid(row=0, column=1, sticky="e")

        # Store the mapping between display text and internal values
        self.mode_mapping = {
            "🔊 Loud Moments": "loud",
            "🎬 Scene Changes": "scene",
            "🏃 Motion Intensity": "motion",
            "😄 Emotional Speech": "speech",
            "🎵 Audio Peaks": "peaks",
            "🎯 Smart Combined": "combined"
        }
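        # Note: the internal values above ("loud", "scene", ...) are the
        # detection_mode strings that preview_clips() and the generation
        # workers dispatch on; the emoji display strings are purely cosmetic.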
        # Simple, clear descriptions for mode tooltips
        mode_descriptions = {
            "🔊 Loud Moments": """Analyzes audio volume levels to find the loudest parts of your video.

• Best for: Gaming reactions, music highlights, shouting moments
• Finds: High-volume audio segments above the threshold
• Ideal when: Your video has clear volume differences
• Tip: Adjust threshold if too many/few moments found""",

            "🎬 Scene Changes": """Detects dramatic visual transitions and cuts in your video.

• Best for: Movie trailers, montages, location changes
• Finds: Major visual shifts between frames
• Ideal when: Video has multiple scenes or camera angles
• Tip: Great for content with quick cuts or transitions""",

            "🏃 Motion Intensity": """Analyzes movement and action within video frames.

• Best for: Sports highlights, dance videos, action scenes
• Finds: High-movement moments with lots of visual activity
• Ideal when: Video contains physical action or movement
• Tip: Perfect for extracting the most dynamic moments""",

            "😄 Emotional Speech": """Uses AI to detect excited, emotional, or emphatic speech patterns.

• Best for: Reactions, reviews, commentary, tutorials
• Finds: Words like 'wow', 'amazing', exclamations, excited tone
• Ideal when: Video has spoken content with emotional moments
• Tip: Captures the most engaging verbal reactions""",

            "🎵 Audio Peaks": """Detects sudden audio spikes like bass drops, impacts, or sound effects.

• Best for: Music videos, sound effect moments, beat drops
• Finds: Sharp increases in audio frequency or volume
• Ideal when: Video has musical elements or sound effects
• Tip: Great for rhythm-based or audio-driven content""",

            "🎯 Smart Combined": """Intelligently combines all detection methods for optimal results.

• Best for: Any video type, general content, unsure what to use
• Finds: Moments scoring high across multiple analysis methods
• Ideal when: You want the most 'interesting' overall moments
• Tip: Recommended starting point for most videos"""
        }
        # Create tooltip for the dropdown (updates when selection changes)
        current_tooltip_text = mode_descriptions["🔊 Loud Moments"]  # Default
        dropdown_tooltip = ToolTip(detection_dropdown, current_tooltip_text)

        # Update tooltip when selection changes
        def on_detection_change(event):
            selection = detection_dropdown.get()
            # Reuse self.mode_mapping rather than keeping a second copy of the table
            self.detection_mode_var.set(self.mode_mapping.get(selection, "loud"))

            # Update tooltip text for the selected mode
            dropdown_tooltip.text = mode_descriptions.get(selection, "Select a detection mode")

            # Show/hide threshold setting based on mode. threshold_frame is
            # created just below; that is safe because the closure resolves the
            # name at call time, and the event can only fire after setup finishes.
            if selection == "🔊 Loud Moments":
                threshold_frame.grid(row=2, column=0, pady=5, sticky="ew")
            else:
                threshold_frame.grid_remove()

        detection_dropdown.bind("<<ComboboxSelected>>", on_detection_change)
        # Audio threshold (only shown for loud moments)
        threshold_frame = tk.Frame(settings_frame)
        threshold_frame.grid(row=2, column=0, pady=5, sticky="ew")
        threshold_frame.columnconfigure(1, weight=1)

        threshold_label = tk.Label(threshold_frame, text="Audio Threshold (dB):")
        threshold_label.grid(row=0, column=0, sticky="w")
        self.threshold_var = tk.IntVar(value=-30)
        threshold_spinbox = tk.Spinbox(threshold_frame, from_=-50, to=0, width=5, textvariable=self.threshold_var)
        threshold_spinbox.grid(row=0, column=2, sticky="e")

        # Add tooltip for threshold setting
        threshold_tooltip_text = """Audio Threshold Control:

• Higher values (closer to 0): Only very loud moments
• Lower values (closer to -50): More moments detected
• Default -30 dB: Good balance for most videos
• Adjust based on your video's audio levels

Example: Gaming videos might need -20 dB, quiet vlogs might need -40 dB"""
        ToolTip(threshold_spinbox, threshold_tooltip_text, side='right')
        # Clip duration (increased to 120 seconds max)
        duration_frame = tk.Frame(settings_frame)
        duration_frame.grid(row=3, column=0, pady=5, sticky="ew")
        duration_frame.columnconfigure(1, weight=1)

        duration_label = tk.Label(duration_frame, text="Clip Duration (seconds):")
        duration_label.grid(row=0, column=0, sticky="w")
        self.duration_var = tk.IntVar(value=5)
        duration_spinbox = tk.Spinbox(duration_frame, from_=3, to=120, width=5, textvariable=self.duration_var)
        duration_spinbox.grid(row=0, column=2, sticky="e")

        # Add tooltip for duration setting
        duration_tooltip_text = """Clip Duration Setting:

• 3-10 seconds: Perfect for TikTok/Instagram Reels
• 10-30 seconds: Good for YouTube Shorts
• 30-60 seconds: Longer form highlights
• 60+ seconds: Extended content clips

Shorter clips = more viral potential
Longer clips = more context and story"""
        ToolTip(duration_spinbox, duration_tooltip_text, side='right')
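        # The duration value doubles as the analysis window: preview_clips()
        # passes it to the detect_* functions as chunk_duration, so longer
        # clips also mean coarser moment detection.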
        # Preview button
        self.preview_btn = tk.Button(scrollable_frame, text="🔍 Preview Clips",
                                     command=self.preview_clips, bg="#2196F3", fg="white",
                                     font=("Arial", 10, "bold"), pady=5)
        self.preview_btn.grid(row=4, column=0, pady=5, sticky="ew")

        # Add tooltip for preview button
        preview_tooltip_text = """Preview Clips Feature:

• Analyzes your video using the selected detection mode
• Shows all detected moments with timestamps
• Lets you select specific clips to generate
• No video files created - just analysis
• Great for testing settings before full generation

Tip: Always preview first to see what the AI finds!"""
        ToolTip(self.preview_btn, preview_tooltip_text, side='right')
        # Generate button
        self.generate_btn = tk.Button(scrollable_frame, text="🎬 Generate Shorts",
                                      command=self.start_generation, bg="#4CAF50", fg="white",
                                      font=("Arial", 12, "bold"), pady=10)
        self.generate_btn.grid(row=5, column=0, pady=10, sticky="ew")

        # Add tooltip for generate button
        generate_tooltip_text = """Generate Shorts Feature:

• Creates actual video files from detected moments
• Adds AI-generated subtitles to each clip
• Formats videos for vertical social media (1080x1920)
• Saves clips to your selected output folder
• Takes longer but creates ready-to-post content

Tip: Use Preview first to fine-tune your settings!"""
        ToolTip(self.generate_btn, generate_tooltip_text, side='right')
        # Edit Shorts button
        self.edit_btn = tk.Button(scrollable_frame, text="✏️ Edit Generated Shorts",
                                  command=self.open_shorts_editor, bg="#FF9800", fg="white",
                                  font=("Arial", 11, "bold"), pady=8)
        self.edit_btn.grid(row=6, column=0, pady=5, sticky="ew")

        # Add tooltip for edit button
        edit_tooltip_text = """Professional Shorts Editor:

• Select any generated short for editing
• Trim, speed up/slow down videos
• Add fade in/out effects
• Adjust volume levels
• Resize and crop videos
• Add custom text overlays
• Real-time preview and professional tools

Transform your shorts into perfect content!"""
        ToolTip(self.edit_btn, edit_tooltip_text, side='right')
        # Thumbnail Editor button
        self.thumbnail_btn = tk.Button(scrollable_frame, text="📸 Create Thumbnails",
                                       command=self.open_thumbnail_editor, bg="#9C27B0", fg="white",
                                       font=("Arial", 11, "bold"), pady=8)
        self.thumbnail_btn.grid(row=7, column=0, pady=5, sticky="ew")

        # Add tooltip for thumbnail button
        thumbnail_tooltip_text = """Professional Thumbnail Editor:

• Select any video to create custom thumbnails
• Choose the perfect frame with timeline slider
• Add text overlays with custom fonts and colors
• Add stickers and emojis for eye-catching designs
• Drag and drop positioning
• High-quality export (JPEG/PNG)
• Perfect for YouTube, TikTok, Instagram

Create thumbnails that get clicks!"""
        ToolTip(self.thumbnail_btn, thumbnail_tooltip_text, side='right')
        # Progress frame
        progress_frame = tk.Frame(scrollable_frame)
        progress_frame.grid(row=8, column=0, pady=5, sticky="ew")
        progress_frame.columnconfigure(0, weight=1)

        self.progress_label = tk.Label(progress_frame, text="Ready to generate shorts")
        self.progress_label.grid(row=0, column=0, sticky="ew")

        self.progress_bar = ttk.Progressbar(progress_frame, length=400, mode="determinate")
        self.progress_bar.grid(row=1, column=0, pady=3, sticky="ew")

        # Detection progress (initially hidden)
        self.detection_progress_label = tk.Label(progress_frame, text="", font=("Arial", 9), fg="gray")
        self.detection_progress_label.grid(row=2, column=0, sticky="ew")

        self.detection_progress_bar = ttk.Progressbar(progress_frame, length=400, mode="determinate")
        self.detection_progress_bar.grid(row=3, column=0, pady=(0, 3), sticky="ew")

        # Initially hide detection progress (grid_remove remembers the grid
        # options, so a plain grid() call restores them later)
        self.detection_progress_label.grid_remove()
        self.detection_progress_bar.grid_remove()

        # Grid the canvas and scrollbar
        canvas.grid(row=0, column=0, sticky="nsew")
        scrollbar.grid(row=0, column=1, sticky="ns")
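        # canvas and scrollbar are assumed to be created earlier in this setup
        # method; together with scrollable_frame they form the scrollable
        # container that lets the settings column scroll on small screens.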
    def on_window_resize(self, event):
        """Handle window resize events for responsive layout"""
        if event.widget == self.root:
            # Get current window size
            width = self.root.winfo_width()

            # Adjust progress bar length based on window width
            progress_length = max(300, width - 150)
            try:
                self.progress_bar.config(length=progress_length)
                self.detection_progress_bar.config(length=progress_length)
            except (AttributeError, tk.TclError):
                # Progress bars may not exist yet during early resize events
                pass
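    # Note: a Progressbar's "length" is only its requested size in pixels;
    # with sticky="ew" the bars already stretch with the window, so this
    # handler mainly keeps the requested minimum sensible.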
    def select_video(self):
        """Prompt for a source video file and show its name in the UI."""
        file_path = filedialog.askopenfilename(
            title="Select Video File",
            filetypes=[("Video files", "*.mp4 *.mov *.avi *.mkv *.wmv")]
        )
        if file_path:
            self.video_path = file_path
            self.video_label.config(text=os.path.basename(file_path))

    def select_output_folder(self):
        """Prompt for the folder that generated shorts will be written to."""
        folder_path = filedialog.askdirectory(title="Select Output Folder")
        if folder_path:
            self.output_folder = folder_path
            self.output_label.config(text=folder_path)
    def preview_clips(self):
        """Analyze the video with the selected mode and let the user pick clips."""
        if not self.video_path:
            messagebox.showwarning("Warning", "Please select a video file first!")
            return

        try:
            # Validate video first
            validate_video(self.video_path, min_duration=self.duration_var.get() * 2)

            # Analyze using selected detection mode
            self.preview_btn.config(state="disabled", text="Analyzing...")
            self.root.update()

            detection_mode = self.detection_mode_var.get()

            if detection_mode == "loud":
                moments = detect_loud_moments(
                    self.video_path,
                    chunk_duration=self.duration_var.get(),
                    threshold_db=self.threshold_var.get()
                )
                mode_name = "loud moments"
            elif detection_mode == "scene":
                moments = detect_scene_changes(self.video_path, chunk_duration=self.duration_var.get())
                mode_name = "scene changes"
            elif detection_mode == "motion":
                moments = detect_motion_intensity(self.video_path, chunk_duration=self.duration_var.get())
                mode_name = "motion moments"
            elif detection_mode == "speech":
                moments = detect_speech_emotion(self.video_path, chunk_duration=self.duration_var.get())
                mode_name = "emotional speech"
            elif detection_mode == "peaks":
                moments = detect_audio_peaks(self.video_path, chunk_duration=self.duration_var.get())
                mode_name = "audio peaks"
            elif detection_mode == "combined":
                moments = detect_combined_intensity(self.video_path, chunk_duration=self.duration_var.get())
                mode_name = "interesting moments"
            else:
                # Unknown mode: fall back to loud-moment detection
                moments = detect_loud_moments(
                    self.video_path,
                    chunk_duration=self.duration_var.get(),
                    threshold_db=self.threshold_var.get()
                )
                mode_name = "loud moments"

            if not moments:
                messagebox.showinfo("Preview", f"No {mode_name} found.\nTry a different detection mode or adjust settings.")
                return

            # Show preview window
            preview_window = tk.Toplevel(self.root)
            preview_window.title("Preview and Select Clips")
            preview_window.geometry("500x400")

            tk.Label(preview_window, text=f"Found {len(moments)} {mode_name}:", font=("Arial", 12, "bold")).pack(pady=10)

            # Create scrollable frame for checkboxes
            canvas = tk.Canvas(preview_window)
            scrollbar = tk.Scrollbar(preview_window, orient="vertical", command=canvas.yview)
            scrollable_frame = tk.Frame(canvas)

            scrollable_frame.bind(
                "<Configure>",
                lambda e: canvas.configure(scrollregion=canvas.bbox("all"))
            )

            canvas.create_window((0, 0), window=scrollable_frame, anchor="nw")
            canvas.configure(yscrollcommand=scrollbar.set)

            # Store checkbox variables and clip data
            self.clip_vars = []
            # Use all clips if max clips is disabled, otherwise limit by setting
            clips_to_show = moments if not self.use_max_clips.get() else moments[:self.clips_var.get()]
            self.preview_clips_data = clips_to_show

            # Add selectable clips with checkboxes
            for i, (start, end) in enumerate(self.preview_clips_data, 1):
                duration = end - start
                time_str = f"Clip {i}: {start//60:02.0f}:{start%60:05.2f} - {end//60:02.0f}:{end%60:05.2f} ({duration:.1f}s)"

                clip_var = tk.BooleanVar(value=True)  # Default selected
                self.clip_vars.append(clip_var)

                clip_frame = tk.Frame(scrollable_frame)
                clip_frame.pack(fill="x", padx=10, pady=2)

                checkbox = tk.Checkbutton(clip_frame, variable=clip_var, text=time_str,
                                          font=("Courier", 10), anchor="w")
                checkbox.pack(fill="x")

            canvas.pack(side="left", fill="both", expand=True, padx=10, pady=5)
            scrollbar.pack(side="right", fill="y")

            # Button frame
            button_frame = tk.Frame(preview_window)
            button_frame.pack(fill="x", padx=10, pady=10)

            # Select/Deselect all buttons
            control_frame = tk.Frame(button_frame)
            control_frame.pack(fill="x", pady=5)

            tk.Button(control_frame, text="Select All",
                      command=lambda: [var.set(True) for var in self.clip_vars]).pack(side="left", padx=5)
            tk.Button(control_frame, text="Deselect All",
                      command=lambda: [var.set(False) for var in self.clip_vars]).pack(side="left", padx=5)

            # Generate selected clips button (fixed size for full text visibility)
            generate_selected_btn = tk.Button(button_frame, text="🎬 Generate Selected Clips",
                                              command=lambda: self.generate_selected_clips(preview_window),
                                              bg="#4CAF50", fg="white", font=("Arial", 11, "bold"),
                                              pady=8, width=25)
            generate_selected_btn.pack(fill="x", pady=5)

            # Close button
            tk.Button(button_frame, text="Close", command=preview_window.destroy).pack(pady=5)

        except Exception as e:
            messagebox.showerror("Preview Error", f"Error analyzing video: {str(e)}")
        finally:
            self.preview_btn.config(state="normal", text="🔍 Preview Clips")
    def generate_selected_clips(self, preview_window):
        """Generate only the selected clips from preview"""
        try:
            # Get selected clips
            selected_clips = []
            for i, (clip_var, clip_data) in enumerate(zip(self.clip_vars, self.preview_clips_data)):
                if clip_var.get():
                    selected_clips.append((i + 1, clip_data))  # (clip_number, (start, end))

            if not selected_clips:
                messagebox.showwarning("Warning", "Please select at least one clip to generate!")
                return

            # Close preview window
            preview_window.destroy()

            # Show confirmation
            clip_count = len(selected_clips)
            clip_numbers = [str(num) for num, _ in selected_clips]
            confirm_msg = f"Generate {clip_count} selected clips (#{', #'.join(clip_numbers)})?"

            if not messagebox.askyesno("Confirm Generation", confirm_msg):
                return

            # Start generation in background thread
            self.selected_clips_data = [clip_data for _, clip_data in selected_clips]
            self.generate_btn.config(state="disabled", text="Generating Selected...")
            thread = threading.Thread(target=self.selected_generation_worker)
            thread.daemon = True
            thread.start()

        except Exception as e:
            messagebox.showerror("Generation Error", f"Error starting generation: {str(e)}")
    def selected_generation_worker(self):
        """Generate only selected clips"""
        try:
            # Check available disk space
            import shutil
            free_space_gb = shutil.disk_usage(self.output_folder)[2] / (1024**3)
            if free_space_gb < 1:
                raise RuntimeError(f"Insufficient disk space. Only {free_space_gb:.1f} GB available. Need at least 1 GB.")

            # Validate video first
            try:
                video_duration = validate_video(self.video_path, min_duration=self.duration_var.get() * 2)
                self.update_progress(f"✅ Video validated ({video_duration:.1f}s)", 5)
            except Exception:
                self.update_progress("❌ Video validation failed", 0)
                raise  # bare raise preserves the original traceback

            os.makedirs(self.output_folder, exist_ok=True)

            selected_count = len(self.selected_clips_data)
            self.update_progress(f"📊 Generating {selected_count} selected clips", 10)

            for i, (start, end) in enumerate(self.selected_clips_data):
                self.update_progress(f"🗣️ Transcribing clip {i+1}/{selected_count}", 20 + (i * 30))

                subtitles = transcribe_and_extract_subtitles(self.video_path, start, end)
                out_path = os.path.join(self.output_folder, f"short_{i+1}.mp4")

                self.update_progress(f"🎬 Creating video {i+1}/{selected_count}", 40 + (i * 30))

                create_short_clip(self.video_path, start, end, subtitles, out_path)

            self.update_progress("✅ Selected clips generated successfully!", 100)
            messagebox.showinfo("Success", f"Successfully generated {selected_count} selected clips in '{self.output_folder}' folder!")

        except FileNotFoundError as e:
            messagebox.showerror("File Error", str(e))
        except ValueError as e:
            messagebox.showerror("Video Error", str(e))
        except RuntimeError as e:
            messagebox.showerror("System Error", str(e))
        except Exception as e:
            messagebox.showerror("Error", f"An unexpected error occurred: {str(e)}")
        finally:
            self.generate_btn.config(state="normal", text="🎬 Generate Shorts")
            self.progress_bar["value"] = 0
            self.progress_label.config(text="Ready to generate shorts")
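    # Note: the generation workers run on background threads but update Tk
    # widgets directly through update_progress()/messagebox, and Tkinter is
    # not guaranteed thread-safe. A more defensive pattern would marshal
    # updates onto the main loop, e.g.:
    #     self.root.after(0, lambda: self.progress_label.config(text=message))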
    def update_progress(self, message, percent):
        """Update the main progress bar and status label."""
        self.progress_label.config(text=message)
        self.progress_bar["value"] = percent
        self.root.update()

    def show_detection_progress(self):
        """Show the detection progress bar"""
        # These widgets are grid-managed (pack() on grid-managed siblings
        # raises a TclError); grid() with no options restores the settings
        # remembered from the earlier grid_remove().
        self.detection_progress_label.grid()
        self.detection_progress_bar.grid()
        self.root.update_idletasks()

    def hide_detection_progress(self):
        """Hide the detection progress bar"""
        self.detection_progress_label.grid_remove()
        self.detection_progress_bar.grid_remove()
        self.root.update_idletasks()

    def update_detection_progress(self, message, percent):
        """Update detection progress bar and message"""
        self.detection_progress_label.config(text=message)
        self.detection_progress_bar["value"] = percent
        self.root.update_idletasks()
    def generation_worker(self):
        """Run full shorts generation in a background thread."""
        try:
            # Check available disk space
            import shutil
            free_space_gb = shutil.disk_usage(self.output_folder)[2] / (1024**3)
            if free_space_gb < 1:
                raise RuntimeError(f"Insufficient disk space. Only {free_space_gb:.1f} GB available. Need at least 1 GB.")

            # Show detection progress for heavy modes
            detection_mode = self.detection_mode_var.get()
            if detection_mode in ["scene", "motion", "speech", "peaks", "combined"]:
                self.show_detection_progress()

                def detailed_progress_callback(status, percent):
                    # Update main progress
                    self.update_progress(status, percent)

                def detection_progress_callback(detection_percent, detection_status):
                    # Update detection progress bar
                    self.update_detection_progress(detection_status, detection_percent)

                # Pass both callbacks to generate_shorts. When the max-clips
                # checkbox is unchecked, the cap falls back to 10 (the spinbox
                # maximum) rather than being truly unlimited.
                generate_shorts(
                    self.video_path,
                    max_clips=self.clips_var.get() if self.use_max_clips.get() else 10,
                    output_folder=self.output_folder,
                    progress_callback=detailed_progress_callback,
                    detection_progress_callback=detection_progress_callback,
                    threshold_db=self.threshold_var.get(),
                    clip_duration=self.duration_var.get(),
                    detection_mode=detection_mode
                )
            else:
                # Use regular progress for loud moments mode
                generate_shorts(
                    self.video_path,
                    max_clips=self.clips_var.get() if self.use_max_clips.get() else 10,
                    output_folder=self.output_folder,
                    progress_callback=self.update_progress,
                    threshold_db=self.threshold_var.get(),
                    clip_duration=self.duration_var.get(),
                    detection_mode=detection_mode
                )

            messagebox.showinfo("Success", f"Successfully generated shorts in '{self.output_folder}' folder!")
        except FileNotFoundError as e:
            messagebox.showerror("File Error", str(e))
        except ValueError as e:
            messagebox.showerror("Video Error", str(e))
        except RuntimeError as e:
            messagebox.showerror("System Error", str(e))
        except Exception as e:
            messagebox.showerror("Error", f"An unexpected error occurred: {str(e)}")
        finally:
            self.hide_detection_progress()
            self.generate_btn.config(state="normal", text="🎬 Generate Shorts")
            self.progress_bar["value"] = 0
            self.progress_label.config(text="Ready to generate shorts")
    def start_generation(self):
        """Validate inputs and launch generation_worker on a background thread."""
        if not self.video_path:
            messagebox.showwarning("Warning", "Please select a video file first!")
            return

        self.generate_btn.config(state="disabled", text="Generating...")
        thread = threading.Thread(target=self.generation_worker)
        thread.daemon = True
        thread.start()
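    # Both worker threads above run with daemon=True: closing the window
    # kills an in-flight render along with the process instead of leaving a
    # hidden background job alive.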
    def open_shorts_editor(self):
        """Open the professional shorts editor"""
        editor = ShortsEditorGUI(self.root, self.output_folder)
        editor.open_editor()
    def open_thumbnail_editor(self):
        """Open the professional thumbnail editor"""
        try:
            # Check if there are any video files to work with
            video_files = []

            # Check for original video
            if self.video_path:
                video_files.append(("Original Video", self.video_path))

            # Check for generated shorts (glob is imported at module level)
            if os.path.exists(self.output_folder):
                shorts = glob.glob(os.path.join(self.output_folder, "*.mp4"))
                for short in shorts:
                    video_files.append((os.path.basename(short), short))

            if not video_files:
                messagebox.showinfo("No Videos Found",
                                    "Please select a video or generate some shorts first!")
                return

            # If only one video, open it directly
            if len(video_files) == 1:
                selected_video = video_files[0][1]
            else:
                # Let user choose which video to edit
                choice_window = tk.Toplevel(self.root)
                choice_window.title("Select Video for Thumbnail")
                choice_window.geometry("400x300")
                choice_window.transient(self.root)
                choice_window.grab_set()

                tk.Label(choice_window, text="📸 Select Video for Thumbnail Creation",
                         font=("Arial", 12, "bold")).pack(pady=10)

                selected_video = None

                def on_video_select(video_path):
                    nonlocal selected_video
                    selected_video = video_path
                    choice_window.destroy()

                # Create list of videos
                for display_name, video_path in video_files:
                    btn = tk.Button(choice_window, text=f"📹 {display_name}",
                                    command=lambda vp=video_path: on_video_select(vp),
                                    font=("Arial", 10), pady=5, width=40)
                    btn.pack(pady=2, padx=20, fill="x")

                tk.Button(choice_window, text="Cancel",
                          command=choice_window.destroy).pack(pady=10)

                # Wait for selection
                choice_window.wait_window()

            if not selected_video:
                return

            # Deferred import keeps the editor optional: the app still starts
            # without thumbnail_editor.py, and the ImportError handler below
            # shows a friendly message instead of crashing.
            from thumbnail_editor import open_thumbnail_editor
            open_thumbnail_editor(selected_video)

        except ImportError as e:
            messagebox.showerror("Thumbnail Editor Error",
                                 f"Could not load thumbnail editor:\n{str(e)}\n\nMake sure thumbnail_editor.py is in the same folder.")
        except Exception as e:
            messagebox.showerror("Error", f"Failed to open thumbnail editor:\n{str(e)}")
def run_gui():
    """Create the Tk root window and start the application event loop."""
    root = tk.Tk()
    app = ShortsGeneratorGUI(root)
    root.mainloop()
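# Usage (the script name below is illustrative; the module does not fix it):
#   python shorts_generator.py              # GUI mode (default)
#   python shorts_generator.py --gui        # GUI mode, explicit
#   python shorts_generator.py video.mp4    # CLI mode: generate shorts from video.mp4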
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1 and sys.argv[1] != "--gui":
        # Run command line mode
        try:
            generate_shorts(sys.argv[1])
            print("✅ Shorts generation completed successfully!")
        except Exception as e:
            print(f"❌ Error: {str(e)}")
    else:
        # Run GUI mode (default)
        run_gui()