From ae5a287aeb4b73e1dae6b1c28186f9920650716e Mon Sep 17 00:00:00 2001 From: klop51 Date: Sat, 9 Aug 2025 21:45:39 +0200 Subject: [PATCH] Add processing preview with progress tracking to the main application - Implemented a new feature to show a processing preview for clips. - Added a progress bar, elapsed time, and remaining time indicators. - Utilized threading to keep the main application responsive during processing. - Included buttons for previewing clips, opening videos, and exiting the application. --- Main.py | 52 ++ shorts_generator2.py | 1668 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 1618 insertions(+), 102 deletions(-) create mode 100644 Main.py diff --git a/Main.py b/Main.py new file mode 100644 index 0000000..0348d65 --- /dev/null +++ b/Main.py @@ -0,0 +1,52 @@ +import tkinter as tk +from tkinter import ttk, filedialog +import threading +import time + +# Example function that simulates clip processing with time tracking +def process_clips(progress_var, progress_bar, status_label, time_label, total_steps=10): + start_time = time.time() + for i in range(total_steps): + time.sleep(0.5) # Simulate processing time + elapsed = time.time() - start_time + remaining = (elapsed / (i+1)) * (total_steps - (i+1)) + progress_var.set((i+1) * 100 / total_steps) + status_label.config(text=f"Processing... {i+1}/{total_steps}") + time_label.config(text=f"Elapsed: {elapsed:.1f}s | Remaining: {remaining:.1f}s") + status_label.config(text="Done!") + time_label.config(text=f"Total Time: {time.time() - start_time:.1f}s") + +def start_preview_with_progress(): + progress_window = tk.Toplevel(root) + progress_window.title("Processing Preview") + progress_window.geometry("320x130") + progress_window.resizable(False, False) + + status_label = tk.Label(progress_window, text="Initializing...", anchor="w") + status_label.pack(fill="x", padx=10, pady=(10,0)) + + time_label = tk.Label(progress_window, text="Elapsed: 0.0s | Remaining: --s", anchor="w") + time_label.pack(fill="x", padx=10) + + progress_var = tk.DoubleVar() + progress_bar = ttk.Progressbar(progress_window, variable=progress_var, maximum=100) + progress_bar.pack(fill="x", padx=10, pady=10) + + threading.Thread(target=process_clips, args=(progress_var, progress_bar, status_label, time_label), daemon=True).start() # NOTE: process_clips updates Tk widgets from this worker thread; Tkinter is not strictly thread-safe, so stricter code would marshal updates onto the main loop via widget.after() + +# Main Tkinter app remains responsive with all buttons available +root = tk.Tk() +root.title("Shorts Generator 2") +root.geometry("400x200") + +preview_button = tk.Button(root, text="Preview Clips", command=start_preview_with_progress) +preview_button.pack(pady=10) + +# Example of other functional buttons remaining available +open_button = tk.Button(root, text="Open Video", command=lambda: filedialog.askopenfilename()) +open_button.pack(pady=10) + +exit_button = tk.Button(root, text="Exit", command=root.quit) +exit_button.pack(pady=10) + +root.mainloop() diff --git a/shorts_generator2.py b/shorts_generator2.py index 6827e1f..faca354 100644 --- a/shorts_generator2.py +++ b/shorts_generator2.py @@ -13,6 +13,9 @@ import librosa import glob import json from datetime import datetime +from PIL import Image, ImageTk +from PIL import ImageDraw, ImageFont +import time class ToolTip: """Create a tooltip for a given widget""" @@ -187,10 +190,14 @@ def detect_scene_changes(video_path, chunk_duration=5, threshold=0.3): times = np.arange(0, clip.duration, sample_rate) scene_changes = [] - prev_frame = None + total_frames = len(times) - 1 for i, t in enumerate(times[:-1]): try: + # Periodic progress output + if i % 10 == 0: + 
print(f"🎬 Processing frame {i+1}/{total_frames}...") + # Get current and next frame frame1 = clip.get_frame(t) frame2 = clip.get_frame(times[i + 1]) @@ -217,6 +224,476 @@ def detect_scene_changes(video_path, chunk_duration=5, threshold=0.3): clip.close() return scene_changes +def detect_motion_intensity(video_path, chunk_duration=5, threshold=20000000): + """Detect high motion/action scenes""" + print("πŸƒ Analyzing motion intensity...") + clip = VideoFileClip(video_path) + + sample_rate = 1 # Check every second + times = np.arange(0, clip.duration - 1, sample_rate) + + motion_scenes = [] + total_frames = len(times) + + for i, t in enumerate(times): + try: + # Periodic progress output + if i % 20 == 0: + print(f"πŸƒ Processing frame {i+1}/{total_frames}...") + + # Get consecutive frames + frame1 = clip.get_frame(t) + frame2 = clip.get_frame(t + 1) + + # Convert to grayscale and resize + gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY) + gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY) + gray1 = cv2.resize(gray1, (320, 180)) + gray2 = cv2.resize(gray2, (320, 180)) + + # Calculate optical flow magnitude + flow = cv2.calcOpticalFlowPyrLK(gray1, gray2, + np.random.randint(0, 320, (100, 1, 2)).astype(np.float32), + None)[0] + + if flow is not None: + motion_magnitude = np.sum(np.linalg.norm(flow.reshape(-1, 2), axis=1)) + + if motion_magnitude > threshold: + start = max(0, t - chunk_duration/2) + end = min(clip.duration, t + chunk_duration/2) + motion_scenes.append((start, end)) + + except Exception as e: + print(f"⚠️ Motion analysis error at {t:.1f}s: {e}") + continue + + print(f"πŸƒ Found {len(motion_scenes)} motion scenes") + clip.close() + return motion_scenes + +def detect_speech_emotion(video_path, chunk_duration=5): + """Detect emotional speech segments using faster_whisper""" + print("πŸ—£οΈ Analyzing speech emotion...") + + try: + # Load Whisper model for speech detection + model = WhisperModel("base", device="cpu", compute_type="int8") + + # Extract audio temporarily + temp_audio = "temp_audio.wav" + clip = VideoFileClip(video_path) + audio = clip.audio + audio.write_audiofile(temp_audio, verbose=False, logger=None) + + # Transcribe with word-level timestamps + segments, _ = model.transcribe(temp_audio, word_timestamps=True) + + emotional_segments = [] + + for segment in segments: + # Look for emotional indicators in speech patterns + text = segment.text.lower() + + # Check for emotional keywords and speech patterns + emotional_words = ['amazing', 'incredible', 'wow', 'unbelievable', 'shocking', + 'fantastic', 'awesome', 'terrible', 'horrible', 'beautiful'] + + has_emotion = any(word in text for word in emotional_words) + has_exclamation = '!' in segment.text + is_question = '?' 
in segment.text + + if has_emotion or has_exclamation or is_question: + start = max(0, segment.start - chunk_duration/2) + end = min(clip.duration, segment.end + chunk_duration/2) + emotional_segments.append((start, end)) + + # Clean up + audio.close() + clip.close() + if os.path.exists(temp_audio): + os.remove(temp_audio) + + print(f"πŸ—£οΈ Found {len(emotional_segments)} emotional speech segments") + return emotional_segments + + except Exception as e: + print(f"⚠️ Speech analysis error: {e}") + return [] + +def detect_audio_peaks(video_path, chunk_duration=5): + """Detect audio frequency peaks and interesting sounds""" + print("🎡 Analyzing audio peaks...") + + try: + # Extract audio + clip = VideoFileClip(video_path) + audio = clip.audio + + # Convert to numpy array + temp_audio = "temp_peak_audio.wav" + audio.write_audiofile(temp_audio, verbose=False, logger=None) + + # Load with librosa + y, sr = librosa.load(temp_audio) + + # Analyze spectral features + hop_length = 512 + frame_length = 2048 + + # Calculate spectral centroid (brightness) + spectral_centroids = librosa.feature.spectral_centroid(y=y, sr=sr, hop_length=hop_length)[0] + + # Calculate RMS energy + rms = librosa.feature.rms(y=y, hop_length=hop_length)[0] + + # Find frames with high spectral activity + time_frames = librosa.frames_to_time(np.arange(len(spectral_centroids)), sr=sr, hop_length=hop_length) + + peak_segments = [] + + # Threshold for interesting audio + centroid_threshold = np.percentile(spectral_centroids, 85) + rms_threshold = np.percentile(rms, 80) + + for i, (time, centroid, energy) in enumerate(zip(time_frames, spectral_centroids, rms)): + if centroid > centroid_threshold and energy > rms_threshold: + start = max(0, time - chunk_duration/2) + end = min(clip.duration, time + chunk_duration/2) + peak_segments.append((start, end)) + + # Clean up + audio.close() + clip.close() + if os.path.exists(temp_audio): + os.remove(temp_audio) + + print(f"🎡 Found {len(peak_segments)} audio peak segments") + return peak_segments + + except Exception as e: + print(f"⚠️ Audio analysis error: {e}") + return [] + +def detect_combined_moments(video_path, chunk_duration=5): + """Combine multiple detection methods for best results""" + print("🎯 Running combined analysis...") + + try: + # Run multiple detection methods + loud_moments = detect_loud_moments(video_path, chunk_duration) + scene_changes = detect_scene_changes(video_path, chunk_duration) + + # Combine and deduplicate + all_moments = loud_moments + scene_changes + + # Simple deduplication by merging overlapping segments + if not all_moments: + return [] + + # Sort by start time + all_moments.sort(key=lambda x: x[0]) + + # Merge overlapping segments + merged = [all_moments[0]] + for start, end in all_moments[1:]: + last_start, last_end = merged[-1] + if start <= last_end + 1: # Allow 1 second gap + merged[-1] = (last_start, max(last_end, end)) + else: + merged.append((start, end)) + + print(f"🎯 Combined analysis found {len(merged)} interesting moments") + return merged + + except Exception as e: + print(f"⚠️ Combined analysis error: {e}") + return [] + +def detect_scene_changes_with_progress(video_path, chunk_duration=5, threshold=0.3, progress_callback=None): + """Detect dramatic visual scene changes with progress updates""" + print("🎬 Analyzing scene changes...") + clip = VideoFileClip(video_path) + + # Sample frames at regular intervals + sample_rate = 2 # Check every 2 seconds + times = np.arange(0, clip.duration, sample_rate) + + scene_changes = [] + prev_frame = 
None + total_frames = len(times) - 1 + + for i, t in enumerate(times[:-1]): + try: + # Update progress every few frames + if progress_callback and i % 5 == 0: + progress = (i / total_frames) * 100 + progress_callback(progress, f"🎬 Analyzing scene changes... Frame {i+1}/{total_frames}") + + # Get current and next frame + frame1 = clip.get_frame(t) + frame2 = clip.get_frame(times[i + 1]) + + # Convert to grayscale and resize for faster processing + gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY) + gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY) + gray1 = cv2.resize(gray1, (160, 90)) # Small size for speed + gray2 = cv2.resize(gray2, (160, 90)) + + # Calculate structural similarity difference + diff = np.mean(np.abs(gray1.astype(float) - gray2.astype(float))) / 255.0 + + if diff > threshold: + start = max(0, t - chunk_duration/2) + end = min(clip.duration, t + chunk_duration/2) + scene_changes.append((start, end)) + + except Exception as e: + print(f"⚠️ Frame analysis error at {t:.1f}s: {e}") + continue + + if progress_callback: + progress_callback(100, f"🎬 Found {len(scene_changes)} scene changes") + + print(f"🎬 Found {len(scene_changes)} scene changes") + clip.close() + return scene_changes + +def detect_motion_intensity_with_progress(video_path, chunk_duration=5, threshold=0.15, progress_callback=None): + """Detect high motion/action moments with progress updates""" + print("πŸƒ Analyzing motion intensity...") + clip = VideoFileClip(video_path) + + sample_rate = 1 # Check every second + times = np.arange(0, clip.duration - 1, sample_rate) + + motion_moments = [] + + for i, t in enumerate(times): + try: + # Update progress every 10 seconds + if progress_callback and i % 10 == 0: + progress = (i / len(times)) * 100 + progress_callback(progress, f"πŸƒ Analyzing motion... 
{i+1}/{len(times)} seconds") + + # Get two consecutive frames + frame1 = clip.get_frame(t) + frame2 = clip.get_frame(t + 0.5) # Half second later + + # Convert to grayscale and resize + gray1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY) + gray2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY) + gray1 = cv2.resize(gray1, (160, 90)) + gray2 = cv2.resize(gray2, (160, 90)) + + # Calculate optical flow magnitude + flow = cv2.calcOpticalFlowPyrLK(gray1, gray2, + np.random.randint(0, 160, (100, 1, 2)).astype(np.float32), + None)[0] + + if flow is not None: + motion_magnitude = np.mean(np.linalg.norm(flow.reshape(-1, 2), axis=1)) + + if motion_magnitude > threshold: + start = max(0, t - chunk_duration/2) + end = min(clip.duration, t + chunk_duration/2) + motion_moments.append((start, end)) + + except Exception as e: + print(f"⚠️ Motion analysis error at {t:.1f}s: {e}") + continue + + if progress_callback: + progress_callback(100, f"πŸƒ Found {len(motion_moments)} high-motion moments") + + print(f"πŸƒ Found {len(motion_moments)} high-motion moments") + clip.close() + return motion_moments + +def detect_speech_emotion_with_progress(video_path, chunk_duration=5, progress_callback=None): + """Detect emotional/excited speech patterns with progress updates""" + print("πŸ˜„ Analyzing speech emotions...") + + if progress_callback: + progress_callback(10, "πŸ˜„ Initializing speech recognition...") + + # Use Whisper to get detailed speech analysis + model = WhisperModel("base", device="cpu", compute_type="int8") + + if progress_callback: + progress_callback(30, "πŸ˜„ Transcribing audio...") + + segments, _ = model.transcribe(video_path, beam_size=5, vad_filter=True, word_timestamps=True) + + emotional_moments = [] + excitement_keywords = ['wow', 'amazing', 'incredible', 'unbelievable', 'awesome', 'fantastic', + 'omg', 'what', 'no way', 'crazy', 'insane', 'perfect', 'yes', 'exactly'] + + segments_list = list(segments) + + if progress_callback: + progress_callback(50, f"πŸ˜„ Processing {len(segments_list)} speech segments...") + + for i, segment in enumerate(segments_list): + if progress_callback and i % 10 == 0: + progress = 50 + (i / len(segments_list)) * 50 + progress_callback(progress, f"πŸ˜„ Analyzing speech... {i+1}/{len(segments_list)} segments") + + text = segment.text.lower() + + # Check for excitement keywords + has_keywords = any(keyword in text for keyword in excitement_keywords) + + # Check for multiple exclamation-worthy patterns + has_caps = any(word.isupper() for word in segment.text.split()) + has_punctuation = '!' in segment.text or '?' 
in segment.text + is_short_excited = len(text.split()) <= 5 and (has_keywords or has_caps) + + if has_keywords or has_punctuation or is_short_excited: + start = max(0, segment.start - chunk_duration/2) + end = min(segment.end + chunk_duration/2, segment.end + chunk_duration) + emotional_moments.append((start, end)) + + if progress_callback: + progress_callback(100, f"πŸ˜„ Found {len(emotional_moments)} emotional speech moments") + + print(f"πŸ˜„ Found {len(emotional_moments)} emotional speech moments") + return emotional_moments + +def detect_audio_peaks_with_progress(video_path, chunk_duration=5, progress_callback=None): + """Detect sudden audio peaks with progress updates""" + print("🎡 Analyzing audio peaks...") + + if progress_callback: + progress_callback(10, "🎡 Loading audio...") + + clip = VideoFileClip(video_path) + audio = clip.audio.to_soundarray(fps=22050) # Lower sample rate for speed + + # Convert to mono if stereo + if len(audio.shape) > 1: + audio = np.mean(audio, axis=1) + + if progress_callback: + progress_callback(40, "🎡 Finding audio peaks...") + + # Find spectral peaks (bass, treble spikes) + peaks, _ = signal.find_peaks(np.abs(audio), height=np.percentile(np.abs(audio), 95)) + + peak_moments = [] + prev_peak = 0 + + if progress_callback: + progress_callback(70, f"🎡 Processing {len(peaks)} peaks...") + + for i, peak in enumerate(peaks): + if progress_callback and i % 1000 == 0: + progress = 70 + (i / len(peaks)) * 30 + progress_callback(progress, f"🎡 Processing peaks... {i}/{len(peaks)}") + + peak_time = peak / 22050 + + # Avoid too close peaks + if peak_time - prev_peak > chunk_duration: + start = max(0, peak_time - chunk_duration/2) + end = min(clip.duration, peak_time + chunk_duration/2) + peak_moments.append((start, end)) + prev_peak = peak_time + + if progress_callback: + progress_callback(100, f"🎡 Found {len(peak_moments)} audio peak moments") + + print(f"🎡 Found {len(peak_moments)} audio peak moments") + clip.close() + return peak_moments + +def detect_combined_intensity_with_progress(video_path, chunk_duration=5, weights=None, progress_callback=None): + """Combine multiple detection methods with progress updates""" + print("🎯 Running comprehensive moment analysis...") + + if weights is None: + weights = {'loud': 0.3, 'scene': 0.2, 'motion': 0.2, 'speech': 0.2, 'peaks': 0.1} + + # Sub-progress callback for each method + def sub_progress(method_weight, base_percent): + def callback(percent, status): + if progress_callback: + total_percent = base_percent + (percent / 100) * method_weight + progress_callback(total_percent, f"🎯 {status}") + return callback + + # Get all detection results with progress + if progress_callback: + progress_callback(5, "🎯 Analyzing loud moments...") + loud_moments = detect_loud_moments(video_path, chunk_duration, threshold_db=5) + + if progress_callback: + progress_callback(15, "🎯 Analyzing scene changes...") + scene_moments = detect_scene_changes_with_progress(video_path, chunk_duration, progress_callback=sub_progress(20, 15)) + + if progress_callback: + progress_callback(35, "🎯 Analyzing motion...") + motion_moments = detect_motion_intensity_with_progress(video_path, chunk_duration, progress_callback=sub_progress(20, 35)) + + if progress_callback: + progress_callback(55, "🎯 Analyzing speech...") + speech_moments = detect_speech_emotion_with_progress(video_path, chunk_duration, progress_callback=sub_progress(20, 55)) + + if progress_callback: + progress_callback(75, "🎯 Analyzing audio peaks...") + peak_moments = 
detect_audio_peaks_with_progress(video_path, chunk_duration, progress_callback=sub_progress(15, 75)) + + if progress_callback: + progress_callback(90, "🎯 Combining results...") + + # Create time-based scoring + clip = VideoFileClip(video_path) + duration = clip.duration + clip.close() + + # Score each second of the video + time_scores = {} + + for moments, weight in [(loud_moments, weights['loud']), + (scene_moments, weights['scene']), + (motion_moments, weights['motion']), + (speech_moments, weights['speech']), + (peak_moments, weights['peaks'])]: + for start, end in moments: + for t in range(int(start), int(end) + 1): + if t not in time_scores: + time_scores[t] = 0 + time_scores[t] += weight + + # Find the highest scoring segments + if not time_scores: + if progress_callback: + progress_callback(100, "🎯 No moments found, using loud moments fallback") + return loud_moments # Fallback to loud moments + + # Get top scoring time periods + sorted_times = sorted(time_scores.items(), key=lambda x: x[1], reverse=True) + + combined_moments = [] + used_times = set() + + for time_sec, score in sorted_times: + if time_sec not in used_times and score > 0.3: # Minimum threshold + start = max(0, time_sec - chunk_duration/2) + end = min(duration, time_sec + chunk_duration/2) + combined_moments.append((start, end)) + + # Mark nearby times as used to avoid overlap + for t in range(max(0, time_sec - chunk_duration), + min(int(duration), time_sec + chunk_duration)): + used_times.add(t) + + if progress_callback: + progress_callback(100, f"🎯 Found {len(combined_moments)} high-intensity combined moments") + + print(f"🎯 Found {len(combined_moments)} high-intensity combined moments") + return combined_moments + def detect_motion_intensity(video_path, chunk_duration=5, threshold=0.15): """Detect high motion/action moments""" print("πŸƒ Analyzing motion intensity...") @@ -227,8 +704,12 @@ def detect_motion_intensity(video_path, chunk_duration=5, threshold=0.15): motion_moments = [] - for t in times: + for i, t in enumerate(times): try: + # Periodic progress output + if i % 20 == 0: # Every 20 seconds + print(f"πŸƒ Processing motion at {t:.1f}s ({i+1}/{len(times)})...") + # Get two consecutive frames frame1 = clip.get_frame(t) frame2 = clip.get_frame(t + 0.5) # Half second later @@ -264,15 +745,24 @@ def detect_speech_emotion(video_path, chunk_duration=5): """Detect emotional/excited speech patterns""" print("πŸ˜„ Analyzing speech emotions...") + print("πŸ˜„ Initializing speech recognition...") # Use Whisper to get detailed speech analysis model = WhisperModel("base", device="cpu", compute_type="int8") + + print("πŸ˜„ Transcribing audio...") segments, _ = model.transcribe(video_path, beam_size=5, vad_filter=True, word_timestamps=True) emotional_moments = [] excitement_keywords = ['wow', 'amazing', 'incredible', 'unbelievable', 'awesome', 'fantastic', 'omg', 'what', 'no way', 'crazy', 'insane', 'perfect', 'yes', 'exactly'] - for segment in segments: + segments_list = list(segments) + print(f"πŸ˜„ Processing {len(segments_list)} speech segments...") + + for i, segment in enumerate(segments_list): + if i % 10 == 0: # Every 10 segments + print(f"πŸ˜„ Processing segment {i+1}/{len(segments_list)}...") + text = segment.text.lower() # Check for excitement keywords @@ -295,6 +785,7 @@ def detect_audio_peaks(video_path, chunk_duration=5): """Detect sudden audio peaks (bass drops, beats, impacts)""" print("🎡 Analyzing audio peaks...") + print("🎡 Loading audio...") clip = VideoFileClip(video_path) audio = 
clip.audio.to_soundarray(fps=22050) # Lower sample rate for speed @@ -302,13 +793,17 @@ def detect_audio_peaks(video_path, chunk_duration=5): if len(audio.shape) > 1: audio = np.mean(audio, axis=1) + print("🎡 Finding audio peaks...") # Find spectral peaks (bass, treble spikes) peaks, _ = signal.find_peaks(np.abs(audio), height=np.percentile(np.abs(audio), 95)) peak_moments = [] prev_peak = 0 - for peak in peaks: + for i, peak in enumerate(peaks): + if i % 1000 == 0: # Every 1000 peaks + print(f"🎡 Processing peaks... {i}/{len(peaks)}") + peak_time = peak / 22050 # Avoid too close peaks @@ -329,13 +824,24 @@ def detect_combined_intensity(video_path, chunk_duration=5, weights=None): if weights is None: weights = {'loud': 0.3, 'scene': 0.2, 'motion': 0.2, 'speech': 0.2, 'peaks': 0.1} - # Get all detection results + # Get all detection results with progress updates + print("🎯 Analyzing loud moments...") loud_moments = detect_loud_moments(video_path, chunk_duration, threshold_db=5) # Lower threshold + + print("🎯 Analyzing scene changes...") scene_moments = detect_scene_changes(video_path, chunk_duration) + + print("🎯 Analyzing motion...") motion_moments = detect_motion_intensity(video_path, chunk_duration) + + print("🎯 Analyzing speech...") speech_moments = detect_speech_emotion(video_path, chunk_duration) + + print("🎯 Analyzing audio peaks...") peak_moments = detect_audio_peaks(video_path, chunk_duration) + print("🎯 Combining results...") + # Create time-based scoring clip = VideoFileClip(video_path) duration = clip.duration @@ -357,6 +863,7 @@ def detect_combined_intensity(video_path, chunk_duration=5, weights=None): # Find the highest scoring segments if not time_scores: + print("🎯 No moments found, using loud moments fallback") return loud_moments # Fallback to loud moments # Get top scoring time periods @@ -502,7 +1009,7 @@ def validate_video(video_path, min_duration=30): raise ValueError(f"Error reading video: {str(e)}") def generate_shorts(video_path, max_clips=3, output_folder="shorts", progress_callback=None, - threshold_db=-30, clip_duration=5, detection_mode="loud"): + detection_progress_callback=None, threshold_db=-30, clip_duration=5, detection_mode="loud"): os.makedirs(output_folder, exist_ok=True) # Validate video first @@ -520,28 +1027,47 @@ def generate_shorts(video_path, max_clips=3, output_folder="shorts", progress_ca if progress_callback: progress_callback("πŸ” Analyzing audio for loud moments...", 10) best_moments = detect_loud_moments(video_path, chunk_duration=clip_duration, threshold_db=threshold_db) + if progress_callback: + progress_callback("πŸ” Loud moments analysis complete", 35) elif detection_mode == "scene": if progress_callback: - progress_callback("🎬 Analyzing scene changes...", 10) - best_moments = detect_scene_changes(video_path, chunk_duration=clip_duration) + progress_callback("🎬 Starting scene analysis...", 10) + best_moments = detect_scene_changes_with_progress(video_path, chunk_duration=clip_duration, + progress_callback=detection_progress_callback) + if progress_callback: + progress_callback("🎬 Scene analysis complete", 35) elif detection_mode == "motion": if progress_callback: - progress_callback("πŸƒ Analyzing motion intensity...", 10) - best_moments = detect_motion_intensity(video_path, chunk_duration=clip_duration) + progress_callback("πŸƒ Starting motion analysis...", 10) + best_moments = detect_motion_intensity_with_progress(video_path, chunk_duration=clip_duration, + progress_callback=detection_progress_callback) + if progress_callback: + 
progress_callback("πŸƒ Motion analysis complete", 35) elif detection_mode == "speech": if progress_callback: - progress_callback("πŸ˜„ Analyzing speech emotions...", 10) - best_moments = detect_speech_emotion(video_path, chunk_duration=clip_duration) + progress_callback("πŸ˜„ Starting speech analysis...", 10) + best_moments = detect_speech_emotion_with_progress(video_path, chunk_duration=clip_duration, + progress_callback=detection_progress_callback) + if progress_callback: + progress_callback("πŸ˜„ Speech analysis complete", 35) elif detection_mode == "peaks": if progress_callback: - progress_callback("🎡 Analyzing audio peaks...", 10) - best_moments = detect_audio_peaks(video_path, chunk_duration=clip_duration) + progress_callback("🎡 Starting audio peak analysis...", 10) + best_moments = detect_audio_peaks_with_progress(video_path, chunk_duration=clip_duration, + progress_callback=detection_progress_callback) + if progress_callback: + progress_callback("🎡 Audio peak analysis complete", 35) elif detection_mode == "combined": if progress_callback: - progress_callback("🎯 Running comprehensive analysis...", 10) - best_moments = detect_combined_intensity(video_path, chunk_duration=clip_duration) + progress_callback("🎯 Starting comprehensive analysis...", 10) + best_moments = detect_combined_intensity_with_progress(video_path, chunk_duration=clip_duration, + progress_callback=detection_progress_callback) + if progress_callback: + progress_callback("🎯 Comprehensive analysis complete", 35) else: best_moments = detect_loud_moments(video_path, chunk_duration=clip_duration, threshold_db=threshold_db) + if progress_callback: + progress_callback("πŸ” Analysis complete", 35) selected = best_moments[:max_clips] @@ -574,6 +1100,65 @@ def generate_shorts(video_path, max_clips=3, output_folder="shorts", progress_ca class VideoEditor: """Professional video editing tools for generated shorts""" + def __init__(self, video_path=None): + """Initialize video editor with optional video file""" + self.original_video_path = video_path + self.video_clip = None + self.effects = [] + + if video_path: + self.load_video(video_path) + + def load_video(self, video_path): + """Load a video file for editing""" + if self.video_clip: + self.video_clip.close() + + self.original_video_path = video_path + self.video_clip = VideoFileClip(video_path) + self.effects = [] + print(f"πŸ“Ί Loaded video: {os.path.basename(video_path)}") + + def reset(self): + """Reset to original video, removing all effects""" + if self.original_video_path: + self.load_video(self.original_video_path) + print("πŸ”„ Video reset to original state") + + def export(self, output_path, quality="medium", progress_callback=None): + """Export the final edited video""" + if not self.video_clip: + raise Exception("No video loaded!") + + # Quality settings + quality_settings = { + "low": {"bitrate": "500k", "audio_bitrate": "128k"}, + "medium": {"bitrate": "1M", "audio_bitrate": "192k"}, + "high": {"bitrate": "2M", "audio_bitrate": "320k"} + } + + settings = quality_settings.get(quality, quality_settings["medium"]) + + # Export with progress callback + if progress_callback: + self.video_clip.write_videofile( + output_path, + codec="libx264", + audio_codec="aac", + bitrate=settings["bitrate"], + audio_bitrate=settings["audio_bitrate"], + verbose=False, + logger=None + ) + else: + self.video_clip.write_videofile( + output_path, + codec="libx264", + audio_codec="aac", + bitrate=settings["bitrate"], + audio_bitrate=settings["audio_bitrate"] + ) + @staticmethod def 
trim_video(video_path, start_time, end_time, output_path): """Trim video to specific time range""" @@ -745,8 +1330,295 @@ class VideoEditor: text_clip.close() final_video.close() - print(f"βœ… Text overlay completed!") - return output_path + def add_blur_effect(self, blur_strength=2.0): + """Add blur effect to current video""" + if not self.video_clip: + raise Exception("No video loaded!") + + def blur_frame(get_frame, t): + frame = get_frame(t) + # Convert to uint8 if needed + if frame.dtype != np.uint8: + frame = (frame * 255).astype(np.uint8) + blurred = cv2.GaussianBlur(frame, (15, 15), blur_strength) + return blurred + + self.video_clip = self.video_clip.transform(blur_frame) + self.effects.append(f"blur({blur_strength})") + print(f"🌫️ Applied blur effect (strength: {blur_strength})") + + def add_color_effect(self, effect_type="sepia"): + """Add color effects: sepia, grayscale, vintage, etc.""" + if not self.video_clip: + raise Exception("No video loaded!") + + def apply_color_effect(get_frame, t): + frame = get_frame(t) + if frame.dtype != np.uint8: + frame = (frame * 255).astype(np.uint8) + + if effect_type == "grayscale": + gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB) + elif effect_type == "sepia": + # Sepia transformation matrix + sepia_filter = np.array([[0.393, 0.769, 0.189], + [0.349, 0.686, 0.168], + [0.272, 0.534, 0.131]]) + sepia_img = frame.dot(sepia_filter.T) + sepia_img = np.clip(sepia_img, 0, 255) + return sepia_img.astype(np.uint8) + elif effect_type == "vintage": + # Vintage effect (warm + slight vignette) + frame = frame.astype(np.float32) + frame[:,:,0] *= 1.2 # Increase red + frame[:,:,1] *= 1.1 # Slightly increase green + frame[:,:,2] *= 0.9 # Decrease blue + return np.clip(frame, 0, 255).astype(np.uint8) + elif effect_type == "cool": + # Cool effect (more blue) + frame = frame.astype(np.float32) + frame[:,:,0] *= 0.9 # Decrease red + frame[:,:,1] *= 1.0 # Keep green + frame[:,:,2] *= 1.3 # Increase blue + return np.clip(frame, 0, 255).astype(np.uint8) + return frame + + self.video_clip = self.video_clip.transform(apply_color_effect) + self.effects.append(f"color({effect_type})") + print(f"🎨 Applied color effect: {effect_type}") + + def add_zoom_effect(self, zoom_factor=1.5, zoom_type="zoom_in"): + """Add zoom in/out effect""" + if not self.video_clip: + raise Exception("No video loaded!") + + def zoom_frame(get_frame, t): + frame = get_frame(t) + h, w = frame.shape[:2] + + if zoom_type == "zoom_in": + progress = t / self.video_clip.duration + current_zoom = 1.0 + (zoom_factor - 1.0) * progress + elif zoom_type == "zoom_out": + progress = t / self.video_clip.duration + current_zoom = zoom_factor - (zoom_factor - 1.0) * progress + else: # static zoom + current_zoom = zoom_factor + + # Calculate crop region for the current zoom level + new_h, new_w = int(h / current_zoom), int(w / current_zoom) + start_x = (w - new_w) // 2 + start_y = (h - new_h) // 2 + + # Crop and resize + cropped = frame[start_y:start_y + new_h, start_x:start_x + new_w] + zoomed = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_CUBIC) + + return zoomed + + self.video_clip = self.video_clip.transform(zoom_frame) + self.effects.append(f"zoom({zoom_type}, {zoom_factor})") + print(f"πŸ” Applied zoom effect: {zoom_type} (factor: {zoom_factor})") + + def add_rotation_effect(self, angle=0, rotation_type="static"): + 
"""Add rotation effect""" + if not self.video_clip: + raise Exception("No video loaded!") + + def rotate_frame(get_frame, t): + frame = get_frame(t) + h, w = frame.shape[:2] + + if rotation_type == "spinning": + # Continuous rotation + current_angle = (angle * t * 360 / self.video_clip.duration) % 360 + else: # static rotation + current_angle = angle + + # Rotation matrix + center = (w // 2, h // 2) + matrix = cv2.getRotationMatrix2D(center, current_angle, 1.0) + rotated = cv2.warpAffine(frame, matrix, (w, h), borderMode=cv2.BORDER_REFLECT) + + return rotated + + self.video_clip = self.video_clip.transform(rotate_frame) + self.effects.append(f"rotation({rotation_type}, {angle})") + print(f"πŸ”„ Applied rotation effect: {rotation_type} (angle: {angle}Β°)") + + def apply_trim(self, start_time, end_time): + """Apply trim to current video""" + if not self.video_clip: + raise Exception("No video loaded!") + + if start_time >= end_time: + raise Exception("Start time must be less than end time!") + + if end_time > self.video_clip.duration: + raise Exception(f"End time cannot exceed video duration ({self.video_clip.duration:.1f}s)!") + + self.video_clip = self.video_clip.subclipped(start_time, end_time) + self.effects.append(f"trim({start_time:.1f}s-{end_time:.1f}s)") + print(f"βœ‚οΈ Applied trim: {start_time:.1f}s to {end_time:.1f}s") + + def apply_speed(self, speed_factor): + """Apply speed change to current video""" + if not self.video_clip: + raise Exception("No video loaded!") + + if speed_factor <= 0: + raise Exception("Speed factor must be greater than 0!") + + if speed_factor > 1: + # Speed up + self.video_clip = self.video_clip.with_fps(self.video_clip.fps * speed_factor).subclipped(0, self.video_clip.duration / speed_factor) + else: + # Slow down + self.video_clip = self.video_clip.with_fps(self.video_clip.fps * speed_factor) + + self.effects.append(f"speed({speed_factor:.1f}x)") + print(f"⚑ Applied speed change: {speed_factor:.1f}x") + + def apply_fade_effects(self, fade_in_duration=1.0, fade_out_duration=1.0): + """Apply fade in/out effects to current video""" + if not self.video_clip: + raise Exception("No video loaded!") + + from moviepy.video.fx import FadeIn, FadeOut + + if fade_in_duration > 0: + self.video_clip = self.video_clip.with_effects([FadeIn(fade_in_duration)]) + + if fade_out_duration > 0: + self.video_clip = self.video_clip.with_effects([FadeOut(fade_out_duration)]) + + self.effects.append(f"fade(in:{fade_in_duration:.1f}s, out:{fade_out_duration:.1f}s)") + print(f"πŸŒ… Applied fade effects: in {fade_in_duration:.1f}s, out {fade_out_duration:.1f}s") + + def apply_volume(self, volume_factor): + """Apply volume adjustment to current video""" + if not self.video_clip: + raise Exception("No video loaded!") + + if not self.video_clip.audio: + raise Exception("Video has no audio track!") + + from moviepy.audio.fx import MultiplyVolume + + self.video_clip = self.video_clip.with_effects([MultiplyVolume(volume_factor)]) + self.effects.append(f"volume({volume_factor:.1f}x)") + print(f"πŸ”Š Applied volume adjustment: {volume_factor:.1f}x") + + def apply_resize(self, width, height): + """Apply resize to current video""" + if not self.video_clip: + raise Exception("No video loaded!") + + if width < 1 or height < 1: + raise Exception("Width and height must be positive!") + + from moviepy.video.fx import Resize + + self.video_clip = self.video_clip.with_effects([Resize((width, height))]) + self.effects.append(f"resize({width}x{height})") + print(f"πŸ“ Applied resize: 
{width}x{height}") + + def apply_text_overlay_to_current(self, text, position=('center', 'bottom'), font_size=50, color='white', method='fast'): + """Apply text overlay to current video""" + if not self.video_clip: + raise Exception("No video loaded!") + + if method == 'fast': + # Use the fast PIL-based method + self._apply_text_overlay_fast_to_current(text, position, font_size, color) + else: + # Use MoviePy method for higher quality + self._apply_text_overlay_quality_to_current(text, position, font_size, color) + + self.effects.append(f"text('{text[:20]}...', {position}, {font_size}px)") + print(f"πŸ“ Applied text overlay: '{text[:30]}...'") + + def _apply_text_overlay_fast_to_current(self, text, position, font_size, color): + """Fast PIL-based text overlay to current video""" + from PIL import Image, ImageDraw, ImageFont + + def add_text_to_frame(get_frame, t): + frame = get_frame(t) + # Convert to PIL Image + pil_image = Image.fromarray(frame) + draw = ImageDraw.Draw(pil_image) + + # Calculate position + w, h = pil_image.size + x_pos, y_pos = self._calculate_text_position(position, w, h, text, font_size) + + # Draw text with outline for better visibility + try: + font = ImageFont.truetype("arial.ttf", font_size) + except: + font = ImageFont.load_default() + + # Draw outline + for adj in range(-2, 3): + for adj2 in range(-2, 3): + draw.text((x_pos + adj, y_pos + adj2), text, font=font, fill='black') + + # Draw main text + draw.text((x_pos, y_pos), text, font=font, fill=color) + + return np.array(pil_image) + + self.video_clip = self.video_clip.transform(add_text_to_frame) + + def _apply_text_overlay_quality_to_current(self, text, position, font_size, color): + """High quality MoviePy-based text overlay to current video""" + from moviepy.editor import TextClip, CompositeVideoClip + + text_clip = TextClip(text, fontsize=font_size, color=color, font='Arial-Bold') + text_clip = text_clip.with_duration(self.video_clip.duration) + + # Set position + if position == ('center', 'center'): + text_clip = text_clip.with_position('center') + elif position == ('center', 'bottom'): + text_clip = text_clip.with_position(('center', 'bottom')) + elif position == ('center', 'top'): + text_clip = text_clip.with_position(('center', 'top')) + else: + text_clip = text_clip.with_position(position) + + self.video_clip = CompositeVideoClip([self.video_clip, text_clip]) + + def _calculate_text_position(self, position, width, height, text, font_size): + """Calculate text position based on position tuple""" + # Estimate text dimensions (rough calculation) + text_width = len(text) * font_size * 0.6 + text_height = font_size + + x_pos, y_pos = position + + if x_pos == 'center': + x_pos = (width - text_width) // 2 + elif x_pos == 'left': + x_pos = 50 + elif x_pos == 'right': + x_pos = width - text_width - 50 + + if y_pos == 'center': + y_pos = (height - text_height) // 2 + elif y_pos == 'top': + y_pos = 50 + elif y_pos == 'bottom': + y_pos = height - text_height - 50 + + return int(x_pos), int(y_pos) @staticmethod def add_text_overlay_fast(video_path, text, position=('center', 'bottom'), @@ -917,19 +1789,27 @@ class ShortsEditorGUI: self.create_editor_interface(shorts_files) def create_editor_interface(self, shorts_files): - """Create the main editor interface""" + """Create the main editor interface with video player""" # Title title_frame = tk.Frame(self.editor_window) title_frame.pack(fill="x", padx=20, pady=10) tk.Label(title_frame, text="🎬 Professional Shorts Editor", font=("Arial", 16, "bold")).pack() - 
tk.Label(title_frame, text="Select and edit your generated shorts with professional tools", + tk.Label(title_frame, text="Select and edit your generated shorts with professional tools + Real-time Preview", font=("Arial", 10), fg="gray").pack() + # Main content frame + main_frame = tk.Frame(self.editor_window) + main_frame.pack(fill="both", expand=True, padx=20, pady=10) + + # Left panel - Video selection and info + left_panel = tk.Frame(main_frame) + left_panel.pack(side="left", fill="y", padx=(0, 10)) + # Video selection frame - selection_frame = tk.LabelFrame(self.editor_window, text="πŸ“ Select Short to Edit", padx=10, pady=10) - selection_frame.pack(fill="x", padx=20, pady=10) + selection_frame = tk.LabelFrame(left_panel, text="πŸ“ Select Short to Edit", padx=10, pady=10) + selection_frame.pack(fill="x", pady=(0, 10)) # Video list with preview info list_frame = tk.Frame(selection_frame) @@ -941,7 +1821,7 @@ class ShortsEditorGUI: list_container = tk.Frame(list_frame) list_container.pack(fill="x", pady=5) - self.video_listbox = tk.Listbox(list_container, height=4, font=("Courier", 9)) + self.video_listbox = tk.Listbox(list_container, height=4, font=("Courier", 9), width=50) scrollbar = tk.Scrollbar(list_container, orient="vertical") self.video_listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.video_listbox.yview) @@ -962,6 +1842,13 @@ class ShortsEditorGUI: except Exception as e: print(f"Error reading {video_file}: {e}") + # Video player frame (center) + player_frame = tk.Frame(main_frame) + player_frame.pack(side="left", fill="both", expand=True, padx=10) + + # Video player + self.create_video_player(player_frame) + # Video selection handler def on_video_select(event): selection = self.video_listbox.curselection() @@ -970,20 +1857,21 @@ class ShortsEditorGUI: self.video_info = VideoEditor.get_video_info(self.current_video) self.update_video_info() self.enable_editing_tools() + self.load_video_in_player() self.video_listbox.bind("<>", on_video_select) # Current video info - self.info_frame = tk.LabelFrame(self.editor_window, text="πŸ“Š Video Information", padx=10, pady=10) - self.info_frame.pack(fill="x", padx=20, pady=10) + self.info_frame = tk.LabelFrame(left_panel, text="πŸ“Š Video Information", padx=10, pady=10) + self.info_frame.pack(fill="x", pady=(0, 10)) self.info_label = tk.Label(self.info_frame, text="Select a video to see details", font=("Courier", 9), justify="left") self.info_label.pack(anchor="w") - # Editing tools frame - self.tools_frame = tk.LabelFrame(self.editor_window, text="πŸ› οΈ Professional Editing Tools", padx=10, pady=10) - self.tools_frame.pack(fill="both", expand=True, padx=20, pady=10) + # Editing tools frame (right panel) + self.tools_frame = tk.LabelFrame(main_frame, text="πŸ› οΈ Professional Editing Tools", padx=10, pady=10) + self.tools_frame.pack(side="right", fill="y", padx=(10, 0)) self.create_editing_tools() @@ -1014,7 +1902,249 @@ class ShortsEditorGUI: command=self.open_shorts_folder, bg="#FF9800", fg="white").pack(side="left", padx=5) tk.Button(button_frame, text="❌ Close Editor", - command=self.editor_window.destroy, bg="#F44336", fg="white").pack(side="right", padx=5) + command=self.close_editor, bg="#F44336", fg="white").pack(side="right", padx=5) + + def create_video_player(self, parent_frame): + """Create the video player with timeline controls""" + player_label_frame = tk.LabelFrame(parent_frame, text="πŸŽ₯ Real-time Video Player", padx=10, pady=10) + player_label_frame.pack(fill="both", expand=True) + + # Video 
display canvas + self.video_canvas = tk.Canvas(player_label_frame, width=400, height=300, bg="black", relief="sunken", bd=2) + self.video_canvas.pack(pady=10) + + # Player controls frame + controls_frame = tk.Frame(player_label_frame) + controls_frame.pack(fill="x", pady=5) + + # Timeline slider + timeline_frame = tk.Frame(controls_frame) + timeline_frame.pack(fill="x", pady=5) + + tk.Label(timeline_frame, text="Timeline:", font=("Arial", 9, "bold")).pack(anchor="w") + + self.timeline_var = tk.DoubleVar() + self.timeline_slider = tk.Scale(timeline_frame, from_=0, to=100, orient="horizontal", + variable=self.timeline_var, command=self.on_timeline_change, + length=380, resolution=0.1) + self.timeline_slider.pack(fill="x") + + # Play controls + play_controls_frame = tk.Frame(controls_frame) + play_controls_frame.pack(pady=5) + + self.play_button = tk.Button(play_controls_frame, text="▢️ Play", command=self.toggle_play, + font=("Arial", 10, "bold"), bg="#4CAF50", fg="white") + self.play_button.pack(side="left", padx=5) + + tk.Button(play_controls_frame, text="⏹️ Stop", command=self.stop_video, + font=("Arial", 10, "bold"), bg="#F44336", fg="white").pack(side="left", padx=5) + + tk.Button(play_controls_frame, text="βͺ -5s", command=lambda: self.seek_relative(-5), + font=("Arial", 9), bg="#FF9800", fg="white").pack(side="left", padx=2) + + tk.Button(play_controls_frame, text="⏩ +5s", command=lambda: self.seek_relative(5), + font=("Arial", 9), bg="#FF9800", fg="white").pack(side="left", padx=2) + + # Time display + self.time_label = tk.Label(controls_frame, text="00:00 / 00:00", font=("Arial", 10, "bold")) + self.time_label.pack(pady=5) + + # Player state variables + self.current_clip = None + self.is_playing = False + self.current_time = 0.0 + self.video_duration = 0.0 + self.play_thread = None + self.last_frame_time = 0 + + def load_video_in_player(self): + """Load the selected video in the player""" + if not self.current_video: + return + + try: + # Close previous clip + if self.current_clip: + self.current_clip.close() + + print(f"πŸŽ₯ Loading video in player: {os.path.basename(self.current_video)}") + self.current_clip = VideoFileClip(self.current_video) + self.video_duration = self.current_clip.duration + + # Update timeline + self.timeline_slider.config(to=self.video_duration) + self.timeline_var.set(0) + self.current_time = 0.0 + + # Display first frame + self.display_frame_at_time(0.0) + self.update_time_display() + + print(f"βœ… Video loaded successfully ({self.video_duration:.1f}s)") + + except Exception as e: + print(f"❌ Error loading video: {e}") + messagebox.showerror("Video Error", f"Failed to load video:\n{str(e)}") + + def display_frame_at_time(self, time_seconds): + """Display video frame at specific time""" + if not self.current_clip: + return + + try: + # Get frame at specified time + frame = self.current_clip.get_frame(min(time_seconds, self.video_duration - 0.01)) + + # Convert frame to proper format for PIL + if frame.dtype != np.uint8: + # Convert float frames to uint8 + frame = (frame * 255).astype(np.uint8) + + # Ensure frame is in correct shape (handle edge cases) + if len(frame.shape) == 3 and frame.shape[2] == 3: + # Normal RGB frame + pil_image = Image.fromarray(frame) + else: + # Handle other formats or corrupted frames + print(f"⚠️ Unusual frame shape: {frame.shape}, dtype: {frame.dtype}") + # Create a black frame as fallback + canvas_width = self.video_canvas.winfo_width() or 400 + canvas_height = self.video_canvas.winfo_height() or 300 + frame = 
np.zeros((canvas_height, canvas_width, 3), dtype=np.uint8) + pil_image = Image.fromarray(frame) + + # Resize to fit canvas while maintaining aspect ratio + canvas_width = self.video_canvas.winfo_width() or 400 + canvas_height = self.video_canvas.winfo_height() or 300 + + pil_image.thumbnail((canvas_width - 20, canvas_height - 20), Image.Resampling.LANCZOS) + + # Convert to Tkinter format + self.current_tk_image = ImageTk.PhotoImage(pil_image) + + # Clear canvas and display image + self.video_canvas.delete("all") + self.video_canvas.create_image(canvas_width//2, canvas_height//2, + image=self.current_tk_image) + + except Exception as e: + print(f"⚠️ Error displaying frame: {e}") + # Show a black frame on error + try: + canvas_width = self.video_canvas.winfo_width() or 400 + canvas_height = self.video_canvas.winfo_height() or 300 + black_frame = np.zeros((canvas_height-20, canvas_width-20, 3), dtype=np.uint8) + pil_image = Image.fromarray(black_frame) + self.current_tk_image = ImageTk.PhotoImage(pil_image) + self.video_canvas.delete("all") + self.video_canvas.create_image(canvas_width//2, canvas_height//2, + image=self.current_tk_image) + except: + pass + + def on_timeline_change(self, value): + """Handle timeline slider changes""" + if not self.current_clip: + return + + self.current_time = float(value) + self.display_frame_at_time(self.current_time) + self.update_time_display() + + def toggle_play(self): + """Toggle play/pause""" + if not self.current_clip: + return + + if self.is_playing: + self.pause_video() + else: + self.play_video() + + def play_video(self): + """Start video playback""" + if not self.current_clip or self.is_playing: + return + + self.is_playing = True + self.play_button.config(text="⏸️ Pause", bg="#FF9800") + + def play_thread(): + start_time = time.time() + start_video_time = self.current_time + + while self.is_playing and self.current_time < self.video_duration: + try: + # Calculate current video time + elapsed = time.time() - start_time + self.current_time = start_video_time + elapsed + + if self.current_time >= self.video_duration: + self.current_time = self.video_duration + self.is_playing = False + break + + # Update timeline and display + self.timeline_var.set(self.current_time) + self.display_frame_at_time(self.current_time) + self.update_time_display() + + # Frame rate control (approximately 30 FPS) + time.sleep(1/30) + + except Exception as e: + print(f"⚠️ Playback error: {e}") + break + + # Playback finished + self.is_playing = False + self.play_button.config(text="▢️ Play", bg="#4CAF50") + + self.play_thread = threading.Thread(target=play_thread, daemon=True) + self.play_thread.start() + + def pause_video(self): + """Pause video playback""" + self.is_playing = False + self.play_button.config(text="▢️ Play", bg="#4CAF50") + + def stop_video(self): + """Stop video and return to beginning""" + self.is_playing = False + self.current_time = 0.0 + self.timeline_var.set(0) + self.display_frame_at_time(0.0) + self.update_time_display() + self.play_button.config(text="▢️ Play", bg="#4CAF50") + + def seek_relative(self, seconds): + """Seek relative to current position""" + if not self.current_clip: + return + + new_time = max(0, min(self.current_time + seconds, self.video_duration)) + self.current_time = new_time + self.timeline_var.set(new_time) + self.display_frame_at_time(new_time) + self.update_time_display() + + def update_time_display(self): + """Update the time display label""" + current_mins = int(self.current_time // 60) + current_secs = 
int(self.current_time % 60) + total_mins = int(self.video_duration // 60) + total_secs = int(self.video_duration % 60) + + time_text = f"{current_mins:02d}:{current_secs:02d} / {total_mins:02d}:{total_secs:02d}" + self.time_label.config(text=time_text) + + def close_editor(self): + """Clean up and close editor""" + self.is_playing = False + if self.current_clip: + self.current_clip.close() + self.editor_window.destroy() def create_editing_tools(self): """Create the professional editing tools interface""" @@ -1187,6 +2317,126 @@ class ShortsEditorGUI: command=self.add_text_overlay, bg="#795548", fg="white", font=("Arial", 10, "bold")).pack(side="right", padx=10) + # Video Effects Tab - NEW + effects_advanced_frame = ttk.Frame(notebook) + notebook.add(effects_advanced_frame, text="🎨 Video Effects") + + # Blur Effect + blur_frame = tk.LabelFrame(effects_advanced_frame, text="🌫️ Blur Effect", padx=10, pady=5) + blur_frame.pack(fill="x", padx=10, pady=5) + + blur_controls = tk.Frame(blur_frame) + blur_controls.pack(fill="x") + + tk.Label(blur_controls, text="Strength:").pack(side="left") + self.blur_strength = tk.DoubleVar(value=2.0) + tk.Scale(blur_controls, from_=0.1, to=10.0, resolution=0.1, orient="horizontal", + variable=self.blur_strength, length=150).pack(side="left", padx=5) + + tk.Button(blur_controls, text="🌫️ Apply Blur", + command=self.apply_blur_effect, bg="#795548", fg="white").pack(side="right", padx=10) + + # Color Effects + color_frame = tk.LabelFrame(effects_advanced_frame, text="🎨 Color Effects", padx=10, pady=5) + color_frame.pack(fill="x", padx=10, pady=5) + + color_controls = tk.Frame(color_frame) + color_controls.pack(fill="x") + + tk.Label(color_controls, text="Effect:").pack(side="left") + self.color_effect_var = tk.StringVar(value="sepia") + color_combo = ttk.Combobox(color_controls, textvariable=self.color_effect_var, + values=["sepia", "grayscale", "vintage", "cool"], width=12, state="readonly") + color_combo.pack(side="left", padx=5) + + tk.Button(color_controls, text="🎨 Apply Color Effect", + command=self.apply_color_effect, bg="#E91E63", fg="white").pack(side="right", padx=10) + + # Zoom Effects + zoom_frame = tk.LabelFrame(effects_advanced_frame, text="πŸ” Zoom Effects", padx=10, pady=5) + zoom_frame.pack(fill="x", padx=10, pady=5) + + zoom_controls = tk.Frame(zoom_frame) + zoom_controls.pack(fill="x") + + tk.Label(zoom_controls, text="Type:").pack(side="left") + self.zoom_effect_var = tk.StringVar(value="zoom_in") + zoom_combo = ttk.Combobox(zoom_controls, textvariable=self.zoom_effect_var, + values=["zoom_in", "zoom_out", "static"], width=10, state="readonly") + zoom_combo.pack(side="left", padx=5) + + tk.Label(zoom_controls, text="Factor:").pack(side="left", padx=(10, 0)) + self.zoom_factor = tk.DoubleVar(value=1.5) + tk.Scale(zoom_controls, from_=1.0, to=3.0, resolution=0.1, orient="horizontal", + variable=self.zoom_factor, length=100).pack(side="left", padx=5) + + tk.Button(zoom_controls, text="πŸ” Apply Zoom", + command=self.apply_zoom_effect, bg="#3F51B5", fg="white").pack(side="right", padx=10) + + # Rotation Effects + rotation_frame = tk.LabelFrame(effects_advanced_frame, text="πŸ”„ Rotation Effects", padx=10, pady=5) + rotation_frame.pack(fill="x", padx=10, pady=5) + + rotation_controls = tk.Frame(rotation_frame) + rotation_controls.pack(fill="x") + + tk.Label(rotation_controls, text="Type:").pack(side="left") + self.rotation_type_var = tk.StringVar(value="static") + rotation_combo = ttk.Combobox(rotation_controls, textvariable=self.rotation_type_var, 
+ values=["static", "spinning"], width=10, state="readonly") + rotation_combo.pack(side="left", padx=5) + + tk.Label(rotation_controls, text="Angle:").pack(side="left", padx=(10, 0)) + self.rotation_angle = tk.DoubleVar(value=0.0) + tk.Scale(rotation_controls, from_=-180, to=180, resolution=5, orient="horizontal", + variable=self.rotation_angle, length=120).pack(side="left", padx=5) + + tk.Button(rotation_controls, text="πŸ”„ Apply Rotation", + command=self.apply_rotation_effect, bg="#FF5722", fg="white").pack(side="right", padx=10) + + # Export Tab + export_frame = ttk.Frame(notebook) + notebook.add(export_frame, text="πŸ’Ύ Export") + + export_controls_frame = tk.LabelFrame(export_frame, text="πŸ’Ύ Export Final Video", padx=10, pady=5) + export_controls_frame.pack(fill="x", padx=10, pady=5) + + # Output filename + filename_frame = tk.Frame(export_controls_frame) + filename_frame.pack(fill="x", pady=5) + + tk.Label(filename_frame, text="Filename:").pack(side="left") + self.output_filename = tk.StringVar(value="edited_video.mp4") + tk.Entry(filename_frame, textvariable=self.output_filename, width=25).pack(side="left", padx=5) + + # Quality settings + quality_frame = tk.Frame(export_controls_frame) + quality_frame.pack(fill="x", pady=5) + + tk.Label(quality_frame, text="Quality:").pack(side="left") + self.export_quality = tk.StringVar(value="medium") + quality_combo = ttk.Combobox(quality_frame, textvariable=self.export_quality, + values=["low", "medium", "high"], width=10, state="readonly") + quality_combo.pack(side="left", padx=5) + + # Export button + export_button_frame = tk.Frame(export_controls_frame) + export_button_frame.pack(fill="x", pady=10) + + self.export_button = tk.Button(export_button_frame, text="πŸ’Ύ Export Final Video", + command=self.export_edited_video, bg="#4CAF50", fg="white", + font=("Arial", 12, "bold")) + self.export_button.pack(pady=5) + + # Progress bar (initially hidden) + self.progress_var = tk.DoubleVar() + self.progress_bar = ttk.Progressbar(export_button_frame, variable=self.progress_var, maximum=100) + self.progress_label = tk.Label(export_button_frame, text="", font=("Arial", 9)) + + # Reset button + tk.Button(export_button_frame, text="πŸ”„ Reset All Changes", command=self.reset_edited_video, + bg="#F44336", fg="white", font=("Arial", 10)).pack(pady=5) + # Initially disable all tools self.disable_editing_tools() @@ -1200,10 +2450,177 @@ class ShortsEditorGUI: for widget in self.tools_frame.winfo_children(): self.set_widget_state(widget, "normal") + # Initialize video editor for current video + try: + self.video_editor = VideoEditor(self.current_video) + print(f"βœ… Video editor initialized for: {os.path.basename(self.current_video)}") + except Exception as e: + print(f"❌ Error initializing video editor: {e}") + self.video_editor = None + # Update trim end time to video duration if self.video_info: self.trim_end.set(min(self.video_info['duration'], 30.0)) + def apply_blur_effect(self): + """Apply blur effect to video""" + if not hasattr(self, 'video_editor') or not self.video_editor: + messagebox.showerror("Error", "Please select a video first!") + return + + strength = self.blur_strength.get() + print(f"🌫️ Applying blur effect (strength: {strength})") + + try: + self.video_editor.add_blur_effect(strength) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Blur effect applied with strength {strength}") + except Exception as e: + print(f"❌ Error applying blur effect: {e}") + messagebox.showerror("Blur Error", f"Failed to apply blur 
effect:\n{str(e)}") + + def apply_color_effect(self): + """Apply color effect to video""" + if not hasattr(self, 'video_editor') or not self.video_editor: + messagebox.showerror("Error", "Please select a video first!") + return + + effect_type = self.color_effect_var.get() + print(f"🎨 Applying color effect: {effect_type}") + + try: + self.video_editor.add_color_effect(effect_type) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Color effect '{effect_type}' applied successfully") + except Exception as e: + print(f"❌ Error applying color effect: {e}") + messagebox.showerror("Color Effect Error", f"Failed to apply color effect:\n{str(e)}") + + def apply_zoom_effect(self): + """Apply zoom effect to video""" + if not hasattr(self, 'video_editor') or not self.video_editor: + messagebox.showerror("Error", "Please select a video first!") + return + + zoom_type = self.zoom_effect_var.get() + zoom_factor = self.zoom_factor.get() + print(f"πŸ” Applying zoom effect: {zoom_type} (factor: {zoom_factor})") + + try: + self.video_editor.add_zoom_effect(zoom_factor, zoom_type) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Zoom effect '{zoom_type}' applied successfully") + except Exception as e: + print(f"❌ Error applying zoom effect: {e}") + messagebox.showerror("Zoom Effect Error", f"Failed to apply zoom effect:\n{str(e)}") + + def apply_rotation_effect(self): + """Apply rotation effect to video""" + if not hasattr(self, 'video_editor') or not self.video_editor: + messagebox.showerror("Error", "Please select a video first!") + return + + rotation_type = self.rotation_type_var.get() + angle = self.rotation_angle.get() + print(f"πŸ”„ Applying rotation effect: {rotation_type} (angle: {angle}Β°)") + + try: + self.video_editor.add_rotation_effect(angle, rotation_type) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Rotation effect '{rotation_type}' applied successfully") + except Exception as e: + print(f"❌ Error applying rotation effect: {e}") + messagebox.showerror("Rotation Effect Error", f"Failed to apply rotation effect:\n{str(e)}") + + def refresh_video_preview(self): + """Refresh the video preview after applying effects""" + if hasattr(self, 'current_time') and hasattr(self, 'video_editor') and self.video_editor: + try: + # Update the current clip reference to include effects + if self.video_editor.video_clip: + self.current_clip = self.video_editor.video_clip + + # Update video duration in case it changed (speed/trim effects) + old_duration = self.video_duration + self.video_duration = self.current_clip.duration + + # Update timeline if duration changed + if abs(old_duration - self.video_duration) > 0.1: + self.timeline_slider.config(to=self.video_duration) + # Adjust current time if it's beyond new duration + if self.current_time > self.video_duration: + self.current_time = max(0, self.video_duration - 0.1) + self.timeline_var.set(self.current_time) + print(f"πŸ“ Updated timeline duration: {old_duration:.1f}s β†’ {self.video_duration:.1f}s") + + self.display_frame_at_time(self.current_time) + self.update_time_display() + print("πŸ”„ Video preview refreshed with effects") + except Exception as e: + print(f"⚠️ Error refreshing preview: {e}") + + def export_edited_video(self): + """Export the final edited video""" + if not hasattr(self, 'video_editor') or not self.video_editor: + messagebox.showerror("Error", "No video selected for editing!") + return + + filename = self.output_filename.get() + if not filename.endswith('.mp4'): + filename += '.mp4' 
+
+    def export_edited_video(self):
+        """Export the final edited video"""
+        if not hasattr(self, 'video_editor') or not self.video_editor:
+            messagebox.showerror("Error", "No video selected for editing!")
+            return
+
+        filename = self.output_filename.get()
+        if not filename.endswith('.mp4'):
+            filename += '.mp4'
+
+        output_path = os.path.join(self.output_folder.get(), filename)
+        quality = self.export_quality.get()
+
+        print(f"💾 Exporting edited video to: {output_path}")
+
+        def export_thread():
+            try:
+                # Show progress bar
+                self.progress_bar.pack(pady=5)
+                self.progress_label.pack()
+                self.export_button.config(state="disabled", text="Exporting...")
+
+                # Export with progress updates
+                def progress_callback(progress):
+                    self.progress_var.set(progress * 100)
+                    self.progress_label.config(text=f"Exporting... {progress*100:.1f}%")
+                    self.editor_window.update_idletasks()
+
+                # Create output directory if needed
+                os.makedirs(os.path.dirname(output_path), exist_ok=True)
+
+                # Export the video
+                self.video_editor.export(output_path, quality, progress_callback)
+
+                # Hide progress bar
+                self.progress_bar.pack_forget()
+                self.progress_label.pack_forget()
+                self.export_button.config(state="normal", text="💾 Export Final Video")
+
+                messagebox.showinfo("Success", f"Video exported successfully to:\n{output_path}")
+                print(f"✅ Video exported successfully: {output_path}")
+
+            except Exception as e:
+                print(f"❌ Export error: {e}")
+                self.progress_bar.pack_forget()
+                self.progress_label.pack_forget()
+                self.export_button.config(state="normal", text="💾 Export Final Video")
+                messagebox.showerror("Export Error", f"Failed to export video:\n{str(e)}")
+
+        # Run export in background thread
+        threading.Thread(target=export_thread, daemon=True).start()
+
+    def reset_edited_video(self):
+        """Reset all edits and reload original video"""
+        if hasattr(self, 'video_editor') and self.video_editor:
+            self.video_editor.reset()
+            self.refresh_video_preview()
+            messagebox.showinfo("Reset", "All edits have been reset to original video")
+            print("🔄 Video reset to original state")
+        else:
+            messagebox.showwarning("No Video", "No video loaded to reset!")
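`export_thread` hands `VideoEditor.export` a `progress_callback` that expects a fraction between 0 and 1. One plausible way to produce those fractions from moviepy's encoder is a proglog bridge; the sketch below assumes that route, and the quality-to-bitrate table is an invention for illustration (neither appears in this patch). Note also that the callback pokes Tk widgets from the worker thread, which Tkinter merely tolerates; a `root.after` hand-off is safer (see the sketch after the detection-progress helpers below).

# Assumed sketch of an export that reports encoder progress via proglog
# (a moviepy dependency); the real export in this project may differ.
from proglog import ProgressBarLogger

class CallbackLogger(ProgressBarLogger):
    def __init__(self, callback):
        super().__init__()
        self._callback = callback

    def bars_callback(self, bar, attr, value, old_value=None):
        total = self.bars[bar].get("total") or 1
        self._callback(value / total)  # forward 0.0-1.0 to progress_callback

def export(self, output_path, quality, progress_callback):
    # Hypothetical quality presets; only the parameter names come from the patch.
    bitrate = {"low": "1000k", "medium": "4000k", "high": "8000k"}.get(quality, "4000k")
    self.video_clip.write_videofile(output_path, codec="libx264", audio_codec="aac",
                                    bitrate=bitrate, logger=CallbackLogger(progress_callback))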
self.get_output_path(f"speed_{speed:.1f}x") - - def speed_operation(): - VideoEditor.adjust_speed(self.current_video, speed, output_path) - return output_path - - self.show_progress_dialog("Adjusting Speed", speed_operation) + try: + self.video_editor.apply_speed(speed) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Speed adjusted to {speed:.1f}x") + except Exception as e: + print(f"❌ Error applying speed: {e}") + messagebox.showerror("Speed Error", f"Failed to adjust speed:\n{str(e)}") def add_fades(self): - """Add fade effects""" - if not self.current_video: + """Apply fade effects to the current video editor""" + if not hasattr(self, 'video_editor') or not self.video_editor: messagebox.showwarning("No Video", "Please select a video first!") return fade_in = self.fade_in.get() fade_out = self.fade_out.get() - output_path = self.get_output_path("faded") - - def fade_operation(): - return VideoEditor.add_fade_effects(self.current_video, fade_in, fade_out, output_path) - - self.show_progress_dialog("Adding Fade Effects", fade_operation) + try: + self.video_editor.apply_fade_effects(fade_in, fade_out) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Fade effects applied: in {fade_in:.1f}s, out {fade_out:.1f}s") + except Exception as e: + print(f"❌ Error applying fades: {e}") + messagebox.showerror("Fade Error", f"Failed to apply fade effects:\n{str(e)}") def adjust_volume(self): - """Adjust video volume""" - if not self.current_video: + """Apply volume adjustment to the current video editor""" + if not hasattr(self, 'video_editor') or not self.video_editor: messagebox.showwarning("No Video", "Please select a video first!") return - if not self.video_info['has_audio']: - messagebox.showwarning("No Audio", "Selected video has no audio track!") - return - volume = self.volume_factor.get() - output_path = self.get_output_path(f"volume_{volume:.1f}x") - def volume_operation(): - return VideoEditor.adjust_volume(self.current_video, volume, output_path) - - self.show_progress_dialog("Adjusting Volume", volume_operation) + try: + self.video_editor.apply_volume(volume) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Volume adjusted to {volume:.1f}x") + except Exception as e: + print(f"❌ Error applying volume: {e}") + messagebox.showerror("Volume Error", f"Failed to adjust volume:\n{str(e)}") def resize_video(self): - """Resize video""" - if not self.current_video: + """Apply resize to the current video editor""" + if not hasattr(self, 'video_editor') or not self.video_editor: messagebox.showwarning("No Video", "Please select a video first!") return @@ -1398,16 +2810,17 @@ class ShortsEditorGUI: messagebox.showwarning("Invalid Size", "Width and height must be positive!") return - output_path = self.get_output_path(f"resized_{width}x{height}") - - def resize_operation(): - return VideoEditor.resize_video(self.current_video, width, height, output_path) - - self.show_progress_dialog("Resizing Video", resize_operation) + try: + self.video_editor.apply_resize(width, height) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Video resized to {width}x{height}") + except Exception as e: + print(f"❌ Error applying resize: {e}") + messagebox.showerror("Resize Error", f"Failed to resize video:\n{str(e)}") def add_text_overlay(self): - """Add text overlay to video with speed options""" - if not self.current_video: + """Apply text overlay to the current video editor""" + if not hasattr(self, 'video_editor') or not self.video_editor: 
messagebox.showwarning("No Video", "Please select a video first!") return @@ -1421,19 +2834,13 @@ class ShortsEditorGUI: size = self.text_size.get() method = self.text_method.get() - output_path = self.get_output_path("with_text") - - # Choose method based on user selection - if method == "fast": - def text_operation(): - return VideoEditor.add_text_overlay_fast(self.current_video, text, position, - font_size=size, output_path=output_path) - self.show_progress_dialog("Adding Text Overlay (Fast Method)", text_operation) - else: - def text_operation(): - return VideoEditor.add_text_overlay(self.current_video, text, position, - font_size=size, output_path=output_path) - self.show_progress_dialog("Adding Text Overlay (High Quality)", text_operation) + try: + self.video_editor.apply_text_overlay_to_current(text, position, size, 'white', method) + self.refresh_video_preview() + messagebox.showinfo("Success", f"Text '{text[:30]}...' added successfully") + except Exception as e: + print(f"❌ Error applying text overlay: {e}") + messagebox.showerror("Text Error", f"Failed to add text overlay:\n{str(e)}") # GUI Components class ShortsGeneratorGUI: @@ -1747,6 +3154,17 @@ Create thumbnails that get clicks!""" self.progress_bar = ttk.Progressbar(progress_frame, length=400, mode="determinate") self.progress_bar.pack(pady=3) + + # Detection progress (initially hidden) + self.detection_progress_label = tk.Label(progress_frame, text="", font=("Arial", 9), fg="gray") + self.detection_progress_label.pack() + + self.detection_progress_bar = ttk.Progressbar(progress_frame, length=400, mode="determinate") + self.detection_progress_bar.pack(pady=(0, 3)) + + # Initially hide detection progress + self.detection_progress_label.pack_forget() + self.detection_progress_bar.pack_forget() def select_video(self): file_path = filedialog.askopenfilename( @@ -1971,6 +3389,24 @@ Create thumbnails that get clicks!""" self.progress_bar["value"] = percent self.root.update() + def show_detection_progress(self): + """Show the detection progress bar""" + self.detection_progress_label.pack(after=self.progress_bar) + self.detection_progress_bar.pack(after=self.detection_progress_label, pady=(0, 3)) + self.root.update_idletasks() + + def hide_detection_progress(self): + """Hide the detection progress bar""" + self.detection_progress_label.pack_forget() + self.detection_progress_bar.pack_forget() + self.root.update_idletasks() + + def update_detection_progress(self, message, percent): + """Update detection progress bar and message""" + self.detection_progress_label.config(text=message) + self.detection_progress_bar["value"] = percent + self.root.update_idletasks() + def generation_worker(self): try: # Check available disk space @@ -1979,15 +3415,42 @@ Create thumbnails that get clicks!""" if free_space_gb < 1: raise RuntimeError(f"Insufficient disk space. Only {free_space_gb:.1f} GB available. 
     def generation_worker(self):
         try:
             # Check available disk space
@@ -1979,15 +3415,42 @@ Create thumbnails that get clicks!"""
             if free_space_gb < 1:
                 raise RuntimeError(f"Insufficient disk space. Only {free_space_gb:.1f} GB available. Need at least 1 GB.")
 
-            generate_shorts(
-                self.video_path,
-                max_clips=self.clips_var.get() if self.use_max_clips.get() else 10,  # Default max for non-loud modes
-                output_folder=self.output_folder,
-                progress_callback=self.update_progress,
-                threshold_db=self.threshold_var.get(),
-                clip_duration=self.duration_var.get(),
-                detection_mode=self.detection_mode_var.get()
-            )
+            # Show detection progress for heavy modes
+            detection_mode = self.detection_mode_var.get()
+            if detection_mode in ["scene", "motion", "speech", "peaks", "combined"]:
+                self.show_detection_progress()
+
+                def detailed_progress_callback(status, percent):
+                    # Update main progress
+                    self.update_progress(status, percent)
+
+                def detection_progress_callback(detection_percent, detection_status):
+                    # Update detection progress bar
+                    self.update_detection_progress(detection_status, detection_percent)
+
+                # Pass both callbacks to generate_shorts
+                generate_shorts(
+                    self.video_path,
+                    max_clips=self.clips_var.get() if self.use_max_clips.get() else 10,
+                    output_folder=self.output_folder,
+                    progress_callback=detailed_progress_callback,
+                    detection_progress_callback=detection_progress_callback,
+                    threshold_db=self.threshold_var.get(),
+                    clip_duration=self.duration_var.get(),
+                    detection_mode=detection_mode
+                )
+            else:
+                # Use regular progress for loud moments mode
+                generate_shorts(
+                    self.video_path,
+                    max_clips=self.clips_var.get() if self.use_max_clips.get() else 10,
+                    output_folder=self.output_folder,
+                    progress_callback=self.update_progress,
+                    threshold_db=self.threshold_var.get(),
+                    clip_duration=self.duration_var.get(),
+                    detection_mode=detection_mode
+                )
+
             messagebox.showinfo("Success", f"Successfully generated shorts in '{self.output_folder}' folder!")
         except FileNotFoundError as e:
             messagebox.showerror("File Error", str(e))
@@ -1998,6 +3461,7 @@ Create thumbnails that get clicks!"""
         except Exception as e:
             messagebox.showerror("Error", f"An unexpected error occurred: {str(e)}")
         finally:
+            self.hide_detection_progress()
             self.generate_btn.config(state="normal", text="🎬 Generate Shorts")
             self.progress_bar["value"] = 0
             self.progress_label.config(text="Ready to generate shorts")
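The detection-mode branch above passes `generate_shorts` a new `detection_progress_callback=` keyword whose argument order, `(percent, status)`, is fixed by the nested `detection_progress_callback` function. For that to work, `generate_shorts` and the detector loops must accept and forward it; that plumbing is outside this hunk, so the following is an assumed sketch of its shape, with stand-in timestamps in place of real frame sampling and illustrative default values.

# Assumed plumbing sketch: the defaults and the internal loop are inventions;
# only the parameter names match the call sites in generation_worker above.
def generate_shorts(video_path, max_clips=10, output_folder="shorts",
                    progress_callback=None, detection_progress_callback=None,
                    threshold_db=-20.0, clip_duration=30, detection_mode="loud"):
    times = list(range(100))  # stand-in for sampled frame timestamps
    for i, t in enumerate(times):
        # ... per-frame scene/motion/speech analysis would run here ...
        if detection_progress_callback and i % 10 == 0:
            percent = 100.0 * i / max(1, len(times))
            detection_progress_callback(percent, f"Analyzing frame {i}/{len(times)}")
    if progress_callback:
        progress_callback("Detection complete", 100)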