2854 lines
133 KiB
Python
2854 lines
133 KiB
Python
"""
|
||
Professional Video Editor for Generated Shorts
|
||
Author: Dario Pascoal
|
||
|
||
Description: This is a comprehensive video editing application designed specifically for editing
|
||
short-form video content. The application provides a professional timeline-based interface
|
||
similar to industry-standard video editing software, with features including:
|
||
|
||
- Multi-track timeline with visual track roads for professional editing workflow
|
||
- Real-time video preview with frame-accurate scrubbing
|
||
- Professional editing tools: trim, speed adjustment, volume control, fade effects
|
||
- Text overlay capabilities with customizable styling
|
||
- Export functionality with multiple format support
|
||
- Tabbed interface organizing tools into logical categories
|
||
- Dark theme optimized for video editing work
|
||
- Support for multiple video formats (MP4, AVI, MOV, MKV, etc.)
|
||
|
||
The application is built using Python's Tkinter for the GUI, OpenCV for basic video processing,
|
||
and optionally MoviePy for advanced video editing features. It's designed to be educational,
|
||
showing how professional video editing interfaces work while remaining accessible to beginners.
|
||
|
||
Technical Architecture:
|
||
- Uses threading for non-blocking video playback and processing
|
||
- Implements canvas-based timeline with precise time-to-pixel calculations
|
||
- Supports both MoviePy (full features) and OpenCV (basic features) backends
|
||
- Maintains professional video editing workflow patterns
|
||
- Provides real-time preview updates during editing operations
|
||
|
||
This file serves as the main entry point for the video editing functionality and can be
|
||
integrated into larger applications or run as a standalone tool.
|
||
"""
|
||
|
||
import tkinter as tk
|
||
from tkinter import ttk, filedialog, messagebox
|
||
import cv2
|
||
import numpy as np
|
||
import os
|
||
import threading
|
||
import time
|
||
from datetime import datetime
|
||
from PIL import Image, ImageTk
|
||
|
||
# Try to import MoviePy, handle if not available
|
||
try:
|
||
from moviepy.editor import VideoFileClip, TextClip, CompositeVideoClip
|
||
from moviepy.video.fx import FadeIn, FadeOut, Resize
|
||
from moviepy.audio.fx import MultiplyVolume
|
||
MOVIEPY_AVAILABLE = True
|
||
except ImportError:
|
||
print("⚠️ MoviePy not available - using OpenCV backend for video processing")
|
||
MOVIEPY_AVAILABLE = False
|
||
|
||
# Create dummy classes for compatibility
|
||
class VideoFileClip:
|
||
def __init__(self, *args, **kwargs):
|
||
raise ImportError("MoviePy not available")
|
||
class TextClip:
|
||
def __init__(self, *args, **kwargs):
|
||
raise ImportError("MoviePy not available")
|
||
class CompositeVideoClip:
|
||
def __init__(self, *args, **kwargs):
|
||
raise ImportError("MoviePy not available")
|
||
|
||
class ShortsEditorGUI:
|
||
"""
|
||
Professional Video Editing Interface
|
||
|
||
This class provides a complete video editing interface with timeline controls,
|
||
real-time preview, and professional editing tools. It's designed to mimic the
|
||
workflow of professional video editing software while remaining accessible
|
||
to beginners.
|
||
|
||
Key Features:
|
||
- Multi-track timeline with visual separation (road lines)
|
||
- Frame-accurate video scrubbing and playback
|
||
- Professional editing tools (trim, speed, volume, effects)
|
||
- Real-time preview with synchronized timeline
|
||
- Tabbed tool interface for organized workflow
|
||
- Support for multiple video formats
|
||
- Export functionality with timestamp-based naming
|
||
|
||
The interface uses a dark theme optimized for video editing work and provides
|
||
both mouse and keyboard controls for efficient editing workflow.
|
||
|
||
Attributes:
|
||
parent: Parent window or None for standalone operation
|
||
shorts_folder: Directory path for video files
|
||
current_video: Path to currently loaded video file
|
||
current_clip: Video clip object (MoviePy or OpenCV)
|
||
timeline_*: Various timeline state and control variables
|
||
colors: Dictionary defining the dark theme color scheme
|
||
fonts: Dictionary defining typography for different UI elements
|
||
"""
|
||
|
||
def __init__(self, parent=None, shorts_folder="shorts"):
|
||
"""
|
||
Initialize the Professional Video Editor Interface
|
||
|
||
Sets up all the necessary state variables, UI theme, and data structures
|
||
needed for professional video editing. This includes timeline management,
|
||
video playback state, track configuration, and UI styling.
|
||
|
||
The initialization process:
|
||
1. Stores parent window reference and shorts folder path
|
||
2. Initializes video playback state variables
|
||
3. Sets up timeline state and interaction controls
|
||
4. Configures professional editing features (snap, magnetic timeline)
|
||
5. Defines track structure for multi-track editing
|
||
6. Establishes dark theme color scheme optimized for video work
|
||
7. Sets up typography for different UI elements
|
||
|
||
Args:
|
||
parent: Parent tkinter window (None for standalone operation)
|
||
shorts_folder: String path to directory containing video files
|
||
"""
|
||
# Store parent window reference and video directory path
|
||
self.parent = parent
|
||
self.shorts_folder = shorts_folder
|
||
|
||
# Video playback state management
|
||
# These variables track the current state of video loading and playback
|
||
self.current_video = None # File path to currently loaded video
|
||
self.current_clip = None # Video clip object (MoviePy VideoFileClip or OpenCV VideoCapture)
|
||
self.current_time = 0.0 # Current playback position in seconds
|
||
self.video_duration = 0.0 # Total video duration in seconds
|
||
self.is_playing = False # Whether video is currently playing
|
||
self.timeline_is_playing = False # Whether timeline playback is active
|
||
self.play_thread = None # Background thread for video playback
|
||
|
||
# Timeline display and interaction state
|
||
# Controls how the timeline is rendered and how users interact with it
|
||
self.timeline_position = 0.0 # Current scroll position of timeline view
|
||
self.timeline_scale = 1.0 # Zoom level: pixels per second of video
|
||
self.timeline_width = 800 # Width of timeline canvas in pixels
|
||
|
||
# Professional timeline editing features
|
||
# These lists store timeline content and user-created elements
|
||
self.timeline_clips = [] # List of video/audio clips on timeline
|
||
self.selected_clip = None # Currently selected clip for editing
|
||
self.markers = [] # User-placed timeline markers for navigation
|
||
|
||
# Multi-track system for professional video editing workflow
|
||
# Each track has its own properties and can hold different types of media
|
||
# This structure allows for layered editing with video and audio tracks
|
||
self.tracks = {
|
||
# Video tracks - higher tracks appear on top in the final composition
|
||
'video_1': {'y_offset': 40, 'height': 60, 'color': '#3498db', 'name': 'Video 1',
|
||
'muted': False, 'locked': False, 'solo': False, 'visible': True, 'type': 'video'},
|
||
'video_2': {'y_offset': 105, 'height': 60, 'color': '#2ecc71', 'name': 'Video 2',
|
||
'muted': False, 'locked': False, 'solo': False, 'visible': True, 'type': 'video'},
|
||
'video_3': {'y_offset': 170, 'height': 60, 'color': '#9b59b6', 'name': 'Video 3',
|
||
'muted': False, 'locked': False, 'solo': False, 'visible': True, 'type': 'video'},
|
||
# Audio tracks - for background music, sound effects, voiceovers
|
||
'audio_1': {'y_offset': 235, 'height': 40, 'color': '#e74c3c', 'name': 'Audio 1',
|
||
'muted': False, 'locked': False, 'solo': False, 'visible': True, 'type': 'audio'},
|
||
'audio_2': {'y_offset': 280, 'height': 40, 'color': '#f39c12', 'name': 'Audio 2',
|
||
'muted': False, 'locked': False, 'solo': False, 'visible': True, 'type': 'audio'}
|
||
}
|
||
|
||
# Timeline interaction state for drag-and-drop editing
|
||
# These variables track the current state of user interactions with timeline elements
|
||
self.dragging_clip = None # Clip currently being dragged by user
|
||
self.dragging_media = None # Media file being dragged from bin to timeline
|
||
self.resizing_clip = None # Clip being resized
|
||
self.moving_clip = None # Clip being moved
|
||
self.selected_timeline_clip = None # Index of selected timeline clip
|
||
self.drag_start_x = None # Mouse X position when drag started
|
||
self.drag_start_time = None # Original time position of dragged clip
|
||
self.drag_offset = 0 # Offset from clip start to mouse position
|
||
self.snap_enabled = True # Whether clips snap to grid/other clips
|
||
self.magnetic_timeline = True # Whether clips magnetically attract to each other
|
||
self.grid_size = 1.0 # Snap grid size in seconds (for precise alignment)
|
||
|
||
# Timeline editing modes for different types of operations
|
||
# This affects how mouse interactions behave on the timeline
|
||
self.edit_mode = 'select' # Current editing mode: 'select', 'cut', 'trim', 'ripple'
|
||
|
||
# Visual enhancement options for better user experience
|
||
# These can be toggled on/off based on performance and user preference
|
||
self.show_thumbnails = True # Show video thumbnail previews on timeline clips
|
||
self.show_waveforms = True # Show audio waveform visualization
|
||
|
||
# Cached visual data for performance optimization
|
||
# Storing thumbnails and waveforms prevents repeated generation
|
||
self.clip_thumbnails = {} # Dictionary storing thumbnail images by clip ID
|
||
self.audio_waveforms = {} # Dictionary storing waveform data by clip ID
|
||
|
||
# Timeline clips storage for media bin drops
|
||
self.timeline_clips = [] # List of all clips on the timeline
|
||
|
||
# Track control widget references for dynamic updates
|
||
# This allows us to update track control buttons when states change
|
||
self.track_widgets = {} # Dictionary storing widget references by track ID
|
||
|
||
# Professional dark color scheme optimized for video editing
|
||
# Dark themes reduce eye strain during long editing sessions and help
|
||
# focus attention on the video content rather than the interface
|
||
self.colors = {
|
||
'bg_primary': '#1a1a1a', # Main background - darkest for maximum contrast
|
||
'bg_secondary': '#2d2d2d', # Secondary panels - slightly lighter
|
||
'bg_tertiary': '#3d3d3d', # Buttons and controls - interactive elements
|
||
'bg_hover': '#404040', # Hover state - subtle highlight for interaction
|
||
'text_primary': '#ffffff', # Main text - high contrast for readability
|
||
'text_secondary': '#b8b8b8', # Secondary text - lower contrast for hierarchy
|
||
'accent_blue': '#007acc', # Primary actions - professional blue
|
||
'accent_green': '#28a745', # Success/positive actions - natural green
|
||
'accent_orange': '#fd7e14', # Warning/attention - energetic orange
|
||
'accent_red': '#dc3545', # Destructive/negative actions - clear red
|
||
'border': '#404040' # Subtle borders - defines sections without distraction
|
||
}
|
||
|
||
# Typography system for consistent text hierarchy
|
||
# Font choices prioritize readability and professional appearance
|
||
# Segoe UI provides excellent readability across different screen sizes
|
||
self.fonts = {
|
||
'title': ('Segoe UI', 16, 'bold'), # Main window titles and headers
|
||
'heading': ('Segoe UI', 11, 'bold'), # Section headings and tool categories
|
||
'body': ('Segoe UI', 10), # Regular text and input fields
|
||
'caption': ('Segoe UI', 9), # Small labels and descriptions
|
||
'button': ('Segoe UI', 10, 'bold') # Button text for clear action indicators
|
||
}
|
||
|
||
def open_editor(self):
|
||
"""
|
||
Open the Professional Video Editor Window
|
||
|
||
Creates and displays the main video editing interface window. This method
|
||
sets up the window properties, makes it responsive, and initializes the
|
||
complete editor interface.
|
||
|
||
The window creation process:
|
||
1. Creates either a new Tkinter window (standalone) or Toplevel window (child)
|
||
2. Sets window title, size, and minimum dimensions for usability
|
||
3. Applies the dark theme background color
|
||
4. Configures responsive layout with proper weight distribution
|
||
5. Calls create_editor_interface() to build the complete UI
|
||
6. Starts the main event loop if running standalone
|
||
|
||
Window Properties:
|
||
- Title: "Professional Shorts Editor"
|
||
- Size: 1200x800 pixels (optimal for video editing workflow)
|
||
- Minimum: 900x600 pixels (ensures UI remains functional when resized)
|
||
- Theme: Dark background optimized for video editing
|
||
- Layout: Responsive with proper weight distribution for resizing
|
||
"""
|
||
# Create the main editor window - either standalone or child window
|
||
# If parent exists, create as child window; otherwise create root window
|
||
self.editor_window = tk.Toplevel(self.parent) if self.parent else tk.Tk()
|
||
self.editor_window.title("Professional Shorts Editor")
|
||
self.editor_window.geometry("1200x900") # Increased height for Audio 2 visibility
|
||
self.editor_window.minsize(900, 700) # Increased minimum size
|
||
self.editor_window.configure(bg=self.colors['bg_primary']) # Apply dark theme
|
||
|
||
# Configure responsive layout for window resizing
|
||
# Row 1 gets all extra vertical space (main content area)
|
||
# Column 0 gets all extra horizontal space (full width utilization)
|
||
self.editor_window.rowconfigure(1, weight=1)
|
||
self.editor_window.columnconfigure(0, weight=1)
|
||
|
||
# Build the complete editor interface
|
||
self.create_editor_interface()
|
||
|
||
# Start the application main loop if running as standalone application
|
||
if not self.parent:
|
||
self.editor_window.mainloop()
|
||
|
||
def create_editor_interface(self):
|
||
"""
|
||
Create the Complete Professional Video Editor Interface
|
||
|
||
Builds the entire user interface for the video editor, organizing it into
|
||
logical sections that mirror professional video editing software. The
|
||
interface uses a three-panel layout with header, main content, and tools.
|
||
|
||
Interface Structure:
|
||
1. Header: Title, current file display, and file selection controls
|
||
2. Main Content: Split into left panel (video/timeline) and right panel (tools)
|
||
3. Left Panel: Video preview, playback controls, and professional timeline
|
||
4. Right Panel: Tabbed tool interface with editing functions
|
||
5. Timeline: Multi-track workspace with road lines and interactive controls
|
||
|
||
The layout is designed to be intuitive for users familiar with professional
|
||
video editing software while remaining accessible to beginners. Dark theme
|
||
reduces eye strain during long editing sessions.
|
||
"""
|
||
# Header section - contains title and file management controls
|
||
# Fixed height prevents layout shifts when content changes
|
||
header_frame = tk.Frame(self.editor_window, bg=self.colors['bg_secondary'], height=60)
|
||
header_frame.pack(fill="x", padx=10, pady=(10, 0))
|
||
header_frame.pack_propagate(False) # Maintain fixed height
|
||
|
||
# Application title with professional icon and branding
|
||
title_label = tk.Label(header_frame, text="✏️ Professional Shorts Editor",
|
||
font=self.fonts['title'], bg=self.colors['bg_secondary'],
|
||
fg=self.colors['text_primary'])
|
||
title_label.pack(side="left", padx=20, pady=15)
|
||
|
||
# File selection area - shows current file and provides selection control
|
||
file_frame = tk.Frame(header_frame, bg=self.colors['bg_secondary'])
|
||
file_frame.pack(side="right", padx=20, pady=15)
|
||
|
||
# Current file display - shows filename or "No video selected" status
|
||
self.current_file_label = tk.Label(file_frame, text="No video selected",
|
||
font=self.fonts['body'], bg=self.colors['bg_tertiary'],
|
||
fg=self.colors['text_secondary'], padx=15, pady=8)
|
||
self.current_file_label.pack(side="left", padx=(0, 10))
|
||
|
||
# File selection button - opens file browser or shows available videos
|
||
select_btn = tk.Button(file_frame, text="📁 Select Video",
|
||
command=self.select_video_file, font=self.fonts['button'],
|
||
bg=self.colors['accent_blue'], fg='white', padx=20, pady=8,
|
||
relief="flat", bd=0, cursor="hand2")
|
||
select_btn.pack(side="left")
|
||
|
||
# Main content area - contains the primary editing workspace
|
||
# Uses a two-column grid layout with weighted columns for responsive design
|
||
# Left column (weight=2) is larger for video preview and timeline
|
||
# Right column (weight=1) is smaller for tools and controls
|
||
main_frame = tk.Frame(self.editor_window, bg=self.colors['bg_primary'])
|
||
main_frame.pack(fill="both", expand=True, padx=10, pady=10)
|
||
main_frame.rowconfigure(0, weight=1) # Single row takes all vertical space
|
||
main_frame.columnconfigure(0, weight=0) # Media bin column (fixed width)
|
||
main_frame.columnconfigure(1, weight=2) # Left column gets 2/3 of remaining width
|
||
main_frame.columnconfigure(2, weight=1) # Right column gets 1/3 of width
|
||
|
||
# Media Bin - File library for drag and drop editing
|
||
# Container for video and audio files that can be dragged to timeline tracks
|
||
media_bin_frame = tk.Frame(main_frame, bg=self.colors['bg_tertiary'], width=200)
|
||
media_bin_frame.grid(row=0, column=0, sticky="nsew", padx=(0, 5))
|
||
media_bin_frame.pack_propagate(False) # Maintain fixed width
|
||
self.create_media_bin(media_bin_frame)
|
||
|
||
# Middle panel - Video preview and timeline workspace
|
||
# This is the main editing area where users see their video and timeline
|
||
# Organized vertically with video preview on top and timeline below
|
||
player_frame = tk.Frame(main_frame, bg=self.colors['bg_secondary'])
|
||
player_frame.grid(row=0, column=1, sticky="nsew", padx=(0, 5))
|
||
player_frame.rowconfigure(0, weight=1) # Video preview area (expandable)
|
||
player_frame.rowconfigure(1, weight=0) # Timeline area (fixed height)
|
||
player_frame.columnconfigure(0, weight=1) # Full width utilization
|
||
|
||
# Video Display Area - The main preview window for video content
|
||
# This section provides real-time video preview with proper aspect ratio maintenance
|
||
# Dark border provides visual separation and professional appearance
|
||
video_container = tk.Frame(player_frame, bg=self.colors['bg_tertiary'])
|
||
video_container.grid(row=0, column=0, sticky="nsew", padx=15, pady=15)
|
||
video_container.rowconfigure(0, weight=1) # Video canvas expands to fill space
|
||
video_container.columnconfigure(0, weight=1) # Full width utilization
|
||
|
||
# Video canvas - displays the actual video frames
|
||
# Black background provides professional video preview appearance
|
||
# No highlight thickness removes the default canvas border for cleaner look
|
||
self.video_canvas = tk.Canvas(video_container, bg='black', highlightthickness=0)
|
||
self.video_canvas.grid(row=0, column=0, sticky="nsew")
|
||
|
||
# Professional Timeline Workspace - Multi-track editing environment
|
||
# Fixed height prevents timeline from shrinking when window is resized
|
||
# This is where users perform the majority of their editing work
|
||
timeline_workspace = tk.Frame(player_frame, bg=self.colors['bg_secondary'], height=480)
|
||
timeline_workspace.grid(row=1, column=0, sticky="ew", padx=15, pady=(0, 15))
|
||
timeline_workspace.pack_propagate(False) # Maintain fixed height for consistent layout
|
||
|
||
# Timeline header - contains editing mode controls and timeline options
|
||
# These controls affect how the user interacts with timeline elements
|
||
header_frame = tk.Frame(timeline_workspace, bg=self.colors['bg_secondary'])
|
||
header_frame.pack(fill="x", pady=(5, 5))
|
||
|
||
# Left side timeline controls - editing modes and interaction options
|
||
left_controls = tk.Frame(header_frame, bg=self.colors['bg_secondary'])
|
||
left_controls.pack(side="left")
|
||
|
||
# Editing mode selector - determines how mouse interactions behave
|
||
# Different modes enable different types of editing operations
|
||
tk.Label(left_controls, text="Mode:", font=self.fonts['caption'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_secondary']).pack(side="left")
|
||
|
||
# Mode selection dropdown with professional editing modes
|
||
# select: Default mode for selecting and moving clips
|
||
# cut: Razor tool for splitting clips at specific points
|
||
# trim: For adjusting clip start/end points
|
||
# ripple: Moves clips and automatically adjusts following clips
|
||
self.mode_var = tk.StringVar(value="select")
|
||
mode_combo = ttk.Combobox(left_controls, textvariable=self.mode_var, width=8,
|
||
values=["select", "cut", "trim", "ripple"], state="readonly")
|
||
mode_combo.pack(side="left", padx=(5, 10))
|
||
mode_combo.bind('<<ComboboxSelected>>', self.on_mode_change)
|
||
|
||
# Professional timeline assistance features
|
||
# These options help users align and position clips precisely
|
||
self.snap_var = tk.BooleanVar(value=True)
|
||
snap_check = tk.Checkbutton(left_controls, text="Snap", variable=self.snap_var,
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
selectcolor=self.colors['accent_blue'], command=self.toggle_snap)
|
||
snap_check.pack(side="left", padx=5)
|
||
|
||
# Magnetic timeline - clips automatically attract to align with other clips
|
||
# This feature helps maintain precise timing relationships between clips
|
||
self.magnetic_var = tk.BooleanVar(value=True)
|
||
magnetic_check = tk.Checkbutton(left_controls, text="Magnetic", variable=self.magnetic_var,
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
selectcolor=self.colors['accent_blue'], command=self.toggle_magnetic)
|
||
magnetic_check.pack(side="left", padx=5)
|
||
|
||
# Center - Professional playback controls for timeline
|
||
# These controls manage video playback synchronized with timeline position
|
||
# Color-coded for immediate recognition: Green=Play, Orange=Pause, Red=Stop
|
||
center_controls = tk.Frame(header_frame, bg=self.colors['bg_secondary'])
|
||
center_controls.pack(side="left", padx=20)
|
||
|
||
# Play button - starts video playback from current timeline position
|
||
self.timeline_play_btn = tk.Button(center_controls, text="▶️",
|
||
command=self.timeline_play,
|
||
bg=self.colors['accent_green'], fg='white',
|
||
font=('Arial', 12, 'bold'), width=3, height=1,
|
||
relief="flat", bd=0, cursor="hand2")
|
||
self.timeline_play_btn.pack(side="left", padx=2)
|
||
|
||
# Pause button - temporarily stops playback, maintains current position
|
||
self.timeline_pause_btn = tk.Button(center_controls, text="⏸️",
|
||
command=self.timeline_pause,
|
||
bg=self.colors['accent_orange'], fg='white',
|
||
font=('Arial', 12, 'bold'), width=3, height=1,
|
||
relief="flat", bd=0, cursor="hand2")
|
||
self.timeline_pause_btn.pack(side="left", padx=2)
|
||
|
||
# Stop button - stops playback and returns to beginning of timeline
|
||
self.timeline_stop_btn = tk.Button(center_controls, text="⏹️",
|
||
command=self.timeline_stop,
|
||
bg=self.colors['accent_red'], fg='white',
|
||
font=('Arial', 12, 'bold'), width=3, height=1,
|
||
relief="flat", bd=0, cursor="hand2")
|
||
self.timeline_stop_btn.pack(side="left", padx=2)
|
||
|
||
# Right side - Zoom and time display
|
||
right_controls = tk.Frame(header_frame, bg=self.colors['bg_secondary'])
|
||
right_controls.pack(side="right")
|
||
|
||
# Zoom control
|
||
tk.Label(right_controls, text="Zoom:", font=self.fonts['caption'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_secondary']).pack(side="left")
|
||
|
||
self.zoom_var = tk.DoubleVar(value=1.0)
|
||
zoom_scale = tk.Scale(right_controls, from_=0.1, to=5.0, resolution=0.1,
|
||
orient="horizontal", variable=self.zoom_var,
|
||
command=self.on_zoom_change, length=150,
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
highlightthickness=0, troughcolor=self.colors['bg_tertiary'])
|
||
zoom_scale.pack(side="left", padx=10)
|
||
|
||
# Time display
|
||
self.time_display = tk.Label(right_controls, text="00:00 / 00:00",
|
||
font=self.fonts['body'], bg=self.colors['bg_secondary'],
|
||
fg=self.colors['text_primary'])
|
||
self.time_display.pack(side="left", padx=20)
|
||
|
||
# Main timeline container
|
||
timeline_container = tk.Frame(timeline_workspace, bg=self.colors['bg_tertiary'])
|
||
timeline_container.pack(fill="both", expand=True, pady=5)
|
||
|
||
# Track labels panel (left side)
|
||
self.track_panel = tk.Frame(timeline_container, bg=self.colors['bg_secondary'], width=120)
|
||
self.track_panel.pack(side="left", fill="y")
|
||
self.track_panel.pack_propagate(False)
|
||
|
||
# Timeline canvas with scrollbars
|
||
canvas_frame = tk.Frame(timeline_container, bg=self.colors['bg_tertiary'])
|
||
canvas_frame.pack(side="right", fill="both", expand=True)
|
||
|
||
# Create canvas without internal scrollbars
|
||
self.timeline_canvas = tk.Canvas(canvas_frame, bg='#1a1a1a',
|
||
highlightthickness=0, scrollregion=(0, 0, 2000, 400))
|
||
|
||
# Pack canvas to fill the frame completely
|
||
self.timeline_canvas.pack(fill="both", expand=True)
|
||
|
||
# Create external horizontal scrollbar for timeline control
|
||
# Place it outside the timeline in the workspace area
|
||
external_scrollbar_frame = tk.Frame(timeline_workspace, bg=self.colors['bg_primary'], height=20)
|
||
external_scrollbar_frame.pack(side="bottom", fill="x", padx=5, pady=2)
|
||
external_scrollbar_frame.pack_propagate(False)
|
||
|
||
# Professional tips label
|
||
tips_label = tk.Label(external_scrollbar_frame,
|
||
text="💡 CTRL+Click: Select clips • Delete/Backspace: Delete selected • Click: Move playhead",
|
||
bg=self.colors['bg_primary'], fg=self.colors['text_secondary'],
|
||
font=('Segoe UI', 8))
|
||
tips_label.pack(side="left", padx=5, pady=2)
|
||
|
||
# Horizontal scrollbar outside timeline but controlling it
|
||
self.external_h_scrollbar = ttk.Scrollbar(external_scrollbar_frame, orient="horizontal",
|
||
command=self.timeline_canvas.xview)
|
||
self.timeline_canvas.configure(xscrollcommand=self.external_h_scrollbar.set)
|
||
self.external_h_scrollbar.pack(side="right", fill="x", expand=True)
|
||
|
||
# Bind professional timeline events
|
||
self.timeline_canvas.bind("<Button-1>", self.timeline_click)
|
||
self.timeline_canvas.bind("<B1-Motion>", self.timeline_drag)
|
||
self.timeline_canvas.bind("<ButtonRelease-1>", self.on_timeline_drag_end)
|
||
self.timeline_canvas.bind("<Button-3>", self.on_timeline_right_click)
|
||
self.timeline_canvas.bind("<Double-Button-1>", self.on_timeline_double_click)
|
||
|
||
# Keyboard shortcuts for timeline
|
||
self.timeline_canvas.bind("<KeyPress-Delete>", self.delete_selected_clip)
|
||
self.timeline_canvas.bind("<KeyPress-BackSpace>", self.delete_selected_clip)
|
||
self.timeline_canvas.focus_set() # Allow timeline to receive keyboard events
|
||
|
||
# Create track controls
|
||
self.create_track_controls()
|
||
|
||
# Bind canvas resize to redraw road lines
|
||
self.track_panel.bind("<Configure>", self.on_track_panel_resize)
|
||
|
||
# Initialize sample clips for demonstration
|
||
self.create_sample_timeline_content()
|
||
|
||
# Right panel - Tools and effects
|
||
tools_frame = tk.Frame(main_frame, bg=self.colors['bg_secondary'])
|
||
tools_frame.grid(row=0, column=2, sticky="nsew", padx=(5, 0))
|
||
|
||
# Tools header
|
||
tools_header = tk.Label(tools_frame, text="🛠️ Editing Tools",
|
||
font=self.fonts['heading'], bg=self.colors['bg_secondary'],
|
||
fg=self.colors['text_primary'])
|
||
tools_header.pack(pady=(15, 10))
|
||
|
||
# Create tabbed interface for tools
|
||
self.create_tabbed_tools(tools_frame)
|
||
|
||
# Initialize timeline
|
||
self.update_timeline()
|
||
|
||
def create_tabbed_tools(self, parent):
|
||
"""Create tabbed interface for editing tools"""
|
||
import tkinter.ttk as ttk
|
||
|
||
# Create notebook for tabs
|
||
self.tools_notebook = ttk.Notebook(parent)
|
||
self.tools_notebook.pack(fill="both", expand=True, padx=10, pady=5)
|
||
|
||
# Configure notebook style for dark theme
|
||
style = ttk.Style()
|
||
|
||
# Set theme and configure colors
|
||
try:
|
||
style.theme_use('clam') # Use clam theme as base for better customization
|
||
except:
|
||
pass # Fall back to default theme if clam isn't available
|
||
|
||
# Configure the notebook (main container)
|
||
style.configure('TNotebook',
|
||
background=self.colors['bg_secondary'],
|
||
borderwidth=0,
|
||
tabmargins=[2, 5, 2, 0])
|
||
|
||
# Configure the tabs themselves with consistent sizing
|
||
style.configure('TNotebook.Tab',
|
||
background=self.colors['bg_tertiary'],
|
||
foreground=self.colors['text_primary'],
|
||
padding=[15, 8, 15, 8], # Explicit left, top, right, bottom padding
|
||
borderwidth=0, # Remove border to prevent size changes
|
||
focuscolor='none',
|
||
relief='flat')
|
||
|
||
# Configure selected tab with same padding to prevent shrinking
|
||
style.map('TNotebook.Tab',
|
||
background=[('selected', self.colors['accent_blue']),
|
||
('active', self.colors['bg_primary']),
|
||
('!active', self.colors['bg_tertiary'])],
|
||
foreground=[('selected', 'white'),
|
||
('active', self.colors['text_primary']),
|
||
('!active', self.colors['text_primary'])],
|
||
padding=[('selected', [15, 8, 15, 8]), # Same padding for selected
|
||
('active', [15, 8, 15, 8]), # Same padding for active
|
||
('!active', [15, 8, 15, 8])], # Same padding for inactive
|
||
borderwidth=[('selected', 0), # No border changes
|
||
('active', 0),
|
||
('!active', 0)],
|
||
relief=[('selected', 'flat'), # Consistent relief
|
||
('active', 'flat'),
|
||
('!active', 'flat')])
|
||
|
||
# Basic Editing Tab
|
||
basic_frame = tk.Frame(self.tools_notebook, bg=self.colors['bg_secondary'])
|
||
self.tools_notebook.add(basic_frame, text="Basic Editing")
|
||
self.create_basic_tools(basic_frame)
|
||
|
||
# Video Effects Tab
|
||
video_effects_frame = tk.Frame(self.tools_notebook, bg=self.colors['bg_secondary'])
|
||
self.tools_notebook.add(video_effects_frame, text="Video Effects")
|
||
self.create_video_effects_tools(video_effects_frame)
|
||
|
||
# Audio Effects Tab
|
||
audio_effects_frame = tk.Frame(self.tools_notebook, bg=self.colors['bg_secondary'])
|
||
self.tools_notebook.add(audio_effects_frame, text="Audio Effects")
|
||
self.create_audio_effects_tools(audio_effects_frame)
|
||
|
||
# Export Tab
|
||
export_frame = tk.Frame(self.tools_notebook, bg=self.colors['bg_secondary'])
|
||
self.tools_notebook.add(export_frame, text="Export")
|
||
self.create_export_tools(export_frame)
|
||
|
||
def create_track_controls(self):
|
||
"""Create professional track control panel with road lines"""
|
||
# Clear existing track controls
|
||
for widget in self.track_panel.winfo_children():
|
||
widget.destroy()
|
||
|
||
# Create road line background canvas
|
||
self.track_road_canvas = tk.Canvas(self.track_panel,
|
||
bg=self.colors['bg_secondary'],
|
||
highlightthickness=0,
|
||
scrollregion=(0, 0, 120, 400))
|
||
self.track_road_canvas.pack(fill="both", expand=True)
|
||
|
||
# Draw road lines for track separation
|
||
self.draw_track_road_lines()
|
||
|
||
# Create controls for each track
|
||
for track_id, track_info in self.tracks.items():
|
||
self.create_track_control(track_id, track_info)
|
||
|
||
def create_media_bin(self, parent_frame):
|
||
"""Create media bin for drag and drop file management"""
|
||
# Media bin header
|
||
header = tk.Frame(parent_frame, bg=self.colors['bg_secondary'], height=40)
|
||
header.pack(fill="x", padx=5, pady=5)
|
||
header.pack_propagate(False)
|
||
|
||
title_label = tk.Label(header, text="📁 Media Bin",
|
||
fg=self.colors['text_primary'], bg=self.colors['bg_secondary'],
|
||
font=('Segoe UI', 12, 'bold'))
|
||
title_label.pack(side="left", padx=10, pady=10)
|
||
|
||
# Add file button
|
||
add_btn = tk.Button(header, text="+ Add", command=self.add_media_file,
|
||
bg=self.colors['accent_blue'], fg='white', bd=0,
|
||
font=('Segoe UI', 10), relief='flat')
|
||
add_btn.pack(side="right", padx=10, pady=5)
|
||
|
||
# Media files container with scrollbar
|
||
container = tk.Frame(parent_frame, bg=self.colors['bg_tertiary'])
|
||
container.pack(fill="both", expand=True, padx=5, pady=(0, 5))
|
||
|
||
# Create canvas for scrollable media list
|
||
self.media_canvas = tk.Canvas(container, bg=self.colors['bg_tertiary'],
|
||
highlightthickness=0)
|
||
scrollbar = ttk.Scrollbar(container, orient="vertical", command=self.media_canvas.yview)
|
||
self.scrollable_media_frame = tk.Frame(self.media_canvas, bg=self.colors['bg_tertiary'])
|
||
|
||
self.scrollable_media_frame.bind(
|
||
"<Configure>",
|
||
lambda e: self.media_canvas.configure(scrollregion=self.media_canvas.bbox("all"))
|
||
)
|
||
|
||
self.media_canvas.create_window((0, 0), window=self.scrollable_media_frame, anchor="nw")
|
||
self.media_canvas.configure(yscrollcommand=scrollbar.set)
|
||
|
||
self.media_canvas.pack(side="left", fill="both", expand=True)
|
||
scrollbar.pack(side="right", fill="y")
|
||
|
||
# Initialize media files list
|
||
self.media_files = []
|
||
|
||
# Create drop zone with proper drag and drop support
|
||
drop_zone = tk.Frame(self.scrollable_media_frame, bg=self.colors['bg_secondary'],
|
||
height=100, relief='solid', bd=1)
|
||
drop_zone.pack(fill="x", padx=10, pady=10)
|
||
drop_zone.pack_propagate(False)
|
||
|
||
drop_label = tk.Label(drop_zone, text="🎬 Drop media files here\nor click 'Add' button",
|
||
fg=self.colors['text_secondary'], bg=self.colors['bg_secondary'],
|
||
font=('Segoe UI', 10), justify='center')
|
||
drop_label.pack(expand=True)
|
||
|
||
# Enable Windows Explorer drag and drop
|
||
self.setup_external_drop(drop_zone)
|
||
self.setup_external_drop(drop_label)
|
||
|
||
# Bind click to add files
|
||
drop_zone.bind("<Button-1>", lambda e: self.add_media_file())
|
||
drop_label.bind("<Button-1>", lambda e: self.add_media_file())
|
||
|
||
def add_media_file(self):
|
||
"""Add media file to the bin"""
|
||
from tkinter import filedialog
|
||
import os
|
||
|
||
file_types = [
|
||
("Video files", "*.mp4 *.avi *.mov *.mkv *.wmv *.flv"),
|
||
("Audio files", "*.mp3 *.wav *.aac *.ogg *.m4a"),
|
||
("All files", "*.*")
|
||
]
|
||
|
||
# Use current directory or Desktop as default
|
||
initial_dir = os.path.join(os.path.expanduser("~"), "Desktop")
|
||
|
||
file_path = filedialog.askopenfilename(
|
||
title="Select media file",
|
||
filetypes=file_types,
|
||
initialdir=initial_dir
|
||
)
|
||
|
||
if file_path:
|
||
self.add_media_to_bin(file_path)
|
||
|
||
def add_media_to_bin(self, file_path):
|
||
"""Add media file to bin with preview"""
|
||
import os
|
||
|
||
filename = os.path.basename(file_path)
|
||
file_ext = os.path.splitext(filename)[1].lower()
|
||
|
||
# Determine file type
|
||
video_exts = ['.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv']
|
||
audio_exts = ['.mp3', '.wav', '.aac', '.ogg', '.m4a']
|
||
|
||
if file_ext in video_exts:
|
||
file_type = "🎬"
|
||
type_name = "Video"
|
||
elif file_ext in audio_exts:
|
||
file_type = "🎵"
|
||
type_name = "Audio"
|
||
else:
|
||
file_type = "📄"
|
||
type_name = "File"
|
||
|
||
# Create media item frame
|
||
media_item = tk.Frame(self.scrollable_media_frame, bg=self.colors['bg_secondary'],
|
||
relief='solid', bd=1)
|
||
media_item.pack(fill="x", padx=10, pady=2)
|
||
|
||
# File info frame
|
||
info_frame = tk.Frame(media_item, bg=self.colors['bg_secondary'])
|
||
info_frame.pack(fill="x", padx=5, pady=5)
|
||
|
||
# File type icon and name
|
||
tk.Label(info_frame, text=file_type, bg=self.colors['bg_secondary'],
|
||
font=('Segoe UI', 16)).pack(side="left", padx=(0, 5))
|
||
|
||
name_frame = tk.Frame(info_frame, bg=self.colors['bg_secondary'])
|
||
name_frame.pack(side="left", fill="x", expand=True)
|
||
|
||
tk.Label(name_frame, text=filename[:25] + "..." if len(filename) > 25 else filename,
|
||
fg=self.colors['text_primary'], bg=self.colors['bg_secondary'],
|
||
font=('Segoe UI', 9, 'bold'), anchor="w").pack(anchor="w")
|
||
|
||
tk.Label(name_frame, text=f"{type_name} • {file_ext.upper()}",
|
||
fg=self.colors['text_secondary'], bg=self.colors['bg_secondary'],
|
||
font=('Segoe UI', 8), anchor="w").pack(anchor="w")
|
||
|
||
# Store file info
|
||
media_info = {
|
||
'path': file_path,
|
||
'filename': filename,
|
||
'type': type_name.lower(),
|
||
'widget': media_item
|
||
}
|
||
self.media_files.append(media_info)
|
||
|
||
# Enable drag and drop from media item to timeline
|
||
self.setup_media_drag(media_item, media_info)
|
||
|
||
print(f"📁 Added {type_name.lower()}: {filename}")
|
||
|
||
def setup_external_drop(self, widget):
|
||
"""Setup drag and drop from Windows Explorer"""
|
||
try:
|
||
# Try to enable Windows file drag and drop
|
||
import tkinter.dnd as dnd
|
||
|
||
def drop_enter(event):
|
||
widget.configure(bg=self.colors['accent_green'])
|
||
return 'copy'
|
||
|
||
def drop_leave(event):
|
||
widget.configure(bg=self.colors['bg_secondary'])
|
||
|
||
def drop_action(event):
|
||
# Handle file drop from Windows Explorer
|
||
files = widget.tk.splitlist(event.data)
|
||
for file_path in files:
|
||
if file_path.startswith('{') and file_path.endswith('}'):
|
||
file_path = file_path[1:-1] # Remove braces
|
||
|
||
# Check if it's a media file
|
||
import os
|
||
if os.path.isfile(file_path):
|
||
ext = os.path.splitext(file_path)[1].lower()
|
||
media_extensions = ['.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv',
|
||
'.mp3', '.wav', '.aac', '.ogg', '.m4a']
|
||
if ext in media_extensions:
|
||
self.add_media_to_bin(file_path)
|
||
print(f"📁 Dropped file: {os.path.basename(file_path)}")
|
||
|
||
widget.configure(bg=self.colors['bg_secondary'])
|
||
return 'copy'
|
||
|
||
# Register for file drops
|
||
widget.bind('<Drop>', drop_action)
|
||
widget.bind('<DragEnter>', drop_enter)
|
||
widget.bind('<DragLeave>', drop_leave)
|
||
|
||
# Enable as drop target
|
||
widget.drop_target_register('DND_Files')
|
||
|
||
except Exception as e:
|
||
print(f"⚠️ External drag-drop setup failed: {e}")
|
||
# Fallback to click-to-add only
|
||
pass
|
||
|
||
def setup_media_drag(self, widget, media_info):
|
||
"""Setup drag and drop functionality for media items"""
|
||
def start_drag(event):
|
||
# Store the media info for drag operation
|
||
self.dragging_media = media_info
|
||
# Visual feedback: blue border and background when dragging starts
|
||
widget.configure(bg=self.colors['accent_blue'], relief='solid', bd=2)
|
||
print(f"🎬 Started dragging: {media_info['filename']}")
|
||
|
||
def end_drag(event):
|
||
# Check if we're over the timeline
|
||
try:
|
||
timeline_x = self.timeline_canvas.winfo_rootx()
|
||
timeline_y = self.timeline_canvas.winfo_rooty()
|
||
timeline_width = self.timeline_canvas.winfo_width()
|
||
timeline_height = self.timeline_canvas.winfo_height()
|
||
|
||
mouse_x = event.x_root
|
||
mouse_y = event.y_root
|
||
|
||
print(f"🖱️ Drop coordinates: mouse({mouse_x},{mouse_y}), timeline({timeline_x},{timeline_y},{timeline_width},{timeline_height})")
|
||
|
||
# Check if dropped on timeline
|
||
if (timeline_x <= mouse_x <= timeline_x + timeline_width and
|
||
timeline_y <= mouse_y <= timeline_y + timeline_height):
|
||
self.handle_media_drop_on_timeline(mouse_x, mouse_y)
|
||
else:
|
||
print(f"❌ Dropped outside timeline area")
|
||
|
||
except Exception as e:
|
||
print(f"❌ Error during drag end: {e}")
|
||
|
||
# Reset appearance to normal
|
||
widget.configure(bg=self.colors['bg_secondary'], relief='solid', bd=1)
|
||
self.dragging_media = None
|
||
|
||
def on_drag(event):
|
||
# Visual feedback during drag - keep blue highlight
|
||
if hasattr(self, 'dragging_media') and self.dragging_media:
|
||
widget.configure(bg=self.colors['accent_blue'], relief='solid', bd=2)
|
||
|
||
def on_hover_enter(event):
|
||
# Highlight on mouse hover (but not as strong as drag)
|
||
if not (hasattr(self, 'dragging_media') and self.dragging_media):
|
||
widget.configure(bg=self.colors['bg_hover'], relief='solid', bd=1)
|
||
|
||
def on_hover_leave(event):
|
||
# Remove highlight when mouse leaves (unless dragging)
|
||
if not (hasattr(self, 'dragging_media') and self.dragging_media):
|
||
widget.configure(bg=self.colors['bg_secondary'], relief='solid', bd=1)
|
||
|
||
# Bind drag events
|
||
widget.bind("<Button-1>", start_drag)
|
||
widget.bind("<ButtonRelease-1>", end_drag)
|
||
widget.bind("<B1-Motion>", on_drag)
|
||
|
||
# Bind hover events for better visual feedback
|
||
widget.bind("<Enter>", on_hover_enter)
|
||
widget.bind("<Leave>", on_hover_leave)
|
||
|
||
# Make all child widgets draggable too
|
||
for child in widget.winfo_children():
|
||
child.bind("<Button-1>", start_drag)
|
||
child.bind("<ButtonRelease-1>", end_drag)
|
||
child.bind("<B1-Motion>", on_drag)
|
||
child.bind("<Enter>", on_hover_enter)
|
||
child.bind("<Leave>", on_hover_leave)
|
||
for grandchild in child.winfo_children():
|
||
grandchild.bind("<Button-1>", start_drag)
|
||
grandchild.bind("<ButtonRelease-1>", end_drag)
|
||
grandchild.bind("<B1-Motion>", on_drag)
|
||
grandchild.bind("<Enter>", on_hover_enter)
|
||
grandchild.bind("<Leave>", on_hover_leave)
|
||
|
||
def handle_media_drop_on_timeline(self, mouse_x, mouse_y):
|
||
"""Handle dropping media from bin onto timeline"""
|
||
if not self.dragging_media:
|
||
return
|
||
|
||
try:
|
||
# Convert screen coordinates to timeline coordinates
|
||
timeline_x = self.timeline_canvas.winfo_rootx()
|
||
timeline_y = self.timeline_canvas.winfo_rooty()
|
||
|
||
# Calculate relative position on timeline
|
||
rel_x = mouse_x - timeline_x
|
||
rel_y = mouse_y - timeline_y
|
||
|
||
# Convert to timeline time and track
|
||
# Use a safe scale calculation to avoid division by zero
|
||
scale = max(self.timeline_scale, 1.0) # Ensure scale is at least 1.0
|
||
time_position = rel_x / scale
|
||
|
||
# Determine which track based on Y position
|
||
target_track = None
|
||
for track_id, track_info in self.tracks.items():
|
||
track_top = track_info['y_offset']
|
||
track_bottom = track_top + track_info['height']
|
||
|
||
if track_top <= rel_y <= track_bottom:
|
||
# Check if media type matches track type
|
||
media_type = self.dragging_media['type']
|
||
track_type = track_info['type']
|
||
|
||
if (media_type == 'video' and track_type == 'video') or \
|
||
(media_type == 'audio' and track_type == 'audio'):
|
||
target_track = track_id
|
||
break
|
||
|
||
if target_track:
|
||
# Create a clip on the timeline with reasonable duration
|
||
clip_info = {
|
||
'file_path': self.dragging_media['path'],
|
||
'filename': self.dragging_media['filename'],
|
||
'start_time': max(0, time_position),
|
||
'duration': 3.0, # Reasonable default duration for timeline clips
|
||
'track': target_track,
|
||
'type': self.dragging_media['type']
|
||
}
|
||
|
||
# Add clip to timeline
|
||
if not hasattr(self, 'timeline_clips'):
|
||
self.timeline_clips = []
|
||
|
||
self.timeline_clips.append(clip_info)
|
||
self.update_timeline() # Refresh timeline display
|
||
|
||
print(f"✅ Added {self.dragging_media['filename']} to {target_track} at {time_position:.1f}s")
|
||
else:
|
||
print(f"❌ Cannot drop {self.dragging_media['type']} file on this track location")
|
||
|
||
except Exception as e:
|
||
print(f"❌ Error handling drop: {e}")
|
||
|
||
def draw_track_road_lines(self):
|
||
"""Draw road lines for track visual separation"""
|
||
# Clear existing lines
|
||
self.track_road_canvas.delete("road_lines")
|
||
|
||
# Get canvas dimensions
|
||
canvas_width = self.track_panel.winfo_width() or 120
|
||
canvas_height = self.track_panel.winfo_height() or 320
|
||
|
||
# Draw horizontal road lines for each track
|
||
for track_id, track_info in self.tracks.items():
|
||
y_pos = track_info['y_offset']
|
||
track_height = track_info['height']
|
||
|
||
# Top line of track
|
||
self.track_road_canvas.create_line(
|
||
0, y_pos, canvas_width, y_pos,
|
||
fill=self.colors['border'], width=1, tags="road_lines"
|
||
)
|
||
|
||
# Bottom line of track
|
||
self.track_road_canvas.create_line(
|
||
0, y_pos + track_height, canvas_width, y_pos + track_height,
|
||
fill=self.colors['border'], width=3, tags="road_lines"
|
||
)
|
||
|
||
# Track type indicator line (left edge with track color)
|
||
self.track_road_canvas.create_line(
|
||
0, y_pos, 0, y_pos + track_height,
|
||
fill=track_info['color'], width=1, tags="road_lines"
|
||
)
|
||
|
||
# Center dashed line for alignment reference
|
||
center_x = canvas_width // 2
|
||
for y in range(0, int(canvas_height), 10):
|
||
self.track_road_canvas.create_line(
|
||
center_x, y, center_x, y + 5,
|
||
fill=self.colors['text_secondary'], width=1,
|
||
tags="road_lines", dash=(2, 3)
|
||
)
|
||
|
||
def on_track_panel_resize(self, event):
|
||
"""Handle track panel resize to redraw road lines"""
|
||
if hasattr(self, 'track_road_canvas'):
|
||
# Small delay to ensure canvas size is updated
|
||
self.editor_window.after(10, self.draw_track_road_lines)
|
||
|
||
def sync_vertical_scroll(self, *args):
|
||
"""Vertical scrolling disabled - no scrollbars for up/down movement"""
|
||
# Vertical scrolling removed for cleaner interface
|
||
pass
|
||
|
||
def create_track_control(self, track_id, track_info):
|
||
"""Create control panel for a single track positioned on road lines"""
|
||
# Calculate precise positioning based on track offset
|
||
y_position = track_info['y_offset']
|
||
track_height = track_info['height']
|
||
|
||
# Create control buttons frame positioned on canvas
|
||
controls_frame = tk.Frame(self.track_road_canvas, bg=self.colors['bg_secondary'])
|
||
|
||
# Control buttons container
|
||
controls = tk.Frame(controls_frame, bg=self.colors['bg_secondary'])
|
||
controls.pack(padx=5, pady=2)
|
||
|
||
# Track name label (small, top of controls)
|
||
name_label = tk.Label(controls, text=track_info['name'],
|
||
font=('Arial', 7, 'bold'), bg=self.colors['bg_secondary'],
|
||
fg=track_info['color'])
|
||
name_label.pack(anchor="center")
|
||
|
||
# Button container
|
||
button_container = tk.Frame(controls, bg=self.colors['bg_secondary'])
|
||
button_container.pack()
|
||
|
||
# Mute button
|
||
mute_text = "🔇" if track_info['muted'] else "🔊"
|
||
mute_btn = tk.Button(button_container, text=mute_text, width=2, height=1,
|
||
bg=self.colors['accent_red'] if track_info['muted'] else self.colors['bg_tertiary'],
|
||
fg='white', font=('Arial', 7), relief="flat", bd=0,
|
||
command=lambda: self.toggle_track_mute(track_id))
|
||
mute_btn.pack(side="left", padx=1)
|
||
|
||
# Solo button
|
||
solo_text = "S"
|
||
solo_btn = tk.Button(button_container, text=solo_text, width=2, height=1,
|
||
bg=self.colors['accent_orange'] if track_info['solo'] else self.colors['bg_tertiary'],
|
||
fg='white', font=('Arial', 7, 'bold'), relief="flat", bd=0,
|
||
command=lambda: self.toggle_track_solo(track_id))
|
||
solo_btn.pack(side="left", padx=1)
|
||
|
||
# Lock button
|
||
lock_text = "🔒" if track_info['locked'] else "🔓"
|
||
lock_btn = tk.Button(button_container, text=lock_text, width=2, height=1,
|
||
bg=self.colors['accent_blue'] if track_info['locked'] else self.colors['bg_tertiary'],
|
||
fg='white', font=('Arial', 7), relief="flat", bd=0,
|
||
command=lambda: self.toggle_track_lock(track_id))
|
||
lock_btn.pack(side="left", padx=1)
|
||
|
||
# Position the controls frame on the canvas
|
||
canvas_width = 120
|
||
control_y = y_position + (track_height // 2) - 20 # Center vertically in track
|
||
|
||
self.track_road_canvas.create_window(
|
||
canvas_width // 2, control_y,
|
||
window=controls_frame, anchor="center"
|
||
)
|
||
|
||
# Store track widgets for updates
|
||
self.track_widgets[track_id] = {
|
||
'frame': controls_frame,
|
||
'mute_btn': mute_btn,
|
||
'solo_btn': solo_btn,
|
||
'lock_btn': lock_btn
|
||
}
|
||
|
||
def create_sample_timeline_content(self):
|
||
"""Create sample timeline content for demonstration"""
|
||
if self.current_video and self.video_duration > 0:
|
||
# Create a sample clip representing the loaded video
|
||
sample_clip = {
|
||
'id': 1,
|
||
'name': os.path.basename(self.current_video) if self.current_video else 'Sample Video',
|
||
'start_time': 0,
|
||
'end_time': min(self.video_duration, 10), # Cap at 10 seconds for demo
|
||
'track': 'video_1',
|
||
'color': self.tracks['video_1']['color'],
|
||
'file_path': self.current_video,
|
||
'type': 'video'
|
||
}
|
||
self.timeline_clips = [sample_clip]
|
||
|
||
# Add sample markers
|
||
self.markers = [
|
||
{'time': 2.0, 'name': 'Intro End', 'color': '#ffeb3b'},
|
||
{'time': 5.0, 'name': 'Mid Point', 'color': '#4caf50'},
|
||
{'time': 8.0, 'name': 'Outro Start', 'color': '#f44336'}
|
||
]
|
||
|
||
self.update_timeline()
|
||
|
||
# Professional timeline interaction methods
|
||
def on_mode_change(self, event=None):
|
||
"""Handle editing mode change"""
|
||
self.edit_mode = self.mode_var.get()
|
||
print(f"🎬 Editing mode changed to: {self.edit_mode}")
|
||
|
||
# Update cursor based on mode
|
||
cursor_map = {
|
||
'select': 'hand2',
|
||
'cut': 'crosshair',
|
||
'trim': 'sb_h_double_arrow',
|
||
'ripple': 'fleur'
|
||
}
|
||
self.timeline_canvas.configure(cursor=cursor_map.get(self.edit_mode, 'hand2'))
|
||
|
||
def toggle_snap(self):
|
||
"""Toggle snap to grid"""
|
||
self.snap_enabled = self.snap_var.get()
|
||
print(f"🧲 Snap enabled: {self.snap_enabled}")
|
||
|
||
def toggle_magnetic(self):
|
||
"""Toggle magnetic timeline"""
|
||
self.magnetic_timeline = self.magnetic_var.get()
|
||
print(f"🧲 Magnetic timeline: {self.magnetic_timeline}")
|
||
|
||
def toggle_track_mute(self, track_id):
|
||
"""Toggle track mute"""
|
||
self.tracks[track_id]['muted'] = not self.tracks[track_id]['muted']
|
||
self.update_track_controls()
|
||
print(f"🔇 Track {track_id} muted: {self.tracks[track_id]['muted']}")
|
||
|
||
def toggle_track_solo(self, track_id):
|
||
"""Toggle track solo"""
|
||
self.tracks[track_id]['solo'] = not self.tracks[track_id]['solo']
|
||
self.update_track_controls()
|
||
print(f"🎵 Track {track_id} solo: {self.tracks[track_id]['solo']}")
|
||
|
||
def toggle_track_lock(self, track_id):
|
||
"""Toggle track lock"""
|
||
self.tracks[track_id]['locked'] = not self.tracks[track_id]['locked']
|
||
self.update_track_controls()
|
||
print(f"🔒 Track {track_id} locked: {self.tracks[track_id]['locked']}")
|
||
|
||
def update_track_controls(self):
|
||
"""Update track control button states"""
|
||
for track_id, widgets in self.track_widgets.items():
|
||
track_info = self.tracks[track_id]
|
||
|
||
# Update mute button
|
||
mute_text = "🔇" if track_info['muted'] else "🔊"
|
||
mute_color = self.colors['accent_red'] if track_info['muted'] else self.colors['bg_tertiary']
|
||
widgets['mute_btn'].configure(text=mute_text, bg=mute_color)
|
||
|
||
# Update solo button
|
||
solo_color = self.colors['accent_orange'] if track_info['solo'] else self.colors['bg_tertiary']
|
||
widgets['solo_btn'].configure(bg=solo_color)
|
||
|
||
# Update lock button
|
||
lock_text = "🔒" if track_info['locked'] else "🔓"
|
||
lock_color = self.colors['accent_blue'] if track_info['locked'] else self.colors['bg_tertiary']
|
||
widgets['lock_btn'].configure(text=lock_text, bg=lock_color)
|
||
|
||
def on_zoom_change(self, value):
|
||
"""Handle timeline zoom change"""
|
||
zoom_level = float(value)
|
||
self.timeline_scale = 50 * zoom_level # Base scale of 50 pixels per second
|
||
self.update_timeline()
|
||
print(f"🔍 Timeline zoom: {zoom_level:.1f}x")
|
||
|
||
# Initialize timeline
|
||
self.update_timeline()
|
||
|
||
def create_basic_tools(self, parent):
|
||
"""Create basic editing tools"""
|
||
# Create scrollable frame for tools
|
||
tools_canvas = tk.Canvas(parent, bg=self.colors['bg_secondary'], highlightthickness=0)
|
||
scrollbar = tk.Scrollbar(parent, orient="vertical", command=tools_canvas.yview)
|
||
scrollable_frame = tk.Frame(tools_canvas, bg=self.colors['bg_secondary'])
|
||
|
||
scrollable_frame.bind(
|
||
"<Configure>",
|
||
lambda e: tools_canvas.configure(scrollregion=tools_canvas.bbox("all"))
|
||
)
|
||
|
||
tools_canvas.create_window((0, 0), window=scrollable_frame, anchor="nw")
|
||
tools_canvas.configure(yscrollcommand=scrollbar.set)
|
||
|
||
tools_canvas.pack(side="left", fill="both", expand=True)
|
||
scrollbar.pack(side="right", fill="y")
|
||
|
||
# Trim controls
|
||
trim_frame = tk.LabelFrame(scrollable_frame, text="✂️ Trim Video", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
trim_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
trim_controls = tk.Frame(trim_frame, bg=self.colors['bg_secondary'])
|
||
trim_controls.pack(fill="x", pady=5)
|
||
|
||
tk.Label(trim_controls, text="Start:", font=self.fonts['caption'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_secondary']).pack(side="left")
|
||
|
||
self.trim_start_var = tk.DoubleVar(value=0.0)
|
||
trim_start_spin = tk.Spinbox(trim_controls, from_=0, to=999, increment=0.1,
|
||
textvariable=self.trim_start_var, width=8,
|
||
font=self.fonts['caption'])
|
||
trim_start_spin.pack(side="left", padx=5)
|
||
|
||
tk.Label(trim_controls, text="End:", font=self.fonts['caption'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_secondary']).pack(side="left", padx=(10, 0))
|
||
|
||
self.trim_end_var = tk.DoubleVar(value=10.0)
|
||
trim_end_spin = tk.Spinbox(trim_controls, from_=0, to=999, increment=0.1,
|
||
textvariable=self.trim_end_var, width=8,
|
||
font=self.fonts['caption'])
|
||
trim_end_spin.pack(side="left", padx=5)
|
||
|
||
trim_btn = tk.Button(trim_frame, text="✂️ Apply Trim", command=self.apply_trim,
|
||
bg=self.colors['accent_blue'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
trim_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Speed controls
|
||
speed_frame = tk.LabelFrame(scrollable_frame, text="⚡ Speed Control", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
speed_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
self.speed_var = tk.DoubleVar(value=1.0)
|
||
speed_scale = tk.Scale(speed_frame, from_=0.25, to=3.0, resolution=0.25,
|
||
orient="horizontal", variable=self.speed_var,
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
highlightthickness=0, troughcolor=self.colors['bg_tertiary'])
|
||
speed_scale.pack(fill="x", pady=5, padx=10)
|
||
|
||
speed_btn = tk.Button(speed_frame, text="⚡ Apply Speed", command=self.apply_speed,
|
||
bg=self.colors['accent_green'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
speed_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Volume controls
|
||
volume_frame = tk.LabelFrame(scrollable_frame, text="🔊 Volume Control", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
volume_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
self.volume_var = tk.DoubleVar(value=1.0)
|
||
volume_scale = tk.Scale(volume_frame, from_=0.0, to=2.0, resolution=0.1,
|
||
orient="horizontal", variable=self.volume_var,
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
highlightthickness=0, troughcolor=self.colors['bg_tertiary'])
|
||
volume_scale.pack(fill="x", pady=5, padx=10)
|
||
|
||
volume_btn = tk.Button(volume_frame, text="🔊 Apply Volume", command=self.apply_volume,
|
||
bg=self.colors['accent_orange'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
volume_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Resize controls
|
||
resize_frame = tk.LabelFrame(scrollable_frame, text="📐 Resize Video", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
resize_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Preset sizes
|
||
preset_frame = tk.Frame(resize_frame, bg=self.colors['bg_secondary'])
|
||
preset_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
tk.Label(preset_frame, text="Presets:", font=self.fonts['caption'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_secondary']).pack(anchor="w")
|
||
|
||
presets = [("9:16 (1080x1920)", 1080, 1920), ("16:9 (1920x1080)", 1920, 1080), ("1:1 (1080x1080)", 1080, 1080)]
|
||
for name, width, height in presets:
|
||
btn = tk.Button(preset_frame, text=name,
|
||
command=lambda w=width, h=height: self.apply_resize(w, h),
|
||
bg=self.colors['bg_tertiary'], fg=self.colors['text_primary'],
|
||
font=self.fonts['caption'], relief="flat", bd=0, cursor="hand2")
|
||
btn.pack(fill="x", pady=2)
|
||
|
||
def create_video_effects_tools(self, parent):
|
||
"""Create video effects tools"""
|
||
# Create scrollable frame for video effects
|
||
effects_canvas = tk.Canvas(parent, bg=self.colors['bg_secondary'], highlightthickness=0)
|
||
scrollbar_effects = tk.Scrollbar(parent, orient="vertical", command=effects_canvas.yview)
|
||
scrollable_effects_frame = tk.Frame(effects_canvas, bg=self.colors['bg_secondary'])
|
||
|
||
scrollable_effects_frame.bind(
|
||
"<Configure>",
|
||
lambda e: effects_canvas.configure(scrollregion=effects_canvas.bbox("all"))
|
||
)
|
||
|
||
effects_canvas.create_window((0, 0), window=scrollable_effects_frame, anchor="nw")
|
||
effects_canvas.configure(yscrollcommand=scrollbar_effects.set)
|
||
|
||
effects_canvas.pack(side="left", fill="both", expand=True)
|
||
scrollbar_effects.pack(side="right", fill="y")
|
||
|
||
# Fade effects
|
||
fade_frame = tk.LabelFrame(scrollable_effects_frame, text="🌅 Fade Effects", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
fade_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
fade_btn = tk.Button(fade_frame, text="🌅 Add Fade In/Out", command=self.apply_fade,
|
||
bg=self.colors['accent_blue'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
fade_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Text overlay
|
||
text_frame = tk.LabelFrame(scrollable_effects_frame, text="📝 Text Overlay", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
text_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
text_controls = tk.Frame(text_frame, bg=self.colors['bg_secondary'])
|
||
text_controls.pack(fill="x", padx=10, pady=5)
|
||
|
||
tk.Label(text_controls, text="Text:", font=self.fonts['caption'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_secondary']).pack(anchor="w")
|
||
|
||
self.text_var = tk.StringVar(value="Sample Text")
|
||
text_entry = tk.Entry(text_controls, textvariable=self.text_var, font=self.fonts['body'],
|
||
width=25)
|
||
text_entry.pack(fill="x", pady=5)
|
||
|
||
text_btn = tk.Button(text_frame, text="📝 Add Text", command=self.apply_text,
|
||
bg=self.colors['accent_green'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
text_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
def create_audio_effects_tools(self, parent):
|
||
"""Create audio effects tools"""
|
||
# Create scrollable frame for audio effects
|
||
audio_canvas = tk.Canvas(parent, bg=self.colors['bg_secondary'], highlightthickness=0)
|
||
scrollbar_audio = tk.Scrollbar(parent, orient="vertical", command=audio_canvas.yview)
|
||
scrollable_audio_frame = tk.Frame(audio_canvas, bg=self.colors['bg_secondary'])
|
||
|
||
scrollable_audio_frame.bind(
|
||
"<Configure>",
|
||
lambda e: audio_canvas.configure(scrollregion=audio_canvas.bbox("all"))
|
||
)
|
||
|
||
audio_canvas.create_window((0, 0), window=scrollable_audio_frame, anchor="nw")
|
||
audio_canvas.configure(yscrollcommand=scrollbar_audio.set)
|
||
|
||
audio_canvas.pack(side="left", fill="both", expand=True)
|
||
scrollbar_audio.pack(side="right", fill="y")
|
||
|
||
# Volume controls (detailed)
|
||
volume_frame = tk.LabelFrame(scrollable_audio_frame, text="🔊 Volume Controls", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
volume_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Volume slider with fine control
|
||
self.audio_volume_var = tk.DoubleVar(value=1.0)
|
||
volume_scale = tk.Scale(volume_frame, from_=0.0, to=3.0, resolution=0.01,
|
||
orient="horizontal", variable=self.audio_volume_var,
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
highlightthickness=0, length=250)
|
||
volume_scale.pack(fill="x", pady=5, padx=10)
|
||
|
||
volume_btn = tk.Button(volume_frame, text="🔊 Apply Volume", command=self.apply_volume,
|
||
bg=self.colors['accent_blue'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
volume_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Audio info
|
||
info_frame = tk.LabelFrame(scrollable_audio_frame, text="ℹ️ Audio Information", font=self.fonts['body'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
info_frame.pack(fill="x", padx=10, pady=5)
|
||
|
||
info_text = tk.Label(info_frame,
|
||
text="Audio effects require external tools\nor MoviePy installation.\n\nFor advanced audio editing:\n• Audacity (free)\n• FFmpeg\n• pip install moviepy",
|
||
font=self.fonts['caption'], bg=self.colors['bg_secondary'],
|
||
fg=self.colors['text_secondary'], justify="left")
|
||
info_text.pack(padx=10, pady=5)
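        # For example, a typical FFmpeg command for adjusting volume outside this
        # editor (assuming FFmpeg is installed and on the PATH) would be:
        #   ffmpeg -i input.mp4 -filter:a "volume=1.5" -c:v copy output.mp4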
|
||
|
||
def create_export_tools(self, parent):
|
||
"""Create export tools"""
|
||
export_frame = tk.LabelFrame(parent, text="Export", font=self.fonts['heading'],
|
||
bg=self.colors['bg_secondary'], fg=self.colors['text_primary'],
|
||
relief="flat", bd=1)
|
||
export_frame.pack(fill="x", padx=15, pady=5)
|
||
|
||
# Reset button
|
||
reset_btn = tk.Button(export_frame, text="🔄 Reset", command=self.reset_video,
|
||
bg=self.colors['accent_red'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
reset_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
# Export button
|
||
export_btn = tk.Button(export_frame, text="💾 Export Video", command=self.export_video,
|
||
bg=self.colors['accent_green'], fg='white', font=self.fonts['button'],
|
||
relief="flat", bd=0, cursor="hand2")
|
||
export_btn.pack(fill="x", padx=10, pady=5)
|
||
|
||
def select_video_file(self):
|
||
"""Select a video file to edit"""
|
||
# Check for videos in shorts folder first
|
||
if os.path.exists(self.shorts_folder):
|
||
video_files = [f for f in os.listdir(self.shorts_folder)
|
||
if f.lower().endswith(('.mp4', '.avi', '.mov', '.mkv'))]
|
||
|
||
if video_files:
|
||
# Show selection dialog for shorts
|
||
choice_window = tk.Toplevel(self.editor_window)
|
||
choice_window.title("Select Video to Edit")
|
||
choice_window.geometry("400x300")
|
||
choice_window.configure(bg=self.colors['bg_primary'])
|
||
choice_window.transient(self.editor_window)
|
||
choice_window.grab_set()
|
||
|
||
tk.Label(choice_window, text="Select a video to edit:",
|
||
font=self.fonts['heading'], bg=self.colors['bg_primary'],
|
||
fg=self.colors['text_primary']).pack(pady=10)
|
||
|
||
selected_file = None
|
||
|
||
def select_file(filename):
|
||
nonlocal selected_file
|
||
selected_file = os.path.join(self.shorts_folder, filename)
|
||
choice_window.destroy()
|
||
|
||
# List videos
|
||
for video_file in video_files:
|
||
btn = tk.Button(choice_window, text=f"📹 {video_file}",
|
||
command=lambda f=video_file: select_file(f),
|
||
bg=self.colors['accent_blue'], fg='white',
|
||
font=self.fonts['button'], relief="flat", bd=0,
|
||
cursor="hand2")
|
||
btn.pack(fill="x", padx=20, pady=2)
|
||
|
||
# Browse button
|
||
browse_btn = tk.Button(choice_window, text="📁 Browse Other Files",
|
||
command=lambda: self.browse_video_file(choice_window),
|
||
bg=self.colors['accent_orange'], fg='white',
|
||
font=self.fonts['button'], relief="flat", bd=0,
|
||
cursor="hand2")
|
||
browse_btn.pack(fill="x", padx=20, pady=10)
|
||
|
||
choice_window.wait_window()
|
||
|
||
if selected_file:
|
||
self.load_video(selected_file)
|
||
else:
|
||
self.browse_video_file()
|
||
else:
|
||
self.browse_video_file()
|
||
|
||
def browse_video_file(self, parent_window=None):
|
||
"""Browse for video file"""
|
||
filetypes = [
|
||
("Video files", "*.mp4 *.avi *.mov *.mkv *.wmv *.flv *.webm"),
|
||
("All files", "*.*")
|
||
]
|
||
|
||
file_path = filedialog.askopenfilename(
|
||
title="Select Video File",
|
||
filetypes=filetypes,
|
||
parent=parent_window or self.editor_window
|
||
)
|
||
|
||
if file_path:
|
||
if parent_window:
|
||
parent_window.destroy()
|
||
self.load_video(file_path)
|
||
|
||
def load_video(self, video_path):
|
||
"""Load a video for editing"""
|
||
try:
|
||
# Clean up previous video
|
||
if hasattr(self, 'current_clip') and self.current_clip:
|
||
if MOVIEPY_AVAILABLE:
|
||
self.current_clip.close()
|
||
else:
|
||
if hasattr(self.current_clip, 'release'):
|
||
self.current_clip.release()
|
||
|
||
# Load new video
|
||
self.current_video = video_path
|
||
|
||
if MOVIEPY_AVAILABLE:
|
||
# Use MoviePy for full functionality
|
||
self.current_clip = VideoFileClip(video_path)
|
||
self.video_duration = self.current_clip.duration
|
||
self.current_time = 0.0
|
||
|
||
# Display first frame
|
||
self.display_frame_at_time(0.0)
|
||
else:
|
||
# Use OpenCV for basic functionality
|
||
self.current_clip = cv2.VideoCapture(video_path)
|
||
if not self.current_clip.isOpened():
|
||
raise Exception("Could not open video file")
|
||
|
||
# Get video properties
|
||
fps = self.current_clip.get(cv2.CAP_PROP_FPS)
|
||
frame_count = self.current_clip.get(cv2.CAP_PROP_FRAME_COUNT)
|
||
self.video_duration = frame_count / fps if fps > 0 else 0
|
||
self.current_time = 0.0
|
||
|
||
# Display first frame
|
||
self.display_frame_at_time_opencv(0.0)
|
||
|
||
# Update UI
|
||
filename = os.path.basename(video_path)
|
||
self.current_file_label.config(text=filename)
|
||
|
||
# Add video to media bin automatically
|
||
if hasattr(self, 'media_files'):
|
||
self.add_media_to_bin(video_path)
|
||
|
||
# Update trim controls
|
||
self.trim_start_var.set(0.0)
|
||
self.trim_end_var.set(self.video_duration)
|
||
|
||
# Update timeline
|
||
self.update_timeline()
|
||
self.update_time_display()
|
||
|
||
backend = "MoviePy" if MOVIEPY_AVAILABLE else "OpenCV"
|
||
print(f"✅ Loaded video: {filename} ({self.video_duration:.1f}s) using {backend}")
|
||
|
||
if not MOVIEPY_AVAILABLE:
|
||
messagebox.showinfo("Limited Functionality",
|
||
"Video editor is running with limited functionality.\n" +
|
||
"Only basic playback and timeline controls are available.\n" +
|
||
"For full editing features, install MoviePy:\n" +
|
||
"pip install moviepy")
|
||
|
||
except Exception as e:
|
||
messagebox.showerror("Load Error", f"Could not load video: {e}")
|
||
|
||
def display_frame_at_time_opencv(self, time_sec):
|
||
"""Display a specific frame using OpenCV"""
|
||
if not self.current_clip or not hasattr(self.current_clip, 'get'):
|
||
return
|
||
|
||
try:
|
||
# Calculate frame number
|
||
fps = self.current_clip.get(cv2.CAP_PROP_FPS)
|
||
frame_number = int(time_sec * fps)
|
||
|
||
# Set video position
|
||
self.current_clip.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
|
||
|
||
# Read frame
|
||
ret, frame = self.current_clip.read()
|
||
if not ret:
|
||
return
|
||
|
||
# Convert BGR to RGB
|
||
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
||
|
||
# Convert to PIL Image
|
||
pil_image = Image.fromarray(frame_rgb)
|
||
|
||
# Resize to fit canvas while maintaining aspect ratio
|
||
canvas_width = self.video_canvas.winfo_width()
|
||
canvas_height = self.video_canvas.winfo_height()
|
||
|
||
if canvas_width > 1 and canvas_height > 1:
|
||
# Calculate scaling to fit canvas
|
||
scale_w = canvas_width / pil_image.width
|
||
scale_h = canvas_height / pil_image.height
|
||
scale = min(scale_w, scale_h)
|
||
|
||
new_width = int(pil_image.width * scale)
|
||
new_height = int(pil_image.height * scale)
|
||
|
||
pil_image = pil_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
|
||
|
||
# Convert to PhotoImage
|
||
photo = ImageTk.PhotoImage(pil_image)
|
||
|
||
# Clear canvas and display image
|
||
self.video_canvas.delete("all")
|
||
x = canvas_width // 2
|
||
y = canvas_height // 2
|
||
self.video_canvas.create_image(x, y, image=photo)
|
||
|
||
# Keep reference to prevent garbage collection
|
||
self.video_canvas.image = photo
|
||
|
||
except Exception as e:
|
||
print(f"Frame display error: {e}")
|
||
|
||
    def display_frame_at_time(self, time_sec):
        """Display a specific frame from the video"""
        if not self.current_clip:
            return

        if MOVIEPY_AVAILABLE:
            self.display_frame_at_time_moviepy(time_sec)
        else:
            self.display_frame_at_time_opencv(time_sec)
|
||
|
||
def display_frame_at_time_moviepy(self, time_sec):
|
||
"""Display a specific frame using MoviePy"""
|
||
try:
|
||
# Get frame at specified time
|
||
time_sec = max(0, min(time_sec, self.video_duration))
|
||
frame = self.current_clip.get_frame(time_sec)
|
||
|
||
# Convert to PIL Image
|
||
if frame.dtype != np.uint8:
|
||
frame = (frame * 255).astype(np.uint8)
|
||
|
||
pil_image = Image.fromarray(frame)
|
||
|
||
# Resize to fit canvas while maintaining aspect ratio
|
||
canvas_width = self.video_canvas.winfo_width()
|
||
canvas_height = self.video_canvas.winfo_height()
|
||
|
||
if canvas_width > 1 and canvas_height > 1:
|
||
# Calculate scaling to fit canvas
|
||
scale_w = canvas_width / pil_image.width
|
||
scale_h = canvas_height / pil_image.height
|
||
scale = min(scale_w, scale_h)
|
||
|
||
new_width = int(pil_image.width * scale)
|
||
new_height = int(pil_image.height * scale)
|
||
|
||
pil_image = pil_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
|
||
|
||
# Convert to PhotoImage
|
||
photo = ImageTk.PhotoImage(pil_image)
|
||
|
||
# Clear canvas and display image
|
||
self.video_canvas.delete("all")
|
||
x = canvas_width // 2
|
||
y = canvas_height // 2
|
||
self.video_canvas.create_image(x, y, image=photo)
|
||
|
||
# Keep reference to prevent garbage collection
|
||
self.video_canvas.image = photo
|
||
|
||
except Exception as e:
|
||
print(f"Frame display error: {e}")
|
||
|
||
def update_timeline(self):
|
||
"""Update the timeline display"""
|
||
if not self.timeline_canvas.winfo_exists():
|
||
return
|
||
|
||
self.timeline_canvas.delete("all")
|
||
|
||
canvas_width = self.timeline_canvas.winfo_width()
|
||
canvas_height = self.timeline_canvas.winfo_height()
|
||
|
||
if canvas_width <= 1:
|
||
return
|
||
|
||
# Calculate timeline scale based on video duration or default
|
||
video_duration = getattr(self, 'video_duration', 10.0) # Default 10 seconds if no video
|
||
self.timeline_scale = (canvas_width - 40) / max(video_duration, 1)
|
||
|
||
# Draw timeline background
|
||
self.timeline_canvas.create_rectangle(20, 20, canvas_width - 20, canvas_height - 20,
|
||
fill=self.colors['bg_primary'], outline=self.colors['border'])
|
||
|
||
# Draw track road lines
|
||
self.draw_timeline_track_roads(canvas_width, canvas_height)
|
||
|
||
# Draw dropped clips from media bin
|
||
self.draw_timeline_clips(canvas_width, canvas_height)
|
||
|
||
# Draw time markers
|
||
for i in range(0, int(video_duration) + 1):
|
||
x = 20 + i * self.timeline_scale
|
||
if x < canvas_width - 20:
|
||
self.timeline_canvas.create_line(x, 20, x, canvas_height - 20,
|
||
fill=self.colors['border'], width=1)
|
||
|
||
# Time labels
|
||
if i % 2 == 0: # Every 2 seconds
|
||
self.timeline_canvas.create_text(x, canvas_height - 35,
|
||
text=f"{i}s", fill=self.colors['text_secondary'],
|
||
font=self.fonts['caption'])
|
||
|
||
# Draw playhead if there's a current clip
|
||
if hasattr(self, 'current_clip') and self.current_clip:
|
||
playhead_x = 20 + self.current_time * self.timeline_scale
|
||
self.timeline_canvas.create_line(playhead_x, 20, playhead_x, canvas_height - 20,
|
||
fill=self.colors['accent_blue'], width=3)
|
||
|
||
# Draw playhead handle
|
||
self.timeline_canvas.create_oval(playhead_x - 5, 15, playhead_x + 5, 25,
|
||
fill=self.colors['accent_blue'], outline='white')
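        # Timeline mapping used above: a 20 px margin is reserved on each side, so
        # timeline_scale = (canvas_width - 40) / duration gives pixels per second,
        # x = 20 + t * timeline_scale converts a time t to a canvas x position,
        # and t = (x - 20) / timeline_scale converts a click back into a time.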
|
||
|
||
def draw_timeline_track_roads(self, canvas_width, canvas_height):
|
||
"""Draw track road lines on timeline canvas"""
|
||
left_margin = 20
|
||
right_margin = 20
|
||
|
||
# Draw horizontal road lines for each track
|
||
for track_id, track_info in self.tracks.items():
|
||
y_pos = track_info['y_offset']
|
||
track_height = track_info['height']
|
||
|
||
# Top line of track
|
||
self.timeline_canvas.create_line(
|
||
left_margin, y_pos, canvas_width - right_margin, y_pos,
|
||
fill=self.colors['border'], width=1, tags="track_roads"
|
||
)
|
||
|
||
# Bottom line of track
|
||
self.timeline_canvas.create_line(
|
||
left_margin, y_pos + track_height, canvas_width - right_margin, y_pos + track_height,
|
||
fill=self.colors['border'], width=1, tags="track_roads"
|
||
)
|
||
|
||
# Track type indicator line (left edge with track color)
|
||
self.timeline_canvas.create_line(
|
||
left_margin, y_pos, left_margin, y_pos + track_height,
|
||
fill=track_info['color'], width=4, tags="track_roads"
|
||
)
|
||
|
||
# Track name label on the left
|
||
self.timeline_canvas.create_text(
|
||
left_margin + 50, y_pos + track_height // 2,
|
||
text=track_info['name'], fill=track_info['color'],
|
||
font=('Arial', 8, 'bold'), anchor="center", tags="track_roads"
|
||
)
|
||
|
||
def draw_timeline_clips(self, canvas_width, canvas_height):
|
||
"""Draw clips dropped from media bin onto timeline"""
|
||
if not hasattr(self, 'timeline_clips'):
|
||
return
|
||
|
||
for i, clip in enumerate(self.timeline_clips):
|
||
track_info = self.tracks.get(clip['track'])
|
||
if not track_info:
|
||
continue
|
||
|
||
# Calculate clip position and size with better scaling
|
||
clip_x = 20 + clip['start_time'] * self.timeline_scale
|
||
clip_width = max(clip['duration'] * self.timeline_scale, 20) # Minimum width of 20px
|
||
|
||
# Ensure clip fits within canvas bounds
|
||
max_x = canvas_width - 20
|
||
if clip_x + clip_width > max_x:
|
||
clip_width = max_x - clip_x
|
||
|
||
clip_y = track_info['y_offset'] + 2
|
||
clip_height = track_info['height'] - 4
|
||
|
||
# Only draw if clip is visible
|
||
if clip_x < canvas_width - 20 and clip_x + clip_width > 20:
|
||
# Determine if this clip is selected
|
||
is_selected = (hasattr(self, 'selected_timeline_clip') and
|
||
self.selected_timeline_clip == i)
|
||
|
||
# Clip background with selection highlighting
|
||
clip_color = '#4CAF50' if clip['type'] == 'video' else '#FF9800'
|
||
outline_color = '#FFD700' if is_selected else 'white' # Gold outline for selected
|
||
outline_width = 3 if is_selected else 2
|
||
|
||
clip_rect = self.timeline_canvas.create_rectangle(clip_x, clip_y, clip_x + clip_width, clip_y + clip_height,
|
||
fill=clip_color, outline=outline_color, width=outline_width,
|
||
tags=f"clip_{i}")
|
||
|
||
# Add resize handles (small rectangles on the edges)
|
||
handle_size = 8
|
||
handle_color = '#FFD700' if is_selected else 'white'
|
||
|
||
# Left resize handle
|
||
left_handle = self.timeline_canvas.create_rectangle(
|
||
clip_x, clip_y, clip_x + handle_size, clip_y + clip_height,
|
||
fill=handle_color, outline='gray', width=1, tags=f"resize_left_{i}")
|
||
|
||
# Right resize handle
|
||
right_handle = self.timeline_canvas.create_rectangle(
|
||
clip_x + clip_width - handle_size, clip_y, clip_x + clip_width, clip_y + clip_height,
|
||
fill=handle_color, outline='gray', width=1, tags=f"resize_right_{i}")
|
||
|
||
# Make clip draggable and resizable
|
||
self.setup_clip_interaction(clip_rect, left_handle, right_handle, i)
|
||
|
||
# Clip filename (truncated)
|
||
filename = clip['filename'][:12] + "..." if len(clip['filename']) > 12 else clip['filename']
|
||
if clip_width > 40: # Only show text if clip is wide enough
|
||
text_color = 'black' if is_selected else 'white'
|
||
self.timeline_canvas.create_text(clip_x + clip_width//2, clip_y + clip_height // 2,
|
||
text=filename, fill=text_color,
|
||
font=('Arial', 8, 'bold'), anchor='center',
|
||
tags=f"clip_text_{i}")
|
||
|
||
def setup_clip_interaction(self, clip_rect, left_handle, right_handle, clip_index):
|
||
"""Setup interaction for clip resizing and moving"""
|
||
|
||
def start_resize_left(event):
|
||
self.resizing_clip = {'index': clip_index, 'side': 'left', 'start_x': event.x}
|
||
self.timeline_canvas.configure(cursor="sb_h_double_arrow")
|
||
|
||
def start_resize_right(event):
|
||
self.resizing_clip = {'index': clip_index, 'side': 'right', 'start_x': event.x}
|
||
self.timeline_canvas.configure(cursor="sb_h_double_arrow")
|
||
|
||
def start_move_clip(event):
|
||
self.moving_clip = {'index': clip_index, 'start_x': event.x}
|
||
self.timeline_canvas.configure(cursor="hand2")
|
||
|
||
def on_resize_drag(event):
|
||
if hasattr(self, 'resizing_clip') and self.resizing_clip:
|
||
self.handle_clip_resize(event)
|
||
elif hasattr(self, 'moving_clip') and self.moving_clip:
|
||
self.handle_clip_move(event)
|
||
|
||
def end_interaction(event):
|
||
if hasattr(self, 'resizing_clip'):
|
||
self.resizing_clip = None
|
||
if hasattr(self, 'moving_clip'):
|
||
self.moving_clip = None
|
||
self.timeline_canvas.configure(cursor="arrow")
|
||
|
||
# Keep the clip selected after interaction
|
||
if hasattr(self, 'selected_timeline_clip') and self.selected_timeline_clip == clip_index:
|
||
print(f"🎯 Clip {clip_index} modified and still selected")
|
||
|
||
self.update_timeline()
|
||
|
||
# Bind resize handles
|
||
self.timeline_canvas.tag_bind(left_handle, "<Button-1>", start_resize_left)
|
||
self.timeline_canvas.tag_bind(right_handle, "<Button-1>", start_resize_right)
|
||
|
||
# Bind clip body for moving
|
||
self.timeline_canvas.tag_bind(clip_rect, "<Button-1>", start_move_clip)
|
||
|
||
# Bind drag motion and release
|
||
self.timeline_canvas.tag_bind(left_handle, "<B1-Motion>", on_resize_drag)
|
||
self.timeline_canvas.tag_bind(right_handle, "<B1-Motion>", on_resize_drag)
|
||
self.timeline_canvas.tag_bind(clip_rect, "<B1-Motion>", on_resize_drag)
|
||
|
||
self.timeline_canvas.tag_bind(left_handle, "<ButtonRelease-1>", end_interaction)
|
||
self.timeline_canvas.tag_bind(right_handle, "<ButtonRelease-1>", end_interaction)
|
||
self.timeline_canvas.tag_bind(clip_rect, "<ButtonRelease-1>", end_interaction)
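        # Each clip gets its own tag_bind handlers so drag/resize state stays tied
        # to that clip's canvas items; a single canvas-wide bind would have to
        # hit-test every clip on every mouse event instead.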
|
||
|
||
def handle_clip_resize(self, event):
|
||
"""Handle clip resizing"""
|
||
if not self.resizing_clip or not hasattr(self, 'timeline_clips'):
|
||
return
|
||
|
||
clip_index = self.resizing_clip['index']
|
||
if clip_index >= len(self.timeline_clips):
|
||
return
|
||
|
||
clip = self.timeline_clips[clip_index]
|
||
scale = max(self.timeline_scale, 1.0)
|
||
|
||
# Calculate time delta
|
||
time_delta = (event.x - self.resizing_clip['start_x']) / scale
|
||
|
||
if self.resizing_clip['side'] == 'left':
|
||
# Resize from left - adjust start time and duration
|
||
new_start = clip['start_time'] + time_delta
|
||
new_duration = clip['duration'] - time_delta
|
||
|
||
if new_start >= 0 and new_duration >= 0.5: # Minimum 0.5 second duration
|
||
clip['start_time'] = new_start
|
||
clip['duration'] = new_duration
|
||
|
||
elif self.resizing_clip['side'] == 'right':
|
||
# Resize from right - adjust duration only
|
||
new_duration = clip['duration'] + time_delta
|
||
|
||
if new_duration >= 0.5: # Minimum 0.5 second duration
|
||
clip['duration'] = new_duration
|
||
|
||
self.resizing_clip['start_x'] = event.x
|
||
|
||
def handle_clip_move(self, event):
|
||
"""Handle clip moving"""
|
||
if not self.moving_clip or not hasattr(self, 'timeline_clips'):
|
||
return
|
||
|
||
clip_index = self.moving_clip['index']
|
||
if clip_index >= len(self.timeline_clips):
|
||
return
|
||
|
||
clip = self.timeline_clips[clip_index]
|
||
scale = max(self.timeline_scale, 1.0)
|
||
|
||
# Calculate time delta
|
||
time_delta = (event.x - self.moving_clip['start_x']) / scale
|
||
new_start = clip['start_time'] + time_delta
|
||
|
||
if new_start >= 0: # Don't allow negative start times
|
||
clip['start_time'] = new_start
|
||
|
||
self.moving_clip['start_x'] = event.x
|
||
|
||
def check_timeline_clips_at_playhead(self):
|
||
"""Check if playhead is hitting any clips and trigger playback"""
|
||
if not hasattr(self, 'timeline_clips') or not self.timeline_clips:
|
||
return
|
||
|
||
current_time = self.current_time
|
||
|
||
for clip in self.timeline_clips:
|
||
clip_start = clip['start_time']
|
||
clip_end = clip_start + clip['duration']
|
||
|
||
# Check if playhead is within this clip
|
||
if clip_start <= current_time <= clip_end:
|
||
# Calculate relative time within the clip
|
||
relative_time = current_time - clip_start
|
||
|
||
if clip['type'] == 'video':
|
||
self.play_timeline_video_clip(clip, relative_time)
|
||
elif clip['type'] == 'audio':
|
||
self.play_timeline_audio_clip(clip, relative_time)
|
||
|
||
def play_timeline_video_clip(self, clip, relative_time):
|
||
"""Play video clip at the specified relative time"""
|
||
try:
|
||
# Load the video clip if it's different from current
|
||
if not hasattr(self, 'current_timeline_clip') or self.current_timeline_clip != clip['file_path']:
|
||
# For now, just display a frame from the clip
|
||
# This is a simplified implementation - you'd want to load the actual video
|
||
print(f"🎬 Playing video clip: {clip['filename']} at {relative_time:.1f}s")
|
||
self.current_timeline_clip = clip['file_path']
|
||
|
||
# Try to load and display the video clip
|
||
try:
|
||
import cv2
|
||
cap = cv2.VideoCapture(clip['file_path'])
|
||
if cap.isOpened():
|
||
# Seek to the relative time
|
||
fps = cap.get(cv2.CAP_PROP_FPS) or 30
|
||
frame_number = int(relative_time * fps)
|
||
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
|
||
|
||
ret, frame = cap.read()
|
||
if ret:
|
||
# Display this frame in the video player
|
||
self.display_opencv_frame(frame)
|
||
cap.release()
|
||
except Exception as e:
|
||
print(f"⚠️ Error playing video clip: {e}")
|
||
|
||
except Exception as e:
|
||
print(f"⚠️ Timeline video playback error: {e}")
|
||
|
||
def play_timeline_audio_clip(self, clip, relative_time):
|
||
"""Play audio clip at the specified relative time"""
|
||
try:
|
||
# Audio playback would require a library like pygame or similar
|
||
print(f"🎵 Playing audio clip: {clip['filename']} at {relative_time:.1f}s")
|
||
# This is a placeholder - actual audio playback would need implementation
|
||
except Exception as e:
|
||
print(f"⚠️ Timeline audio playback error: {e}")
|
||
|
||
def display_opencv_frame(self, frame):
|
||
"""Display an OpenCV frame in the video player"""
|
||
try:
|
||
import cv2
|
||
from PIL import Image, ImageTk
|
||
|
||
# Convert BGR to RGB
|
||
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
||
|
||
# Convert to PIL Image
|
||
image = Image.fromarray(frame_rgb)
|
||
|
||
# Resize to fit the video player
|
||
if hasattr(self, 'video_canvas'):
|
||
canvas_width = self.video_canvas.winfo_width() or 400
|
||
canvas_height = self.video_canvas.winfo_height() or 300
|
||
image = image.resize((canvas_width, canvas_height), Image.Resampling.LANCZOS)
|
||
|
||
# Convert to PhotoImage
|
||
photo = ImageTk.PhotoImage(image)
|
||
|
||
# Display in video canvas
|
||
if hasattr(self, 'video_canvas'):
|
||
self.video_canvas.delete("all")
|
||
self.video_canvas.create_image(0, 0, anchor="nw", image=photo)
|
||
self.video_canvas.image = photo # Keep a reference
|
||
|
||
except Exception as e:
|
||
print(f"⚠️ Frame display error: {e}")
|
||
|
||
def timeline_click(self, event):
|
||
"""Handle timeline click - CTRL+click for clip selection, regular click for playhead"""
|
||
canvas_width = self.timeline_canvas.winfo_width()
|
||
click_x = event.x
|
||
click_y = event.y
|
||
|
||
# Check if CTRL key is pressed
|
||
ctrl_pressed = (event.state & 0x4) != 0 # CTRL key modifier
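        # event.state is a bitmask of active modifiers; 0x4 is the Control bit in
        # Tk on the common platforms, though modifier bits can vary with platform
        # and keyboard layout.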
|
||
|
||
if ctrl_pressed:
|
||
# CTRL+Click: Select clip without moving playhead
|
||
self.select_clip_at_position(click_x, click_y)
|
||
else:
|
||
# Regular click: Move playhead (existing behavior)
|
||
if not self.current_clip:
|
||
return
|
||
|
||
# Convert click position to time
|
||
relative_x = click_x - 20
|
||
if relative_x >= 0 and relative_x <= canvas_width - 40:
|
||
clicked_time = relative_x / self.timeline_scale
|
||
clicked_time = max(0, min(clicked_time, self.video_duration))
|
||
|
||
# Update current time and display
|
||
self.current_time = clicked_time
|
||
self.display_frame_at_time(self.current_time)
|
||
self.update_timeline()
|
||
self.update_time_display()
|
||
|
||
def select_clip_at_position(self, click_x, click_y):
|
||
"""Select clip at the clicked position without moving playhead"""
|
||
if not hasattr(self, 'timeline_clips') or not self.timeline_clips:
|
||
return
|
||
|
||
# Convert click position to time
|
||
relative_x = click_x - 20
|
||
if relative_x < 0:
|
||
return
|
||
|
||
clicked_time = relative_x / self.timeline_scale
|
||
|
||
# Find which clip was clicked
|
||
for i, clip in enumerate(self.timeline_clips):
|
||
clip_start = clip['start_time']
|
||
clip_end = clip_start + clip['duration']
|
||
|
||
# Check if click is within this clip's time range
|
||
if clip_start <= clicked_time <= clip_end:
|
||
# Check if click is within this clip's track (Y position)
|
||
track_info = self.tracks.get(clip['track'])
|
||
if track_info:
|
||
track_top = track_info['y_offset']
|
||
track_bottom = track_top + track_info['height']
|
||
|
||
if track_top <= click_y <= track_bottom:
|
||
# Select this clip
|
||
self.selected_timeline_clip = i
|
||
print(f"🎯 Selected clip: {clip['filename']} on {clip['track']}")
|
||
|
||
# Update timeline to show selection
|
||
self.update_timeline()
|
||
return
|
||
|
||
# No clip found at click position
|
||
self.selected_timeline_clip = None
|
||
print("🎯 Deselected all clips")
|
||
self.update_timeline()
|
||
|
||
def delete_selected_clip(self, event):
|
||
"""Delete the currently selected timeline clip"""
|
||
if (hasattr(self, 'selected_timeline_clip') and
|
||
self.selected_timeline_clip is not None and
|
||
hasattr(self, 'timeline_clips') and
|
||
self.timeline_clips):
|
||
|
||
if 0 <= self.selected_timeline_clip < len(self.timeline_clips):
|
||
deleted_clip = self.timeline_clips[self.selected_timeline_clip]
|
||
del self.timeline_clips[self.selected_timeline_clip]
|
||
|
||
print(f"🗑️ Deleted clip: {deleted_clip['filename']}")
|
||
|
||
# Clear selection
|
||
self.selected_timeline_clip = None
|
||
|
||
# Update timeline display
|
||
self.update_timeline()
|
||
|
||
def timeline_drag(self, event):
|
||
"""Handle timeline dragging"""
|
||
self.timeline_click(event) # Same behavior as click for now
|
||
|
||
def on_timeline_drag_end(self, event):
|
||
"""End timeline drag operation"""
|
||
if hasattr(self, 'dragging_clip') and self.dragging_clip:
|
||
print(f"🎬 Moved clip '{self.dragging_clip['name']}' to {self.dragging_clip['start_time']:.2f}s")
|
||
|
||
# Clear drag state
|
||
if hasattr(self, 'dragging_clip'):
|
||
self.dragging_clip = None
|
||
if hasattr(self, 'drag_start_x'):
|
||
self.drag_start_x = None
|
||
if hasattr(self, 'drag_start_time'):
|
||
self.drag_start_time = None
|
||
if hasattr(self, 'drag_offset'):
|
||
self.drag_offset = 0
|
||
|
||
def on_timeline_right_click(self, event):
|
||
"""Handle right-click context menu"""
|
||
try:
|
||
canvas_x = self.timeline_canvas.canvasx(event.x)
|
||
canvas_y = self.timeline_canvas.canvasy(event.y)
|
||
clicked_clip = self.get_clip_at_position(canvas_x, canvas_y) if hasattr(self, 'get_clip_at_position') else None
|
||
|
||
# Create context menu
|
||
context_menu = tk.Menu(self.root, tearoff=0, bg=self.colors['bg_secondary'],
|
||
fg=self.colors['text_primary'])
|
||
|
||
if clicked_clip:
|
||
# Clip context menu
|
||
self.selected_clip = clicked_clip
|
||
context_menu.add_command(label=f"Cut '{clicked_clip['name']}'",
|
||
command=lambda: self.cut_clip_at_playhead())
|
||
context_menu.add_command(label=f"Delete '{clicked_clip['name']}'",
|
||
command=lambda: self.delete_clip(clicked_clip))
|
||
context_menu.add_separator()
|
||
context_menu.add_command(label="Duplicate Clip",
|
||
command=lambda: self.duplicate_clip(clicked_clip))
|
||
context_menu.add_command(label="Properties",
|
||
command=lambda: self.show_clip_properties(clicked_clip))
|
||
else:
|
||
# Timeline context menu
|
||
click_time = canvas_x / self.timeline_scale if hasattr(self, 'timeline_scale') else 0
|
||
context_menu.add_command(label="Add Marker",
|
||
command=lambda: self.add_marker_at_time(click_time))
|
||
context_menu.add_command(label="Zoom to Fit", command=self.zoom_to_fit)
|
||
|
||
try:
|
||
context_menu.tk_popup(event.x_root, event.y_root)
|
||
finally:
|
||
context_menu.grab_release()
|
||
except Exception as e:
|
||
print(f"Context menu error: {e}")
|
||
|
||
def on_timeline_double_click(self, event):
|
||
"""Handle timeline double-click"""
|
||
try:
|
||
canvas_x = self.timeline_canvas.canvasx(event.x)
|
||
canvas_y = self.timeline_canvas.canvasy(event.y)
|
||
clicked_clip = self.get_clip_at_position(canvas_x, canvas_y) if hasattr(self, 'get_clip_at_position') else None
|
||
|
||
if clicked_clip:
|
||
self.show_clip_properties(clicked_clip)
|
||
else:
|
||
# Add marker on double-click
|
||
click_time = canvas_x / self.timeline_scale if hasattr(self, 'timeline_scale') else 0
|
||
self.add_marker_at_time(click_time)
|
||
except Exception as e:
|
||
print(f"Double-click error: {e}")
|
||
|
||
    def update_time_display(self):
        """Update the time display"""
        current_min = int(self.current_time // 60)
        current_sec = int(self.current_time % 60)
        total_min = int(self.video_duration // 60)
        total_sec = int(self.video_duration % 60)

        time_text = f"{current_min:02d}:{current_sec:02d} / {total_min:02d}:{total_sec:02d}"
        self.time_display.config(text=time_text)
|
||
|
||
def timeline_play(self):
|
||
"""Start timeline playback"""
|
||
if not self.current_clip:
|
||
return
|
||
|
||
self.timeline_is_playing = True
|
||
self.play_video() # Start actual video playback
|
||
self._start_timeline_playback()
|
||
|
||
def timeline_pause(self):
|
||
"""Pause timeline playback"""
|
||
self.timeline_is_playing = False
|
||
self.pause_video() # Pause actual video
|
||
|
||
def timeline_stop(self):
|
||
"""Stop timeline playback"""
|
||
self.timeline_is_playing = False
|
||
self.stop_video() # Stop actual video
|
||
self.current_time = 0.0
|
||
self.display_frame_at_time(0.0)
|
||
self.update_timeline()
|
||
self.update_time_display()
|
||
|
||
def _start_timeline_playback(self):
|
||
"""Start the timeline playback loop"""
|
||
def playback_loop():
|
||
while self.timeline_is_playing and self.current_time < self.video_duration:
|
||
if not self.is_playing: # Sync with video player state
|
||
break
|
||
|
||
# Update timeline display
|
||
self.editor_window.after(0, self.update_timeline)
|
||
self.editor_window.after(0, self.update_time_display)
|
||
|
||
time.sleep(1/30) # 30 FPS update rate
|
||
|
||
# Playback finished
|
||
self.timeline_is_playing = False
|
||
|
||
if not hasattr(self, 'timeline_thread') or not self.timeline_thread.is_alive():
|
||
self.timeline_thread = threading.Thread(target=playback_loop, daemon=True)
|
||
self.timeline_thread.start()
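        # The loop runs in a daemon thread but schedules all widget updates via
        # editor_window.after(0, ...), which hands the work back to the Tk main
        # loop; Tkinter widgets are not safe to touch from worker threads.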
|
||
|
||
def play_video(self):
|
||
"""Start video playback"""
|
||
if not self.current_clip or self.is_playing:
|
||
return
|
||
|
||
self.is_playing = True
|
||
|
||
def play_thread():
|
||
start_time = time.time()
|
||
start_video_time = self.current_time
|
||
|
||
while self.is_playing and self.current_time < self.video_duration:
|
||
try:
|
||
# Calculate current video time
|
||
elapsed = time.time() - start_time
|
||
self.current_time = start_video_time + elapsed
|
||
|
||
if self.current_time >= self.video_duration:
|
||
self.current_time = self.video_duration
|
||
self.is_playing = False
|
||
break
|
||
|
||
# Update display
|
||
self.display_frame_at_time(self.current_time)
|
||
self.update_time_display()
|
||
|
||
# Check for timeline clips at current playhead position
|
||
self.check_timeline_clips_at_playhead()
|
||
|
||
# Frame rate control (approximately 30 FPS)
|
||
time.sleep(1/30)
|
||
|
||
except Exception as e:
|
||
print(f"⚠️ Playback error: {e}")
|
||
break
|
||
|
||
# Playback finished
|
||
self.is_playing = False
|
||
|
||
self.play_thread = threading.Thread(target=play_thread, daemon=True)
|
||
self.play_thread.start()
|
||
|
||
    def pause_video(self):
        """Pause video playback"""
        self.is_playing = False

    def stop_video(self):
        """Stop video and return to beginning"""
        self.is_playing = False
        self.current_time = 0.0
        self.display_frame_at_time(0.0)
|
||
|
||
def apply_trim(self):
|
||
"""Apply trim to the video"""
|
||
if not self.current_clip:
|
||
messagebox.showwarning("No Video", "Please load a video first.")
|
||
return
|
||
|
||
start_time = self.trim_start_var.get()
|
||
end_time = self.trim_end_var.get()
|
||
|
||
if start_time >= end_time:
|
||
messagebox.showerror("Invalid Range", "Start time must be less than end time.")
|
||
return
|
||
|
||
if end_time > self.video_duration:
|
||
messagebox.showerror("Invalid Range", f"End time cannot exceed video duration ({self.video_duration:.1f}s).")
|
||
return
|
||
|
||
try:
|
||
if MOVIEPY_AVAILABLE and hasattr(self.current_clip, 'subclipped'):
|
||
# Apply trim using MoviePy
|
||
self.current_clip = self.current_clip.subclipped(start_time, end_time)
|
||
self.video_duration = self.current_clip.duration
|
||
self.current_time = 0.0
|
||
|
||
# Update UI
|
||
self.trim_end_var.set(self.video_duration)
|
||
self.display_frame_at_time(0.0)
|
||
self.update_timeline()
|
||
self.update_time_display()
|
||
messagebox.showinfo("Success", f"Video trimmed to {start_time:.1f}s - {end_time:.1f}s")
|
||
else:
|
||
# OpenCV mode - export trimmed video
|
||
if not self.current_video:
|
||
messagebox.showwarning("No Video", "No video file loaded.")
|
||
return
|
||
|
||
timestamp = datetime.now().strftime("%H%M%S")
|
||
base_name = os.path.splitext(os.path.basename(self.current_video))[0]
|
||
output_path = os.path.join(os.path.dirname(self.current_video),
|
||
f"{base_name}_trimmed_{timestamp}.mp4")
|
||
|
||
self.apply_trim_opencv(self.current_video, output_path, start_time, end_time)
|
||
self.load_video(output_path)
|
||
messagebox.showinfo("Success", f"Video trimmed ({start_time:.1f}s to {end_time:.1f}s) and saved as:\n{os.path.basename(output_path)}")
|
||
|
||
except Exception as e:
|
||
messagebox.showerror("Trim Error", f"Could not trim video: {e}")
|
||
|
||
def apply_trim_opencv(self, input_path, output_path, start_time, end_time):
|
||
"""Apply trim using OpenCV"""
|
||
cap = cv2.VideoCapture(input_path)
|
||
fps = cap.get(cv2.CAP_PROP_FPS)
|
||
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||
|
||
start_frame = int(start_time * fps)
|
||
end_frame = int(end_time * fps)
|
||
|
||
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
||
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
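        # cv2.VideoWriter writes video frames only, so the trimmed copy produced
        # here has no audio track; the MoviePy path (or FFmpeg) is needed to keep
        # the audio.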
|
||
|
||
# Set to start frame
|
||
cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
|
||
|
||
frame_count = start_frame
|
||
while frame_count < end_frame:
|
||
ret, frame = cap.read()
|
||
if not ret:
|
||
break
|
||
|
||
out.write(frame)
|
||
frame_count += 1
|
||
|
||
cap.release()
|
||
out.release()
|
||
|
||
def apply_speed(self):
|
||
"""Apply speed change to the video"""
|
||
if not self.current_clip:
|
||
messagebox.showwarning("No Video", "Please load a video first.")
|
||
return
|
||
|
||
speed_factor = self.speed_var.get()
|
||
|
||
try:
|
||
            if MOVIEPY_AVAILABLE and hasattr(self.current_clip, 'with_effects'):
                # Apply speed change using MoviePy's MultiplySpeed effect
                # (changing only the fps would not retime the clip).
                # MultiplySpeed is assumed to be available as a MoviePy 2.x effect
                # class, in the same style as the MultiplyVolume effect used for
                # volume adjustment.
                from moviepy.video.fx import MultiplySpeed
                self.current_clip = self.current_clip.with_effects([MultiplySpeed(speed_factor)])
|
||
|
||
self.video_duration = self.current_clip.duration
|
||
self.current_time = 0.0
|
||
|
||
# Update UI
|
||
self.trim_end_var.set(self.video_duration)
|
||
self.display_frame_at_time(0.0)
|
||
self.update_timeline()
|
||
self.update_time_display()
|
||
messagebox.showinfo("Success", f"Speed changed to {speed_factor:.1f}x")
|
||
else:
|
||
# OpenCV mode - export video with speed change
|
||
if not self.current_video:
|
||
messagebox.showwarning("No Video", "No video file loaded.")
|
||
return
|
||
|
||
timestamp = datetime.now().strftime("%H%M%S")
|
||
base_name = os.path.splitext(os.path.basename(self.current_video))[0]
|
||
output_path = os.path.join(os.path.dirname(self.current_video),
|
||
f"{base_name}_speed_{speed_factor}x_{timestamp}.mp4")
|
||
|
||
self.apply_speed_opencv(self.current_video, output_path, speed_factor)
|
||
self.load_video(output_path)
|
||
messagebox.showinfo("Success", f"Speed changed to {speed_factor:.1f}x and saved as:\n{os.path.basename(output_path)}")
|
||
|
||
except Exception as e:
|
||
messagebox.showerror("Speed Error", f"Could not change speed: {e}")
|
||
|
||
def apply_speed_opencv(self, input_path, output_path, speed_factor):
|
||
"""Apply speed change using OpenCV"""
|
||
cap = cv2.VideoCapture(input_path)
|
||
fps = cap.get(cv2.CAP_PROP_FPS)
|
||
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||
|
||
# Calculate new FPS for speed change
|
||
new_fps = fps * speed_factor
|
||
|
||
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
||
out = cv2.VideoWriter(output_path, fourcc, new_fps, (width, height))
|
||
|
||
while True:
|
||
ret, frame = cap.read()
|
||
if not ret:
|
||
break
|
||
|
||
out.write(frame)
|
||
|
||
cap.release()
|
||
out.release()
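        # Writing the same frames with a container fps of fps * speed_factor is
        # what changes playback speed here: the frames are unchanged, only the
        # timebase differs, and (as with the other OpenCV paths) audio is dropped.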
|
||
|
||
def apply_volume(self):
|
||
"""Apply volume adjustment"""
|
||
if not self.current_clip:
|
||
messagebox.showwarning("No Video", "Please load a video first.")
|
||
return
|
||
|
||
# Check for volume factor from either basic or audio effects tab
|
||
volume_factor = 1.0
|
||
if hasattr(self, 'audio_volume_var'):
|
||
volume_factor = self.audio_volume_var.get()
|
||
elif hasattr(self, 'volume_var'):
|
||
volume_factor = self.volume_var.get()
|
||
|
||
try:
|
||
if MOVIEPY_AVAILABLE and hasattr(self.current_clip, 'audio'):
|
||
# Use MoviePy for volume adjustment
|
||
if not self.current_clip.audio:
|
||
messagebox.showwarning("No Audio", "This video has no audio track.")
|
||
return
|
||
|
||
self.current_clip = self.current_clip.with_effects([MultiplyVolume(volume_factor)])
|
||
messagebox.showinfo("Success", f"Volume adjusted to {volume_factor:.1f}x")
|
||
else:
|
||
# OpenCV mode - show helpful message about audio processing
|
||
if not self.current_video:
|
||
messagebox.showwarning("No Video", "No video file loaded.")
|
||
return
|
||
|
||
messagebox.showinfo("Audio Processing",
|
||
f"Volume adjustment to {volume_factor:.1f}x noted.\n\n"
|
||
"Audio processing requires external tools.\n"
|
||
"For audio editing, consider using:\n"
|
||
"• Audacity (free audio editor)\n"
|
||
"• FFmpeg command line\n"
|
||
"• Install MoviePy: pip install moviepy")
|
||
|
||
except Exception as e:
|
||
messagebox.showerror("Volume Error", f"Could not adjust volume: {e}")
|
||
|
||
def apply_fade(self):
|
||
"""Apply fade in/out effects"""
|
||
if not self.current_clip:
|
||
messagebox.showwarning("No Video", "Please load a video first.")
|
||
return
|
||
|
||
try:
|
||
if MOVIEPY_AVAILABLE and hasattr(self.current_clip, 'fadein'):
|
||
# Use MoviePy for advanced fade effects
|
||
fade_duration = min(1.0, self.video_duration / 4)
|
||
|
||
if hasattr(self.current_clip, 'fadein') and hasattr(self.current_clip, 'fadeout'):
|
||
self.current_clip = self.current_clip.fadein(fade_duration).fadeout(fade_duration)
|
||
messagebox.showinfo("Success", f"Fade effects applied ({fade_duration:.1f}s)")
|
||
else:
|
||
messagebox.showinfo("Not Available", "Fade effects not available in this MoviePy version")
|
||
else:
|
||
# OpenCV mode - export video with fade effect
|
||
if not self.current_video:
|
||
messagebox.showwarning("No Video", "No video file loaded.")
|
||
return
|
||
|
||
timestamp = datetime.now().strftime("%H%M%S")
|
||
base_name = os.path.splitext(os.path.basename(self.current_video))[0]
|
||
output_path = os.path.join(os.path.dirname(self.current_video),
|
||
f"{base_name}_faded_{timestamp}.mp4")
|
||
|
||
self.apply_fade_opencv(self.current_video, output_path)
|
||
self.load_video(output_path)
|
||
messagebox.showinfo("Success", f"Fade effects applied and saved as:\n{os.path.basename(output_path)}")
|
||
|
||
except Exception as e:
|
||
messagebox.showerror("Fade Error", f"Could not apply fade effects: {e}")
|
||
|
||
def apply_fade_opencv(self, input_path, output_path):
|
||
"""Apply fade effects using OpenCV"""
|
||
cap = cv2.VideoCapture(input_path)
|
||
fps = cap.get(cv2.CAP_PROP_FPS)
|
||
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
||
|
||
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
||
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
|
||
|
||
# Calculate fade frames (1 second fade in/out)
|
||
fade_frames = min(int(fps), total_frames // 4)
|
||
|
||
frame_count = 0
|
||
while True:
|
||
ret, frame = cap.read()
|
||
if not ret:
|
||
break
|
||
|
||
# Apply fade in
|
||
if frame_count < fade_frames:
|
||
alpha = frame_count / fade_frames
|
||
frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=0)
|
||
|
||
# Apply fade out
|
||
elif frame_count >= total_frames - fade_frames:
|
||
alpha = (total_frames - frame_count) / fade_frames
|
||
frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=0)
|
||
|
||
out.write(frame)
|
||
frame_count += 1
|
||
|
||
cap.release()
|
||
out.release()
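        # The fades are linear ramps: for the first fade_frames frames the pixel
        # values are scaled by alpha = frame_count / fade_frames (0 -> 1), and for
        # the last fade_frames frames by alpha = (total_frames - frame_count) /
        # fade_frames (1 -> 0), applied via cv2.convertScaleAbs(frame, alpha=alpha).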
|
||
|
||
def apply_text(self):
|
||
"""Apply text overlay"""
|
||
if not self.current_clip:
|
||
messagebox.showwarning("No Video", "Please load a video first.")
|
||
return
|
||
|
||
        # Get the overlay text from the entry; warn if nothing was entered
        text_content = self.text_var.get().strip() if hasattr(self, 'text_var') else ""

        if not text_content:
            messagebox.showwarning("No Text", "Please enter text to overlay.")
            return
|
||
|
||
try:
|
||
if MOVIEPY_AVAILABLE and hasattr(self.current_clip, 'duration'):
|
||
# Use MoviePy for text overlay
|
||
text_clip = TextClip(text_content, fontsize=50, color='white', font='Arial-Bold')
|
||
text_clip = text_clip.with_duration(self.current_clip.duration)
|
||
text_clip = text_clip.with_position(('center', 'bottom'))
|
||
|
||
# Composite with video
|
||
self.current_clip = CompositeVideoClip([self.current_clip, text_clip])
|
||
messagebox.showinfo("Success", f"Text overlay added: '{text_content}'")
|
||
else:
|
||
# OpenCV mode - export video with text overlay
|
||
if not self.current_video:
|
||
messagebox.showwarning("No Video", "No video file loaded.")
|
||
return
|
||
|
||
timestamp = datetime.now().strftime("%H%M%S")
|
||
base_name = os.path.splitext(os.path.basename(self.current_video))[0]
|
||
output_path = os.path.join(os.path.dirname(self.current_video),
|
||
f"{base_name}_with_text_{timestamp}.mp4")
|
||
|
||
self.apply_text_opencv(self.current_video, output_path, text_content)
|
||
self.load_video(output_path)
|
||
messagebox.showinfo("Success", f"Text '{text_content}' added and saved as:\n{os.path.basename(output_path)}")
|
||
|
||
except Exception as e:
|
||
messagebox.showerror("Text Error", f"Could not add text overlay: {e}")
|
||
|
||
def apply_text_opencv(self, input_path, output_path, text):
|
||
"""Apply text overlay using OpenCV"""
|
||
cap = cv2.VideoCapture(input_path)
|
||
fps = cap.get(cv2.CAP_PROP_FPS)
|
||
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||
|
||
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
||
out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
|
||
|
||
# Text settings
|
||
font = cv2.FONT_HERSHEY_SIMPLEX
|
||
font_scale = min(width, height) / 800 # Scale font based on video size
|
||
color = (255, 255, 255) # White color
|
||
thickness = max(1, int(font_scale * 2))
|
||
|
||
# Get text size for positioning
|
||
(text_width, text_height), baseline = cv2.getTextSize(text, font, font_scale, thickness)
|
||
x = (width - text_width) // 2
|
||
y = height - 50 # Bottom position
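        # (width - text_width) // 2 centres the text horizontally; the two offset
        # black putText calls below act as a simple outline so the white text
        # stays readable on bright frames.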
|
||
|
||
while True:
|
||
ret, frame = cap.read()
|
||
if not ret:
|
||
break
|
||
|
||
# Add black outline for better visibility
|
||
cv2.putText(frame, text, (x-2, y-2), font, font_scale, (0, 0, 0), thickness+2, cv2.LINE_AA)
|
||
cv2.putText(frame, text, (x+2, y+2), font, font_scale, (0, 0, 0), thickness+2, cv2.LINE_AA)
|
||
|
||
# Add white text
|
||
cv2.putText(frame, text, (x, y), font, font_scale, color, thickness, cv2.LINE_AA)
|
||
|
||
out.write(frame)
|
||
|
||
cap.release()
|
||
out.release()
|
||
|
||
def apply_resize(self, target_width, target_height):
|
||
"""Apply resize to video"""
|
||
if not self.current_clip:
|
||
messagebox.showwarning("No Video", "Please load a video first.")
|
||
return
|
||
|
||
try:
|
||
if MOVIEPY_AVAILABLE and hasattr(self.current_clip, 'resize'):
|
||
# Use MoviePy for resizing
|
||
self.current_clip = self.current_clip.resize((target_width, target_height))
|
||
messagebox.showinfo("Success", f"Video resized to {target_width}x{target_height}")
|
||
else:
|
||
# OpenCV mode - export resized video
|
||
if not self.current_video:
|
||
messagebox.showwarning("No Video", "No video file loaded.")
|
||
return
|
||
|
||
timestamp = datetime.now().strftime("%H%M%S")
|
||
base_name = os.path.splitext(os.path.basename(self.current_video))[0]
|
||
output_path = os.path.join(os.path.dirname(self.current_video),
|
||
f"{base_name}_resized_{target_width}x{target_height}_{timestamp}.mp4")
|
||
|
||
self.apply_resize_opencv(self.current_video, output_path, target_width, target_height)
|
||
self.load_video(output_path)
|
||
messagebox.showinfo("Success", f"Video resized to {target_width}x{target_height} and saved as:\n{os.path.basename(output_path)}")
|
||
|
||
except Exception as e:
|
||
messagebox.showerror("Resize Error", f"Could not resize video: {e}")
|
||
|
||
def apply_resize_opencv(self, input_path, output_path, target_width, target_height):
|
||
"""Apply resize using OpenCV"""
|
||
cap = cv2.VideoCapture(input_path)
|
||
fps = cap.get(cv2.CAP_PROP_FPS)
|
||
|
||
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
|
||
out = cv2.VideoWriter(output_path, fourcc, fps, (target_width, target_height))
|
||
|
||
while True:
|
||
ret, frame = cap.read()
|
||
if not ret:
|
||
break
|
||
|
||
# Resize frame
|
||
resized_frame = cv2.resize(frame, (target_width, target_height))
|
||
out.write(resized_frame)
|
||
|
||
cap.release()
|
||
out.release()
|
||
|
||
    def reset_video(self):
        """Reset video to original state"""
        if not self.current_video:
            messagebox.showwarning("No Video", "No video loaded.")
            return

        if messagebox.askyesno("Reset Video", "Reset all changes and reload original video?"):
            self.load_video(self.current_video)
|
||
|
||
def export_video(self):
|
||
"""Export the edited video"""
|
||
if not MOVIEPY_AVAILABLE:
|
||
messagebox.showwarning("Feature Unavailable",
|
||
"Video export requires MoviePy.\nInstall with: pip install moviepy")
|
||
return
|
||
|
||
if not self.current_clip:
|
||
messagebox.showwarning("No Video", "Please load a video first.")
|
||
return
|
||
|
||
# Get output filename
|
||
timestamp = datetime.now().strftime("%H%M%S")
|
||
default_name = f"edited_video_{timestamp}.mp4"
|
||
|
||
output_path = filedialog.asksaveasfilename(
|
||
title="Save Edited Video",
|
||
defaultextension=".mp4",
|
||
filetypes=[("MP4 files", "*.mp4"), ("All files", "*.*")],
|
||
            initialfile=default_name
|
||
)
|
||
|
||
if not output_path:
|
||
return
|
||
|
||
# Create progress window
|
||
progress_window = tk.Toplevel(self.editor_window)
|
||
progress_window.title("Exporting Video")
|
||
progress_window.geometry("400x150")
|
||
progress_window.configure(bg=self.colors['bg_primary'])
|
||
progress_window.transient(self.editor_window)
|
||
progress_window.grab_set()
|
||
|
||
progress_label = tk.Label(progress_window, text="Exporting video...",
|
||
font=self.fonts['body'], bg=self.colors['bg_primary'],
|
||
fg=self.colors['text_primary'])
|
||
progress_label.pack(pady=20)
|
||
|
||
progress_bar = ttk.Progressbar(progress_window, mode='indeterminate')
|
||
progress_bar.pack(pady=10, padx=20, fill="x")
|
||
progress_bar.start()
|
||
|
||
def export_thread():
|
||
try:
|
||
# Export video
|
||
self.current_clip.write_videofile(
|
||
output_path,
|
||
                    codec="libx264",
                    audio_codec="aac",
                    logger=None
|
||
)
|
||
|
||
def show_success():
|
||
progress_window.destroy()
|
||
messagebox.showinfo("Export Complete", f"Video exported successfully!\n\nSaved to: {output_path}")
|
||
|
||
self.editor_window.after(0, show_success)
|
||
|
||
except Exception as e:
|
||
def show_error():
|
||
progress_window.destroy()
|
||
messagebox.showerror("Export Error", f"Could not export video: {e}")
|
||
|
||
self.editor_window.after(0, show_error)
|
||
|
||
# Start export in background thread
|
||
threading.Thread(target=export_thread, daemon=True).start()
|
||
|
||
# Professional timeline helper methods
|
||
def get_clip_at_position(self, x, y):
|
||
"""Get the clip at the given canvas position"""
|
||
        # Subtract the 20 px left margin used when clips are drawn so canvas
        # coordinates map back onto the same time scale
        time_pos = (x - 20) / self.timeline_scale
|
||
|
||
for clip in self.timeline_clips:
|
||
if clip['start_time'] <= time_pos <= clip['end_time']:
|
||
# Check if Y position is within the clip's track
|
||
track_info = self.tracks[clip['track']]
|
||
if track_info['y_offset'] <= y <= track_info['y_offset'] + track_info['height']:
|
||
return clip
|
||
return None
|
||
|
||
    def snap_to_grid(self, time_value):
        """Snap time value to grid"""
        if self.snap_enabled and self.grid_size > 0:
            return round(time_value / self.grid_size) * self.grid_size
        return time_value
|
||
|
||
def magnetic_snap(self, new_time, dragging_clip):
|
||
"""Apply magnetic timeline snapping to other clips"""
|
||
if not self.magnetic_timeline:
|
||
return new_time
|
||
|
||
snap_distance = 0.2 # 200ms snap distance
|
||
clip_duration = dragging_clip['end_time'] - dragging_clip['start_time']
|
||
|
||
for clip in self.timeline_clips:
|
||
if clip == dragging_clip or clip['track'] != dragging_clip['track']:
|
||
continue
|
||
|
||
# Snap to start of other clips
|
||
if abs(new_time - clip['start_time']) < snap_distance:
|
||
return clip['start_time']
|
||
|
||
# Snap to end of other clips
|
||
if abs(new_time - clip['end_time']) < snap_distance:
|
||
return clip['end_time']
|
||
|
||
# Snap end of dragging clip to start of other clips
|
||
if abs((new_time + clip_duration) - clip['start_time']) < snap_distance:
|
||
return clip['start_time'] - clip_duration
|
||
|
||
return new_time
|
||
|
||
def cut_clip_at_position(self, clip, cut_time):
|
||
"""Cut a clip at the specified time"""
|
||
if cut_time <= clip['start_time'] or cut_time >= clip['end_time']:
|
||
return
|
||
|
||
# Create two new clips
|
||
first_clip = clip.copy()
|
||
first_clip['id'] = len(self.timeline_clips) + 1
|
||
first_clip['end_time'] = cut_time
|
||
first_clip['name'] = f"{clip['name']} (1)"
|
||
|
||
second_clip = clip.copy()
|
||
second_clip['id'] = len(self.timeline_clips) + 2
|
||
second_clip['start_time'] = cut_time
|
||
second_clip['name'] = f"{clip['name']} (2)"
|
||
|
||
# Remove original clip and add new ones
|
||
self.timeline_clips.remove(clip)
|
||
self.timeline_clips.extend([first_clip, second_clip])
|
||
|
||
self.selected_clip = first_clip
|
||
self.update_timeline()
|
||
print(f"✂️ Cut clip at {cut_time:.2f}s")
|
||
|
||
def cut_clip_at_playhead(self):
|
||
"""Cut selected clip at current playhead position"""
|
||
if self.selected_clip:
|
||
self.cut_clip_at_position(self.selected_clip, self.current_time)
|
||
|
||
def delete_clip(self, clip):
|
||
"""Delete a clip from timeline"""
|
||
if clip in self.timeline_clips:
|
||
self.timeline_clips.remove(clip)
|
||
if self.selected_clip == clip:
|
||
self.selected_clip = None
|
||
self.update_timeline()
|
||
print(f"🗑️ Deleted clip: {clip['name']}")
|
||
|
||
def duplicate_clip(self, clip):
|
||
"""Duplicate a clip"""
|
||
new_clip = clip.copy()
|
||
new_clip['id'] = len(self.timeline_clips) + 1
|
||
new_clip['name'] = f"{clip['name']} (Copy)"
|
||
|
||
# Place after original clip
|
||
duration = clip['end_time'] - clip['start_time']
|
||
new_clip['start_time'] = clip['end_time']
|
||
new_clip['end_time'] = clip['end_time'] + duration
|
||
|
||
self.timeline_clips.append(new_clip)
|
||
self.selected_clip = new_clip
|
||
self.update_timeline()
|
||
print(f"📄 Duplicated clip: {new_clip['name']}")
|
||
|
||
def add_marker_at_time(self, time):
|
||
"""Add a marker at specified time"""
|
||
marker = {
|
||
'time': time,
|
||
'name': f"Marker {len(self.markers) + 1}",
|
||
'color': '#ffeb3b'
|
||
}
|
||
self.markers.append(marker)
|
||
self.update_timeline()
|
||
print(f"📍 Added marker at {time:.2f}s")
|
||
|
||
def show_clip_properties(self, clip):
|
||
"""Show clip properties dialog"""
|
||
props_window = tk.Toplevel(self.root)
|
||
props_window.title(f"Clip Properties - {clip['name']}")
|
||
props_window.configure(bg=self.colors['bg_primary'])
|
||
props_window.geometry("400x300")
|
||
|
||
# Clip info
|
||
tk.Label(props_window, text=f"Clip: {clip['name']}",
|
||
font=self.fonts['heading'], bg=self.colors['bg_primary'],
|
||
fg=self.colors['text_primary']).pack(pady=10)
|
||
|
||
info_frame = tk.Frame(props_window, bg=self.colors['bg_primary'])
|
||
info_frame.pack(fill='x', padx=20)
|
||
|
||
# Duration, start time, etc.
|
||
duration = clip['end_time'] - clip['start_time']
|
||
tk.Label(info_frame, text=f"Duration: {duration:.2f}s",
|
||
bg=self.colors['bg_primary'], fg=self.colors['text_secondary']).pack(anchor='w')
|
||
tk.Label(info_frame, text=f"Start: {clip['start_time']:.2f}s",
|
||
bg=self.colors['bg_primary'], fg=self.colors['text_secondary']).pack(anchor='w')
|
||
tk.Label(info_frame, text=f"End: {clip['end_time']:.2f}s",
|
||
bg=self.colors['bg_primary'], fg=self.colors['text_secondary']).pack(anchor='w')
|
||
tk.Label(info_frame, text=f"Track: {clip['track']}",
|
||
bg=self.colors['bg_primary'], fg=self.colors['text_secondary']).pack(anchor='w')
|
||
|
||
def zoom_to_fit(self):
|
||
"""Zoom timeline to fit all content"""
|
||
if not self.timeline_clips:
|
||
return
|
||
|
||
# Find the last clip end time
|
||
max_time = max(clip['end_time'] for clip in self.timeline_clips)
|
||
canvas_width = self.timeline_canvas.winfo_width()
|
||
|
||
if max_time > 0 and canvas_width > 100:
|
||
zoom_level = (canvas_width - 100) / (max_time * 50) # 50 is base scale
|
||
self.zoom_var.set(max(0.1, min(5.0, zoom_level)))
|
||
self.on_zoom_change(zoom_level)
|
||
print(f"🔍 Zoomed to fit content ({zoom_level:.1f}x)")
|
||
|
||
def open_shorts_editor(shorts_folder="shorts"):
    """Open the shorts editor as a standalone application"""
    editor = ShortsEditorGUI(shorts_folder=shorts_folder)
    editor.open_editor()


if __name__ == "__main__":
    # Run as standalone application
    open_shorts_editor()