diff --git a/src/AudioReaderSource.cpp b/src/AudioReaderSource.cpp
index 2c50e873d..d08487de2 100644
--- a/src/AudioReaderSource.cpp
+++ b/src/AudioReaderSource.cpp
@@ -42,6 +42,8 @@ void AudioReaderSource::getNextAudioBlock(const juce::AudioSourceChannelInfo& in
}
while (remaining_samples > 0) {
+ const int previous_remaining = remaining_samples;
+ frame.reset();
try {
// Get current frame object
if (reader) {
@@ -53,9 +55,19 @@ void AudioReaderSource::getNextAudioBlock(const juce::AudioSourceChannelInfo& in
// Get audio samples
if (reader && frame) {
+ const int frame_samples = frame->GetAudioSamplesCount();
+ const int frame_channels = frame->GetAudioChannelsCount();
+
+ // Corrupt/unsupported streams can yield frames without audio data.
+ // Avoid a tight loop that never consumes remaining_samples.
+ if (frame_samples <= 0 || frame_channels <= 0) {
+ info.buffer->clear(remaining_position, remaining_samples);
+ break;
+ }
+
if (sample_position + remaining_samples <= frame->GetAudioSamplesCount()) {
// Success, we have enough samples
- for (int channel = 0; channel < frame->GetAudioChannelsCount(); channel++) {
+ for (int channel = 0; channel < frame_channels; channel++) {
if (channel < info.buffer->getNumChannels()) {
info.buffer->addFrom(channel, remaining_position, *frame->GetAudioSampleBuffer(),
channel, sample_position, remaining_samples);
@@ -68,7 +80,12 @@ void AudioReaderSource::getNextAudioBlock(const juce::AudioSourceChannelInfo& in
} else if (sample_position + remaining_samples > frame->GetAudioSamplesCount()) {
// Not enough samples, take what we can
int amount_to_copy = frame->GetAudioSamplesCount() - sample_position;
- for (int channel = 0; channel < frame->GetAudioChannelsCount(); channel++) {
+ if (amount_to_copy <= 0) {
+ info.buffer->clear(remaining_position, remaining_samples);
+ break;
+ }
+
+ for (int channel = 0; channel < frame_channels; channel++) {
if (channel < info.buffer->getNumChannels()) {
info.buffer->addFrom(channel, remaining_position, *frame->GetAudioSampleBuffer(), channel,
sample_position, amount_to_copy);
@@ -84,7 +101,14 @@ void AudioReaderSource::getNextAudioBlock(const juce::AudioSourceChannelInfo& in
frame_position += speed;
sample_position = 0; // reset for new frame
}
+ } else {
+ info.buffer->clear(remaining_position, remaining_samples);
+ break;
+ }
+ if (remaining_samples == previous_remaining) {
+ info.buffer->clear(remaining_position, remaining_samples);
+ break;
}
}
}
diff --git a/src/CacheMemory.cpp b/src/CacheMemory.cpp
index 563570384..a89c9f7ee 100644
--- a/src/CacheMemory.cpp
+++ b/src/CacheMemory.cpp
@@ -70,6 +70,9 @@ void CacheMemory::Add(std::shared_ptr<Frame> frame)
// Check if frame is already contained in cache
bool CacheMemory::Contains(int64_t frame_number) {
+ // Create a scoped lock, to protect the cache from multiple threads
+	const std::lock_guard<std::mutex> lock(*cacheMutex);
+
if (frames.count(frame_number) > 0) {
return true;
} else {
diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
index ba726e9ed..18a9084c1 100644
--- a/src/FFmpegReader.cpp
+++ b/src/FFmpegReader.cpp
@@ -561,23 +561,83 @@ void FFmpegReader::Open() {
// Audio encoding does not typically use more than 2 threads (most codecs use 1 thread)
aCodecCtx->thread_count = std::min(FF_AUDIO_NUM_PROCESSORS, 2);
- if (aCodec == NULL) {
- throw InvalidCodec("A valid audio codec could not be found for this file.", path);
- }
+ bool audio_opened = false;
+ if (aCodec != NULL) {
+ // Init options
+ AVDictionary *opts = NULL;
+ av_dict_set(&opts, "strict", "experimental", 0);
- // Init options
- AVDictionary *opts = NULL;
- av_dict_set(&opts, "strict", "experimental", 0);
+ // Open audio codec
+ audio_opened = (avcodec_open2(aCodecCtx, aCodec, &opts) >= 0);
- // Open audio codec
- if (avcodec_open2(aCodecCtx, aCodec, &opts) < 0)
- throw InvalidCodec("An audio codec was found, but could not be opened.", path);
+ // Free options
+ av_dict_free(&opts);
+ }
- // Free options
- av_dict_free(&opts);
+ if (audio_opened) {
+ // Update the File Info struct with audio details (if an audio stream is found)
+ UpdateAudioInfo();
+
+ // Disable malformed audio stream metadata (prevents divide-by-zero / invalid resampling math)
+ const bool invalid_audio_info =
+ (info.channels <= 0) ||
+ (info.sample_rate <= 0) ||
+ (info.audio_timebase.num <= 0) ||
+ (info.audio_timebase.den <= 0) ||
+ (aCodecCtx->sample_fmt == AV_SAMPLE_FMT_NONE);
+ if (invalid_audio_info) {
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FFmpegReader::Open (Disable invalid audio stream)",
+ "channels", info.channels,
+ "sample_rate", info.sample_rate,
+ "audio_timebase.num", info.audio_timebase.num,
+ "audio_timebase.den", info.audio_timebase.den,
+				"sample_fmt", static_cast<int>(aCodecCtx ? aCodecCtx->sample_fmt : AV_SAMPLE_FMT_NONE));
+ info.has_audio = false;
+ info.audio_stream_index = -1;
+ audioStream = -1;
+ packet_status.audio_eof = true;
+ if (aCodecCtx) {
+ if (avcodec_is_open(aCodecCtx)) {
+ avcodec_flush_buffers(aCodecCtx);
+ }
+ AV_FREE_CONTEXT(aCodecCtx);
+ aCodecCtx = nullptr;
+ }
+ aStream = nullptr;
+ }
+ } else {
+ // Keep decoding video, but disable bad/unsupported audio stream.
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FFmpegReader::Open (Audio codec unavailable; disabling audio)",
+ "audioStream", audioStream);
+ info.has_audio = false;
+ info.audio_stream_index = -1;
+ audioStream = -1;
+ packet_status.audio_eof = true;
+ if (aCodecCtx) {
+ AV_FREE_CONTEXT(aCodecCtx);
+ aCodecCtx = nullptr;
+ }
+ aStream = nullptr;
+ }
+ }
- // Update the File Info struct with audio details (if an audio stream is found)
- UpdateAudioInfo();
+ // Guard invalid frame-rate / timebase values from malformed streams.
+ if (info.fps.num <= 0 || info.fps.den <= 0) {
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FFmpegReader::Open (Invalid FPS detected; applying fallback)",
+ "fps.num", info.fps.num,
+ "fps.den", info.fps.den);
+ info.fps.num = 30;
+ info.fps.den = 1;
+ }
+ if (info.video_timebase.num <= 0 || info.video_timebase.den <= 0) {
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FFmpegReader::Open (Invalid video_timebase detected; applying fallback)",
+ "video_timebase.num", info.video_timebase.num,
+ "video_timebase.den", info.video_timebase.den);
+ info.video_timebase = info.fps.Reciprocal();
}
// Add format metadata (if any)
@@ -835,12 +895,20 @@ void FFmpegReader::ApplyDurationStrategy() {
}
void FFmpegReader::UpdateAudioInfo() {
+ const int codec_channels =
+#if HAVE_CH_LAYOUT
+ AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout.nb_channels;
+#else
+ AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
+#endif
+
// Set default audio channel layout (if needed)
#if HAVE_CH_LAYOUT
- if (!av_channel_layout_check(&(AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout)))
+ if (codec_channels > 0 &&
+ !av_channel_layout_check(&(AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout)))
AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->ch_layout = (AVChannelLayout) AV_CHANNEL_LAYOUT_STEREO;
#else
- if (AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout == 0)
+ if (codec_channels > 0 && AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout == 0)
AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout = av_get_default_channel_layout(AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels);
#endif
@@ -2236,12 +2304,17 @@ void FFmpegReader::UpdatePTSOffset() {
int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
// Apply PTS offset
int64_t previous_video_frame = current_video_frame;
+ const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
+ const double video_timebase_value =
+ (info.video_timebase.num > 0 && info.video_timebase.den > 0)
+ ? info.video_timebase.ToDouble()
+ : (1.0 / 30.0);
// Get the video packet start time (in seconds)
- double video_seconds = (double(pts) * info.video_timebase.ToDouble()) + pts_offset_seconds;
+ double video_seconds = (double(pts) * video_timebase_value) + pts_offset_seconds;
// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
- int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;
+ int64_t frame = round(video_seconds * fps_value) + 1;
// Keep track of the expected video frame #
if (current_video_frame == 0)
@@ -2264,11 +2337,17 @@ int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
// Convert Frame Number into Video PTS
int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
+ const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
+ const double video_timebase_value =
+ (info.video_timebase.num > 0 && info.video_timebase.den > 0)
+ ? info.video_timebase.ToDouble()
+ : (1.0 / 30.0);
+
// Get timestamp of this frame (in seconds)
- double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
+ double seconds = (double(frame_number - 1) / fps_value) + pts_offset_seconds;
// Calculate the # of video packets in this timestamp
- int64_t video_pts = round(seconds / info.video_timebase.ToDouble());
+ int64_t video_pts = round(seconds / video_timebase_value);
// Apply PTS offset (opposite)
return video_pts;
@@ -2276,11 +2355,17 @@ int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
// Convert Frame Number into Video PTS
int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
+ const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
+ const double audio_timebase_value =
+ (info.audio_timebase.num > 0 && info.audio_timebase.den > 0)
+ ? info.audio_timebase.ToDouble()
+ : (1.0 / 48000.0);
+
// Get timestamp of this frame (in seconds)
- double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
+ double seconds = (double(frame_number - 1) / fps_value) + pts_offset_seconds;
// Calculate the # of audio packets in this timestamp
- int64_t audio_pts = round(seconds / info.audio_timebase.ToDouble());
+ int64_t audio_pts = round(seconds / audio_timebase_value);
// Apply PTS offset (opposite)
return audio_pts;
@@ -2288,11 +2373,17 @@ int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
// Calculate Starting video frame and sample # for an audio PTS
AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
+ const double audio_timebase_value =
+ (info.audio_timebase.num > 0 && info.audio_timebase.den > 0)
+ ? info.audio_timebase.ToDouble()
+ : (1.0 / 48000.0);
+ const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
+
// Get the audio packet start time (in seconds)
- double audio_seconds = (double(pts) * info.audio_timebase.ToDouble()) + pts_offset_seconds;
+ double audio_seconds = (double(pts) * audio_timebase_value) + pts_offset_seconds;
// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
- double frame = (audio_seconds * info.fps.ToDouble()) + 1;
+ double frame = (audio_seconds * fps_value) + 1;
// Frame # as a whole number (no more decimals)
int64_t whole_frame = int64_t(frame);
diff --git a/src/Frame.cpp b/src/Frame.cpp
index 2e785fb2a..b24a913d5 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -455,9 +455,9 @@ void Frame::SetFrameNumber(int64_t new_number)
// Calculate the # of samples per video frame (for a specific frame number and frame rate)
int Frame::GetSamplesPerFrame(int64_t number, Fraction fps, int sample_rate, int channels)
{
- // Directly return 0 if there are no channels
+ // Directly return 0 for invalid audio/frame-rate parameters
// so that we do not need to deal with NaNs later
- if (channels == 0) return 0;
+ if (channels <= 0 || sample_rate <= 0 || fps.num <= 0 || fps.den <= 0) return 0;
// Get the total # of samples for the previous frame, and the current frame (rounded)
double fps_rate = fps.Reciprocal().ToDouble();
diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp
index 8e126dc59..6513e6921 100644
--- a/src/Qt/VideoCacheThread.cpp
+++ b/src/Qt/VideoCacheThread.cpp
@@ -53,10 +53,13 @@ namespace openshot
return false;
}
- if (min_frames_ahead < 0) {
+ const int64_t ready_min = min_frames_ahead.load();
+ if (ready_min < 0) {
return true;
}
+ const int64_t cached_index = last_cached_index.load();
+ const int64_t playhead = requested_display_frame.load();
int dir = computeDirection();
// Near timeline boundaries, don't require more pre-roll than can exist.
@@ -71,29 +74,26 @@ namespace openshot
return false;
}
- int64_t required_ahead = min_frames_ahead;
- if (required_ahead < 0) {
- required_ahead = 0;
- }
+ int64_t required_ahead = ready_min;
int64_t available_ahead = (dir > 0)
-			? std::max<int64_t>(0, max_frame - requested_display_frame)
-			: std::max<int64_t>(0, requested_display_frame - 1);
+			? std::max<int64_t>(0, max_frame - playhead)
+			: std::max<int64_t>(0, playhead - 1);
required_ahead = std::min(required_ahead, available_ahead);
if (dir > 0) {
- return (last_cached_index >= requested_display_frame + required_ahead);
+ return (cached_index >= playhead + required_ahead);
}
- return (last_cached_index <= requested_display_frame - required_ahead);
+ return (cached_index <= playhead - required_ahead);
}
void VideoCacheThread::setSpeed(int new_speed)
{
// Only update last_speed and last_dir when new_speed != 0
if (new_speed != 0) {
- last_speed = new_speed;
- last_dir = (new_speed > 0 ? 1 : -1);
+ last_speed.store(new_speed);
+ last_dir.store(new_speed > 0 ? 1 : -1);
}
- speed = new_speed;
+ speed.store(new_speed);
}
// Get the size in bytes of a frame (rough estimate)
@@ -128,29 +128,38 @@ namespace openshot
void VideoCacheThread::Seek(int64_t new_position, bool start_preroll)
{
- if (start_preroll) {
- userSeeked = true;
+ bool should_mark_seek = false;
+ bool should_preroll = false;
+ int64_t new_cached_count = cached_frame_count.load();
+ if (start_preroll) {
+ should_mark_seek = true;
CacheBase* cache = reader ? reader->GetCache() : nullptr;
if (cache && !cache->Contains(new_position))
{
// If user initiated seek, and current frame not found (
-			Timeline* timeline = static_cast<Timeline*>(reader);
- timeline->ClearAllCache();
- cached_frame_count = 0;
- preroll_on_next_fill = true;
+			if (Timeline* timeline = dynamic_cast<Timeline*>(reader)) {
+ timeline->ClearAllCache();
+ }
+ new_cached_count = 0;
+ should_preroll = true;
}
else if (cache)
{
- cached_frame_count = cache->Count();
- preroll_on_next_fill = false;
+ new_cached_count = cache->Count();
}
- else {
- preroll_on_next_fill = false;
+ }
+
+ {
+		std::lock_guard<std::mutex> guard(seek_state_mutex);
+ requested_display_frame.store(new_position);
+ cached_frame_count.store(new_cached_count);
+ if (start_preroll) {
+ preroll_on_next_fill.store(should_preroll);
+ userSeeked.store(should_mark_seek);
}
}
- requested_display_frame = new_position;
}
void VideoCacheThread::Seek(int64_t new_position)
@@ -161,13 +170,17 @@ namespace openshot
int VideoCacheThread::computeDirection() const
{
// If speed ≠ 0, use its sign; if speed==0, keep last_dir
- return (speed != 0 ? (speed > 0 ? 1 : -1) : last_dir);
+ const int current_speed = speed.load();
+ if (current_speed != 0) {
+ return (current_speed > 0 ? 1 : -1);
+ }
+ return last_dir.load();
}
void VideoCacheThread::handleUserSeek(int64_t playhead, int dir)
{
// Place last_cached_index just “behind” playhead in the given dir
- last_cached_index = playhead - dir;
+ last_cached_index.store(playhead - dir);
}
void VideoCacheThread::handleUserSeekWithPreroll(int64_t playhead,
@@ -184,7 +197,7 @@ namespace openshot
preroll_start = std::min(timeline_end, playhead + preroll_frames);
}
}
- last_cached_index = preroll_start - dir;
+ last_cached_index.store(preroll_start - dir);
}
int64_t VideoCacheThread::computePrerollFrames(const Settings* settings) const
@@ -209,9 +222,10 @@ namespace openshot
{
if (paused && !cache->Contains(playhead)) {
// If paused and playhead not in cache, clear everything
-			Timeline* timeline = static_cast<Timeline*>(reader);
- timeline->ClearAllCache();
- cached_frame_count = 0;
+			if (Timeline* timeline = dynamic_cast<Timeline*>(reader)) {
+ timeline->ClearAllCache();
+ }
+ cached_frame_count.store(0);
return true;
}
return false;
@@ -246,7 +260,7 @@ namespace openshot
ReaderBase* reader)
{
bool window_full = true;
- int64_t next_frame = last_cached_index + dir;
+ int64_t next_frame = last_cached_index.load() + dir;
// Advance from last_cached_index toward window boundary
while ((dir > 0 && next_frame <= window_end) ||
@@ -256,7 +270,7 @@ namespace openshot
break;
}
// If a Seek was requested mid-caching, bail out immediately
- if (userSeeked) {
+ if (userSeeked.load()) {
break;
}
@@ -265,7 +279,7 @@ namespace openshot
try {
auto framePtr = reader->GetFrame(next_frame);
cache->Add(framePtr);
- cached_frame_count = cache->Count();
+ cached_frame_count.store(cache->Count());
}
catch (const OutOfBoundsFrame&) {
break;
@@ -276,7 +290,7 @@ namespace openshot
cache->Touch(next_frame);
}
- last_cached_index = next_frame;
+ last_cached_index.store(next_frame);
next_frame += dir;
}
@@ -294,27 +308,31 @@ namespace openshot
// If caching disabled or no reader, mark cache as ready and sleep briefly
if (!settings->ENABLE_PLAYBACK_CACHING || !cache) {
- cached_frame_count = (cache ? cache->Count() : 0);
- min_frames_ahead = -1;
+ cached_frame_count.store(cache ? cache->Count() : 0);
+ min_frames_ahead.store(-1);
std::this_thread::sleep_for(double_micro_sec(50000));
continue;
}
// init local vars
- min_frames_ahead = settings->VIDEO_CACHE_MIN_PREROLL_FRAMES;
+ min_frames_ahead.store(settings->VIDEO_CACHE_MIN_PREROLL_FRAMES);
-		Timeline* timeline = static_cast<Timeline*>(reader);
+		Timeline* timeline = dynamic_cast<Timeline*>(reader);
+ if (!timeline) {
+ std::this_thread::sleep_for(double_micro_sec(50000));
+ continue;
+ }
int64_t timeline_end = timeline->GetMaxFrame();
- int64_t playhead = requested_display_frame;
- bool paused = (speed == 0);
+ int64_t playhead = requested_display_frame.load();
+ bool paused = (speed.load() == 0);
int64_t preroll_frames = computePrerollFrames(settings);
- cached_frame_count = cache->Count();
+ cached_frame_count.store(cache->Count());
// Compute effective direction (±1)
int dir = computeDirection();
- if (speed != 0) {
- last_dir = dir;
+ if (speed.load() != 0) {
+ last_dir.store(dir);
}
// Compute bytes_per_frame, max_bytes, and capacity once
@@ -335,16 +353,25 @@ namespace openshot
}
// Handle a user-initiated seek
- bool use_preroll = preroll_on_next_fill;
- if (userSeeked) {
+ bool did_user_seek = false;
+ bool use_preroll = false;
+ {
+				std::lock_guard<std::mutex> guard(seek_state_mutex);
+ playhead = requested_display_frame.load();
+ did_user_seek = userSeeked.load();
+ use_preroll = preroll_on_next_fill.load();
+ if (did_user_seek) {
+ userSeeked.store(false);
+ preroll_on_next_fill.store(false);
+ }
+ }
+ if (did_user_seek) {
if (use_preroll) {
handleUserSeekWithPreroll(playhead, dir, timeline_end, preroll_frames);
}
else {
handleUserSeek(playhead, dir);
}
- userSeeked = false;
- preroll_on_next_fill = false;
}
else if (!paused && capacity >= 1) {
// In playback mode, check if last_cached_index drifted outside the new window
@@ -361,8 +388,8 @@ namespace openshot
);
bool outside_window =
- (dir > 0 && last_cached_index > window_end) ||
- (dir < 0 && last_cached_index < window_begin);
+ (dir > 0 && last_cached_index.load() > window_end) ||
+ (dir < 0 && last_cached_index.load() < window_begin);
if (outside_window) {
handleUserSeek(playhead, dir);
}
@@ -384,7 +411,7 @@ namespace openshot
ready_target = 0;
}
int64_t configured_min = settings->VIDEO_CACHE_MIN_PREROLL_FRAMES;
- min_frames_ahead = std::min(configured_min, ready_target);
+ min_frames_ahead.store(std::min(configured_min, ready_target));
// If paused and playhead is no longer in cache, clear everything
bool did_clear = clearCacheIfPaused(playhead, paused, cache);
diff --git a/src/Qt/VideoCacheThread.h b/src/Qt/VideoCacheThread.h
index 121b596a5..a9dc2dd63 100644
--- a/src/Qt/VideoCacheThread.h
+++ b/src/Qt/VideoCacheThread.h
@@ -17,6 +17,8 @@
#include
#include
+#include <atomic>
+#include <mutex>
#include
namespace openshot
@@ -57,7 +59,7 @@ namespace openshot
void setSpeed(int new_speed);
/// @return The current speed (1=normal, 2=fast, –1=rewind, etc.)
- int getSpeed() const { return speed; }
+ int getSpeed() const { return speed.load(); }
/// Seek to a specific frame (no preroll).
void Seek(int64_t new_position);
@@ -175,23 +177,24 @@ namespace openshot
std::shared_ptr<openshot::Frame> last_cached_frame; ///< Last frame pointer added to cache.
- int speed; ///< Current playback speed (0=paused, >0 forward, <0 backward).
- int last_speed; ///< Last non-zero speed (for tracking).
- int last_dir; ///< Last direction sign (+1 forward, –1 backward).
- bool userSeeked; ///< True if Seek(..., true) was called (forces a cache reset).
- bool preroll_on_next_fill; ///< True if next cache rebuild should include preroll offset.
+		std::atomic<int> speed; ///< Current playback speed (0=paused, >0 forward, <0 backward).
+		std::atomic<int> last_speed; ///< Last non-zero speed (for tracking).
+		std::atomic<int> last_dir; ///< Last direction sign (+1 forward, –1 backward).
+		std::atomic<bool> userSeeked; ///< True if Seek(..., true) was called (forces a cache reset).
+		std::atomic<bool> preroll_on_next_fill; ///< True if next cache rebuild should include preroll offset.
- int64_t requested_display_frame; ///< Frame index the user requested.
+		std::atomic<int64_t> requested_display_frame; ///< Frame index the user requested.
int64_t current_display_frame; ///< Currently displayed frame (unused here, reserved).
- int64_t cached_frame_count; ///< Estimated count of frames currently stored in cache.
+		std::atomic<int64_t> cached_frame_count; ///< Estimated count of frames currently stored in cache.
- int64_t min_frames_ahead; ///< Minimum number of frames considered “ready” (pre-roll).
+		std::atomic<int64_t> min_frames_ahead; ///< Minimum number of frames considered “ready” (pre-roll).
int64_t timeline_max_frame; ///< Highest valid frame index in the timeline.
ReaderBase* reader; ///< The source reader (e.g., Timeline, FFmpegReader).
bool force_directional_cache; ///< (Reserved for future use).
- int64_t last_cached_index; ///< Index of the most recently cached frame.
+		std::atomic<int64_t> last_cached_index; ///< Index of the most recently cached frame.
+ mutable std::mutex seek_state_mutex; ///< Protects coherent seek state updates/consumption.
};
} // namespace openshot
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index c32c19dff..3cf0bd670 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -1461,6 +1461,11 @@ void Timeline::apply_json_to_clips(Json::Value change) {
// Add clip to timeline
AddClip(clip);
+ // Calculate start and end frames that this impacts, and remove those frames from the cache
+ int64_t new_starting_frame = (clip->Position() * info.fps.ToDouble()) + 1;
+ int64_t new_ending_frame = ((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
+ final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
+
} else if (change_type == "update") {
// Update existing clip
@@ -1747,6 +1752,8 @@ void Timeline::apply_json_to_timeline(Json::Value change) {
// Clear all caches
void Timeline::ClearAllCache(bool deep) {
+ // Get lock (prevent getting frames while this happens)
+	const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);
// Clear primary cache
if (final_cache) {
diff --git a/tests/Frame.cpp b/tests/Frame.cpp
index ffe4d84dd..d4d58ab9a 100644
--- a/tests/Frame.cpp
+++ b/tests/Frame.cpp
@@ -138,6 +138,14 @@ TEST_CASE( "Copy_Constructor", "[libopenshot][frame]" )
CHECK(f1.GetAudioSamplesCount() == f2.GetAudioSamplesCount());
}
+TEST_CASE( "GetSamplesPerFrame invalid rate inputs", "[libopenshot][frame]" )
+{
+ CHECK(Frame::GetSamplesPerFrame(/*frame_number=*/1, Fraction(0, 1), /*sample_rate=*/44100, /*channels=*/2) == 0);
+ CHECK(Frame::GetSamplesPerFrame(/*frame_number=*/1, Fraction(30, 0), /*sample_rate=*/44100, /*channels=*/2) == 0);
+ CHECK(Frame::GetSamplesPerFrame(/*frame_number=*/1, Fraction(30, 1), /*sample_rate=*/0, /*channels=*/2) == 0);
+ CHECK(Frame::GetSamplesPerFrame(/*frame_number=*/1, Fraction(30, 1), /*sample_rate=*/44100, /*channels=*/0) == 0);
+}
+
#ifdef USE_OPENCV
TEST_CASE( "Convert_Image", "[libopenshot][opencv][frame]" )
{
diff --git a/tests/Timeline.cpp b/tests/Timeline.cpp
index df9983a02..0c90fbd7c 100644
--- a/tests/Timeline.cpp
+++ b/tests/Timeline.cpp
@@ -1044,6 +1044,29 @@ TEST_CASE( "ApplyJSONDiff and FrameMappers", "[libopenshot][timeline]" )
CHECK(clip1.Reader()->Name() == "QtImageReader");
}
+TEST_CASE( "ApplyJSONDiff insert invalidates overlapping timeline cache", "[libopenshot][timeline]" )
+{
+ // Create timeline with no clips so cached frames are black placeholders
+ Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
+ t.Open();
+
+ // Cache a frame in the area where we'll insert a new clip
+	std::shared_ptr<Frame> cached_before = t.GetFrame(10);
+ REQUIRE(cached_before != nullptr);
+ REQUIRE(t.GetCache() != nullptr);
+ REQUIRE(t.GetCache()->Contains(10));
+
+ // Insert clip via JSON diff overlapping frame 10
+ std::stringstream path1;
+ path1 << TEST_MEDIA_PATH << "interlaced.png";
+ std::stringstream json_change;
+ json_change << "[{\"type\":\"insert\",\"key\":[\"clips\"],\"value\":{\"id\":\"INSERT_CACHE_INVALIDATE\",\"layer\":1,\"position\":0.0,\"start\":0,\"end\":10,\"reader\":{\"acodec\":\"\",\"audio_bit_rate\":0,\"audio_stream_index\":-1,\"audio_timebase\":{\"den\":1,\"num\":1},\"channel_layout\":4,\"channels\":0,\"display_ratio\":{\"den\":1,\"num\":1},\"duration\":3600.0,\"file_size\":\"160000\",\"fps\":{\"den\":1,\"num\":30},\"has_audio\":false,\"has_single_image\":true,\"has_video\":true,\"height\":200,\"interlaced_frame\":false,\"metadata\":{},\"path\":\"" << path1.str() << "\",\"pixel_format\":-1,\"pixel_ratio\":{\"den\":1,\"num\":1},\"sample_rate\":0,\"top_field_first\":true,\"type\":\"QtImageReader\",\"vcodec\":\"\",\"video_bit_rate\":0,\"video_length\":\"108000\",\"video_stream_index\":-1,\"video_timebase\":{\"den\":30,\"num\":1},\"width\":200}},\"partial\":false}]";
+ t.ApplyJsonDiff(json_change.str());
+
+ // Overlapping cached frame should be invalidated
+ CHECK(!t.GetCache()->Contains(10));
+}
+
TEST_CASE( "ApplyJSONDiff Update Reader Info", "[libopenshot][timeline]" )
{
// Create a timeline
diff --git a/tests/VideoCacheThread.cpp b/tests/VideoCacheThread.cpp
index 31d3cc51b..5493a1414 100644
--- a/tests/VideoCacheThread.cpp
+++ b/tests/VideoCacheThread.cpp
@@ -36,12 +36,12 @@ class TestableVideoCacheThread : public VideoCacheThread {
using VideoCacheThread::handleUserSeekWithPreroll;
using VideoCacheThread::computePrerollFrames;
- int64_t getLastCachedIndex() const { return last_cached_index; }
- void setLastCachedIndex(int64_t v) { last_cached_index = v; }
- void setPlayhead(int64_t v) { requested_display_frame = v; }
- void setMinFramesAhead(int64_t v) { min_frames_ahead = v; }
- void setLastDir(int d) { last_dir = d; }
- void forceUserSeekFlag() { userSeeked = true; }
+ int64_t getLastCachedIndex() const { return last_cached_index.load(); }
+ void setLastCachedIndex(int64_t v) { last_cached_index.store(v); }
+ void setPlayhead(int64_t v) { requested_display_frame.store(v); }
+ void setMinFramesAhead(int64_t v) { min_frames_ahead.store(v); }
+ void setLastDir(int d) { last_dir.store(d); }
+ void forceUserSeekFlag() { userSeeked.store(true); }
};
// ----------------------------------------------------------------------------