Clip.cpp
/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 */
// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include "Clip.h"

#include "AudioResampler.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#ifdef USE_IMAGEMAGICK
    #include "MagickUtilities.h"
    #include "ImageReader.h"
    #include "TextReader.h"
#endif

#include <Qt>

using namespace openshot;

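// Example usage (illustrative sketch only; "video.mp4" is a hypothetical path):
//
//     openshot::Clip c("video.mp4");
//     c.Open();
//     std::shared_ptr<openshot::Frame> f = c.GetFrame(1);
//     c.Close();
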
// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    ClipBase::End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    waveform = false;
    previous_properties = "";
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;

        // Init cache
        final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
    }
}

// Init reader's rotation (if any)
void Clip::init_reader_rotation() {
    // Don't init rotation if clip has keyframes
    if (rotation.GetCount() > 0)
        return;

    // Init rotation
    if (reader && reader->info.metadata.count("rotate") > 0) {
        // Use reader metadata rotation (if any)
        // This is typical with cell phone videos filmed in different orientations
        try {
            float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
            rotation = Keyframe(rotate_metadata);
        } catch (const std::exception& e) {}
    }
    else
        // Default no rotation
        rotation = Keyframe(0.0);
}

// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video format (or printf-style image sequence pattern)
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || path.find("%") != std::string::npos)
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot project file (Timeline)
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }

    // If no reader was found, try each reader in turn
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}

// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
        reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }

    // Close clip
    Close();
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject) {
            SetAttachedObject(trackedObject);
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
        }
    }
}

// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject) {
    parentTrackedObject = trackedObject;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject) {
    parentClipObject = clipObject;
}

// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // Delete previously allocated reader (if not related to new reader)
    // FrameMappers that point to the same allocated reader are ignored
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            // Determine if FrameMapper is pointing at the same allocated reader
            FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;
            }
        }
    }
    // Clear existing allocated reader (if different)
    if (allocated_reader && !is_same_reader) {
        reader->Close();
        allocated_reader->Close();
        delete allocated_reader;
        reader = NULL;
        allocated_reader = NULL;
    }

    // Set reader pointer
    reader = new_reader;

    // Set parent
    if (reader) {
        reader->ParentClip(this);

        // Init reader info struct
        init_reader_settings();
    }
}

// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Open the internal reader
void Clip::Open()
{
    if (reader)
    {
        // Open the reader
        reader->Open();
        is_open = true;

        // Copy Reader info to Clip
        info = reader->info;

        // Set some clip properties from the file reader
        if (end == 0.0)
            ClipBase::End(reader->info.duration);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Close the internal reader
void Clip::Close()
{
    if (is_open && reader) {
        ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

        // Close the reader
        reader->Close();
    }

    // Clear cache
    final_cache.Clear();
    is_open = false;
}

// Get end position of clip (trim end of video), which can be affected by the time curve.
float Clip::End() const
{
    // if a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
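        // Worked example (assumed values): a time curve covering 240 frames at 24 fps
        // yields End() = 240 / 24.0 = 10.0 seconds, regardless of the reader's duration.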
        return float(time.GetLength()) / fps;
    }
    else
        // just use the duration (as detected by the reader)
        return end;
}

// Override End() position
void Clip::End(float value) {
    ClipBase::End(value);
}

// Set associated Timeline pointer
void Clip::ParentTimeline(openshot::TimelineBase* new_timeline) {
    timeline = new_timeline;

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(NULL, clip_frame_number, NULL);
}

// Create an openshot::Frame object for a specific frame number of this reader.
// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(background_frame, clip_frame_number, NULL);
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Get frame object
        std::shared_ptr<Frame> frame = NULL;

        // Check cache
        frame = final_cache.GetFrame(clip_frame_number);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::GetFrame (Cached frame found)",
                "requested_frame", clip_frame_number);

            // Return cached frame
            return frame;
        }

        // Generate clip frame
        frame = GetOrCreateFrame(clip_frame_number);

        if (!background_frame) {
            // Create missing background_frame w/ transparent color (if needed)
            background_frame = std::make_shared<Frame>(clip_frame_number, frame->GetWidth(), frame->GetHeight(),
                                                       "#00000000", frame->GetAudioSamplesCount(),
                                                       frame->GetAudioChannelsCount());
        }

        // Get time mapped frame object (used to increase speed, change direction, etc...)
        apply_timemapping(frame);

        // Apply waveform image (if any)
        apply_waveform(frame, background_frame);

        // Apply local effects to the frame (if any)
        apply_effects(frame);

        // Apply global timeline effects (i.e. transitions & masks... if any)
        if (timeline != NULL && options != NULL) {
            if (options->is_top_clip) {
                // Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
                Timeline* timeline_instance = static_cast<Timeline*>(timeline);
                frame = timeline_instance->apply_effects(frame, background_frame->number, Layer());
            }
        }

        // Apply keyframe / transforms
        apply_keyframes(frame, background_frame);

        // Add final frame to cache
        final_cache.Add(frame);

        // Return processed 'frame'
        return frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}

// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // return last part of path
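    // (if no "." is present, find_last_of returns std::string::npos, and
    // npos + 1 wraps to 0, so the entire path is returned unchanged)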
    return path.substr(path.find_last_of(".") + 1);
}

// Reverse an audio buffer
void Clip::reverse_buffer(juce::AudioBuffer<float>* buffer)
{
    int number_of_samples = buffer->getNumSamples();
    int channels = buffer->getNumChannels();

    // Reverse array (create new buffer to hold the reversed version)
    auto *reversed = new juce::AudioBuffer<float>(channels, number_of_samples);
    reversed->clear();

    for (int channel = 0; channel < channels; channel++)
    {
        int n = 0;
        for (int s = number_of_samples - 1; s >= 0; s--, n++)
            reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
    }

    // Copy the samples back to the original array
    buffer->clear();
    // Loop through channels, and get audio samples
    for (int channel = 0; channel < channels; channel++)
        // Get the audio samples for this channel
        buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

    delete reversed;
}

// Adjust the audio and image of a time mapped frame
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        int64_t clip_frame_number = frame->number;
        int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

        // create buffer
        juce::AudioBuffer<float> *source_samples = nullptr;

        // Get delta (difference from this frame to the next time mapped frame: Y value)
        double delta = time.GetDelta(clip_frame_number + 1);
        bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

        // Determine length of source audio (in samples)
        // A delta of 1.0 == normal expected samples
        // A delta of 0.5 == 50% of normal expected samples
        // A delta of 2.0 == 200% of normal expected samples
        int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
                                                            Reader()->info.sample_rate,
                                                            Reader()->info.channels);
        int source_sample_count = round(target_sample_count * fabs(delta));
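        // Worked example (assumed values): at 30 fps and 44100 Hz, a frame carries
        // 44100 / 30 = 1470 target samples, so a 2x speed-up (delta = 2.0) must pull
        // round(1470 * 2.0) = 2940 source samples into this one output frame.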
        // Determine starting audio location
        AudioLocation location;
        if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2) {
            // No previous location OR gap detected
            location.frame = new_frame_number;
            location.sample_start = 0;

            // Create / Reset resampler
            // We don't want to interpolate between unrelated audio data
            if (resampler) {
                delete resampler;
            }
            // Init resampler with # channels from Reader (should match the timeline)
            resampler = new AudioResampler(Reader()->info.channels);

            // Allocate buffer of silence to initialize some data inside the resampler
            // To prevent it from becoming input limited
            juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
            init_samples.clear();
            resampler->SetBuffer(&init_samples, 1.0);
            resampler->GetResampledBuffer();

        } else {
            // Use previous location
            location = previous_location;
        }

        if (source_sample_count <= 0) {
            // Add silence and bail (we don't need any samples)
            frame->AddAudioSilence(target_sample_count);
            return;
        }

        // Allocate a new sample buffer for these delta frames
        source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
        source_samples->clear();

        // Loop through source frames, gathering audio samples until the buffer is full
        int remaining_samples = source_sample_count;
        int source_pos = 0;
        while (remaining_samples > 0) {
            std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
            int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

            if (frame_sample_count == 0) {
                // No samples found in source frame (fill with silence)
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                break;
            }
            if (remaining_samples - frame_sample_count >= 0) {
                // Use all frame samples & increment location
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
                }
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                remaining_samples -= frame_sample_count;
                source_pos += frame_sample_count;

            } else {
                // Use just what is needed
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
                }
                location.sample_start += remaining_samples;
                source_pos += remaining_samples;
                remaining_samples = 0;
            }
        }

        // Resize audio for current frame object + fill with silence
        // We are about to overwrite this with actual audio data (possibly resampled)
        frame->AddAudioSilence(target_sample_count);

        if (source_sample_count != target_sample_count) {
            // Resample audio (if needed)
            double resample_ratio = double(source_sample_count) / double(target_sample_count);
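            // Continuing the example above: 2940 source / 1470 target = ratio 2.0.
            // Note that plain resampling shifts pitch along with speed, so a 2x
            // speed-up also raises the audio roughly one octave.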
            resampler->SetBuffer(source_samples, resample_ratio);

            // Resample the data
            juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

            // Fill the frame with resampled data
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the new (resampled) samples to the frame object
                frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
            }
        } else {
            // Fill the frame
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the source samples to the frame object (no resampling needed)
                frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
            }
        }

        // Clean up
        delete source_samples;

        // Set previous location
        previous_location = location;
    }
}

// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
    try {
        // Init to requested frame
        int64_t clip_frame_number = adjust_frame_number_minimum(number);

        // Adjust for time-mapping (if any)
        if (enable_time && time.GetLength() > 1) {
            clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::GetOrCreateFrame (from reader)",
            "number", number, "clip_frame_number", clip_frame_number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(clip_frame_number);
        reader_frame->number = number; // Override frame # (since time-mapping might change it)

        // Return real frame
        if (reader_frame) {
            // Create a new copy of reader frame
            // This allows a clip to modify the pixels and audio of this frame without
            // changing the underlying reader's frame data
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            if (has_video.GetInt(number) == 0) {
                // No video, so add transparent pixels
                reader_copy->AddColor(QColor(Qt::transparent));
            }
            if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
                // No audio, so include silence (also, mute audio if past end of reader)
                reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
            }
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "number", number,
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}

// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);

    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentTrackedObject's properties
    if (parentTrackedObject && parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
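        // Worked example (assumed values): at 30 fps, Position() = 10.0s and
        // Start() = 2.0s give clip_start_position = 301 and clip_start_frame = 61,
        // so clip frame 1 maps to timeline frame 1 + 301 - 61 = 241.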
        // Get attached object's parent clip properties
        std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
        double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
        // Get attached object properties
        std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

        // Correct the parent Tracked Object properties by the clip's reference system
        float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
        float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
        float parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
        float parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
        float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

        // Add the parent Tracked Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    // Add the parentClipObject's properties
    else if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}

// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()) {
        parentObjectId = root["parentObjectId"].asString();
        if (parentObjectId.size() > 0 && parentObjectId != "") {
            AttachToObject(parentObjectId);
        } else {
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const auto existing_effect : root["effects"]) {
            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ((e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Timeline
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing allocated reader (if any)
                Reader(NULL);
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded by accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed)
            if (already_open && reader) {
                reader->Open();
            }
        }
    }

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Sort effects by order
void Clip::sort_effects()
{
    // sort effects
    effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object) {

        // Check if this clip has a parent timeline
        if (parentTimeline) {

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects) {

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame)
{
    // Loop through all effects on this clip
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        frame = effect->GetFrame(frame, frame->number);

    } // end effect loop
}

// Compare 2 floating point numbers for equality
bool Clip::isEqual(double a, double b)
{
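    // A tolerance of 1e-6 treats values that differ only by floating-point noise as
    // equal; get_transform() uses this to skip no-op translate/rotate/scale calls.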
    return fabs(a - b) < 0.000001;
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame) {
    // Skip out if video is disabled or this is an audio-only frame (no visualisation in use)
    if (!frame->has_image_data) {
        // Skip the rest of the image processing for performance reasons
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
        "frame->number", frame->number,
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    if (timeline) {
        Timeline *t = static_cast<Timeline *>(timeline);

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply waveform image to the source frame (if any)
void Clip::apply_waveform(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame) {

    if (!Waveform()) {
        // Exit if no waveform is needed
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::apply_waveform (Generate Waveform Image)",
        "frame->number", frame->number,
        "Waveform()", Waveform(),
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    // Get the color of the waveform
    int red = wave_color.red.GetInt(frame->number);
    int green = wave_color.green.GetInt(frame->number);
    int blue = wave_color.blue.GetInt(frame->number);
    int alpha = wave_color.alpha.GetInt(frame->number);

    // Generate Waveform Dynamically (the size of the timeline)
    source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
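    // The rendered waveform replaces this frame's visual image; from here on the
    // clip is composited like any other video frame.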
    frame->AddImage(source_image);
}

// Build the transform for this frame from the clip's keyframes (also applies alpha to the source image)
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    /* ALPHA & OPACITY */
    if (alpha.GetValue(frame->number) != 1.0)
    {
        float alpha_value = alpha.GetValue(frame->number);

        // Get source image's pixels
        unsigned char *pixels = source_image->bits();

        // Loop through pixels
        for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
        {
            // Apply alpha to pixel values (since we use a premultiplied value, we must
            // multiply the alpha with all colors).
            pixels[byte_index + 0] *= alpha_value;
            pixels[byte_index + 1] *= alpha_value;
            pixels[byte_index + 2] *= alpha_value;
            pixels[byte_index + 3] *= alpha_value;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::get_transform (Set Alpha & Opacity)",
            "alpha_value", alpha_value,
            "frame->number", frame->number);
    }

    /* RESIZE SOURCE IMAGE - based on scale type */
    QSize source_size = source_image->size();

    // Apply stretch scale to correctly fit the bounding-box
    if (parentTrackedObject) {
        scale = SCALE_STRETCH;
    }

    switch (scale)
    {
        case (SCALE_FIT): {
            source_size.scale(width, height, Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_FIT)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(width, height, Qt::IgnoreAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_STRETCH)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_CROP)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_NONE): {
            // Image is already the original size (i.e. no scaling mode) relative
            // to the preview window size (i.e. timeline / preview ratio). No further
            // scaling is needed here.
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_NONE)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
    }

    // Initialize parent object's properties (Clip or Tracked Object)
    float parentObject_location_x = 0.0;
    float parentObject_location_y = 0.0;
    float parentObject_scale_x = 1.0;
    float parentObject_scale_y = 1.0;
    float parentObject_shear_x = 0.0;
    float parentObject_shear_y = 0.0;
    float parentObject_rotation = 0.0;

    // Get the parentClipObject properties
    if (parentClipObject) {

        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

        // Get parent object's properties (Clip)
        parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
    }

    // Get the parentTrackedObject properties
    if (parentTrackedObject) {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

        // Get parentTrackedObject's parent clip's properties
        std::map<std::string, float> trackedObjectParentClipProperties =
            parentTrackedObject->GetParentClipProperties(timeline_frame_number);

        // Get the attached object's parent clip's properties
        if (!trackedObjectParentClipProperties.empty())
        {
            // Get parent object's properties (Tracked Object)
            float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];

            // Access the parentTrackedObject's properties
            std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

            // Get the Tracked Object's properties and correct them by the clip's reference system
            parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
            parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
            parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
            parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
            parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
        }
        else
        {
            // Access the parentTrackedObject's properties
            std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);

            // Get the Tracked Object's properties and correct them by the clip's reference system
            parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
            parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
            parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
            parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
            parentObject_rotation = trackedObjectProperties["r"];
        }
    }

    /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
    float x = 0.0; // left
    float y = 0.0; // top

    // Adjust size for scale x and scale y
    float sx = scale_x.GetValue(frame->number); // percentage X scale
    float sy = scale_y.GetValue(frame->number); // percentage Y scale

    // Change clip's scale to parentObject's scale
    if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
        sx *= parentObject_scale_x;
        sy *= parentObject_scale_y;
    }

    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;

    switch (gravity)
    {
        case (GRAVITY_TOP_LEFT):
            // This is only here to prevent unused-enum warnings
            break;
        case (GRAVITY_TOP):
            x = (width - scaled_source_width) / 2.0; // center
            break;
        case (GRAVITY_TOP_RIGHT):
            x = width - scaled_source_width; // right
            break;
        case (GRAVITY_LEFT):
            y = (height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_CENTER):
            x = (width - scaled_source_width) / 2.0; // center
            y = (height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_RIGHT):
            x = width - scaled_source_width; // right
            y = (height - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_BOTTOM_LEFT):
            y = (height - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM):
            x = (width - scaled_source_width) / 2.0; // center
            y = (height - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM_RIGHT):
            x = width - scaled_source_width; // right
            y = (height - scaled_source_height); // bottom
            break;
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::get_transform (Gravity)",
        "frame->number", frame->number,
        "source_clip->gravity", gravity,
        "scaled_source_width", scaled_source_width,
        "scaled_source_height", scaled_source_height);

1539 QTransform transform;
1540
1541 /* LOCATION, ROTATION, AND SCALE */
1542 float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1543 x += (width * (location_x.GetValue(frame->number) + parentObject_location_x)); // move in percentage of final width
1544 y += (height * (location_y.GetValue(frame->number) + parentObject_location_y)); // move in percentage of final height
1545 float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1546 float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1547 float origin_x_value = origin_x.GetValue(frame->number);
1548 float origin_y_value = origin_y.GetValue(frame->number);
1549
1550 // Transform source image (if needed)
1551 ZmqLogger::Instance()->AppendDebugMethod(
1552 "Clip::get_transform (Build QTransform - if needed)",
1553 "frame->number", frame->number,
1554 "x", x, "y", y,
1555 "r", r,
1556 "sx", sx, "sy", sy);
1557
1558 if (!isEqual(x, 0) || !isEqual(y, 0)) {
1559 // TRANSLATE/MOVE CLIP
1560 transform.translate(x, y);
1561 }
1562 if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
1563 // ROTATE CLIP (around origin_x, origin_y)
1564 float origin_x_offset = (scaled_source_width * origin_x_value);
1565 float origin_y_offset = (scaled_source_height * origin_y_value);
1566 transform.translate(origin_x_offset, origin_y_offset);
1567 transform.rotate(r);
1568 transform.shear(shear_x_value, shear_y_value);
1569 transform.translate(-origin_x_offset, -origin_y_offset);
1570 }
1571 // SCALE CLIP (if needed)
1572 float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1573 float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1574 if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
1575 transform.scale(source_width_scale, source_height_scale);
1576 }
1577
1578 return transform;
1579}
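Read as a whole, get_transform() composes the matrix in a fixed order: gravity supplies the base x/y, the location keyframes shift it, rotation and shear pivot around the origin point, and the raw-pixel scale is appended last (QTransform applies the most recently appended operation to points first, which is why the pivot translate pair brackets the rotation). The following standalone sketch is not libopenshot API and all values are hypothetical; it only reproduces the same call order with plain Qt: a 1280x720 source at 50% scale, centered (GRAVITY_CENTER) on a 1920x1080 canvas and rotated 90 degrees around the default origin (0.5, 0.5).

    // Standalone sketch (hypothetical values, not libopenshot API)
    #include <QPointF>
    #include <QTransform>
    #include <iostream>

    int main() {
        const float width = 1920.0f, height = 1080.0f;   // canvas size
        const float scaled_w = 1280.0f * 0.5f;           // 640 (source width * sx)
        const float scaled_h = 720.0f * 0.5f;            // 360 (source height * sy)

        // GRAVITY_CENTER: center the scaled source on both axes
        const float x = (width - scaled_w) / 2.0f;       // 640
        const float y = (height - scaled_h) / 2.0f;      // 360

        QTransform t;
        t.translate(x, y);                               // TRANSLATE/MOVE CLIP
        t.translate(scaled_w * 0.5f, scaled_h * 0.5f);   // pivot to origin point
        t.rotate(90.0);                                  // ROTATE (shear would go here)
        t.translate(-scaled_w * 0.5f, -scaled_h * 0.5f); // pivot back
        t.scale(0.5, 0.5);                               // SCALE raw pixels last

        // The source's center (in raw pixels) lands on the canvas center
        const QPointF c = t.map(QPointF(1280.0 / 2.0, 720.0 / 2.0));
        std::cout << c.x() << ", " << c.y() << std::endl; // prints: 960, 540
        return 0;
    }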
1580
1581// Adjust frame number for Clip position and start (which can result in a different number)
1582int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1583
1584 // Get clip position from parent clip (if any)
1585 float position = 0.0;
1586 float start = 0.0;
1587 Clip *parent = static_cast<Clip *>(ParentClip());
1588 if (parent) {
1589 position = parent->Position();
1590 start = parent->Start();
1591 }
1592
1593 // Adjust start frame and position based on parent clip.
1594 // This ensures the same frame # is used by mapped readers and clips
1595 // when calculating samples per frame, which prevents gaps and
1596 // mismatches in the # of samples.
1597 int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1598 int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1599 int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1600
1601 return frame_number;
1602}
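The adjustment above is pure frame arithmetic: the parent clip's timeline position and trim start are each converted to 1-based frame indices, and their difference is added to the clip frame number. A minimal sketch of that arithmetic with hypothetical values (not libopenshot API; 30 fps, parent positioned at 2.0 seconds with 0.5 seconds trimmed from its start):

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main() {
        const double fps = 30.0;      // assumed frame rate
        const double position = 2.0;  // hypothetical parent position (seconds)
        const double start = 0.5;     // hypothetical parent trim start (seconds)

        // Same arithmetic as adjust_timeline_framenumber()
        const int64_t clip_start_frame = static_cast<int64_t>(start * fps) + 1;                   // 16
        const int64_t clip_start_position = static_cast<int64_t>(std::round(position * fps)) + 1; // 61
        const int64_t clip_frame_number = 1;
        const int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;  // 46

        std::cout << frame_number << std::endl;  // clip frame 1 maps to timeline frame 46
        return 0;
    }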