OpenShot Library | libopenshot  0.7.0
Clip.cpp
Go to the documentation of this file.
1 
9 // Copyright (c) 2008-2019 OpenShot Studios, LLC
10 //
11 // SPDX-License-Identifier: LGPL-3.0-or-later
12 
13 #include "Clip.h"
14 
15 #include "AudioResampler.h"
16 #include "Exceptions.h"
17 #include "FFmpegReader.h"
18 #include "FrameMapper.h"
19 #include "QtImageReader.h"
20 #include "ChunkReader.h"
21 #include "DummyReader.h"
22 #include "Timeline.h"
23 #include "ZmqLogger.h"
25 
26 #include <algorithm>
27 #include <cmath>
28 #include <sstream>
29 #include <QPainter>
30 
31 #ifdef USE_IMAGEMAGICK
32  #include "MagickUtilities.h"
33  #include "ImageReader.h"
34  #include "TextReader.h"
35 #endif
36 
37 #include <Qt>
38 
39 using namespace openshot;
40 
41 namespace {
42  struct CompositeChoice { const char* name; CompositeType value; };
43  const CompositeChoice composite_choices[] = {
44  {"Normal", COMPOSITE_SOURCE_OVER},
45 
46  // Darken group
47  {"Darken", COMPOSITE_DARKEN},
48  {"Multiply", COMPOSITE_MULTIPLY},
49  {"Color Burn", COMPOSITE_COLOR_BURN},
50 
51  // Lighten group
52  {"Lighten", COMPOSITE_LIGHTEN},
53  {"Screen", COMPOSITE_SCREEN},
54  {"Color Dodge", COMPOSITE_COLOR_DODGE},
55  {"Add", COMPOSITE_PLUS},
56 
57  // Contrast group
58  {"Overlay", COMPOSITE_OVERLAY},
59  {"Soft Light", COMPOSITE_SOFT_LIGHT},
60  {"Hard Light", COMPOSITE_HARD_LIGHT},
61 
62  // Compare
63  {"Difference", COMPOSITE_DIFFERENCE},
64  {"Exclusion", COMPOSITE_EXCLUSION},
65  };
66  const int composite_choices_count = sizeof(composite_choices)/sizeof(CompositeChoice);
67 }
68 
// Init default settings for a clip
// Resets every user-adjustable clip property (position, layer, trimming,
// transform keyframes, audio curves, parent attachments) to its default.
{
	// Init clip settings
	Position(0.0);
	Layer(0);
	Start(0.0);
	ClipBase::End(0.0);
	scale = SCALE_FIT;
	waveform = false;
	reader_orientation_mode = ReaderOrientationMode::Reader;
	parentObjectId = "";

	// Init scale curves (1.0 == 100% of original size)
	scale_x = Keyframe(1.0);
	scale_y = Keyframe(1.0);

	// Init location curves (0.0 == no offset from the gravity position)
	location_x = Keyframe(0.0);
	location_y = Keyframe(0.0);

	// Init alpha (1.0 == fully opaque)
	alpha = Keyframe(1.0);

	// Init time & volume (1.0 == normal speed / 100% volume)
	time = Keyframe(1.0);
	volume = Keyframe(1.0);

	// Init audio waveform color (opaque blue: RGBA 0,123,255,255)
	wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

	// Init shear and perspective curves
	// (origin 0.5/0.5 presumably means "clip center" — confirm in apply_keyframes;
	//  -1.0 on the perspective corners appears to be an "unset" sentinel)
	shear_x = Keyframe(0.0);
	shear_y = Keyframe(0.0);
	origin_x = Keyframe(0.5);
	origin_y = Keyframe(0.5);
	perspective_c1_x = Keyframe(-1.0);
	perspective_c1_y = Keyframe(-1.0);
	perspective_c2_x = Keyframe(-1.0);
	perspective_c2_y = Keyframe(-1.0);
	perspective_c3_x = Keyframe(-1.0);
	perspective_c3_y = Keyframe(-1.0);
	perspective_c4_x = Keyframe(-1.0);
	perspective_c4_y = Keyframe(-1.0);

	// Init audio channel filter and mappings (-1 presumably disables filtering/remapping)
	channel_filter = Keyframe(-1.0);
	channel_mapping = Keyframe(-1.0);

	// Init audio and video overrides
	// (-1 == no override; a value of 0 mutes/hides — see GetOrCreateFrame)
	has_audio = Keyframe(-1.0);
	has_video = Keyframe(-1.0);

	// Initialize the attached object and attached clip as null pointers
	parentTrackedObject = nullptr;
	parentClipObject = NULL;

	// Init reader info struct
}
136 
// Init reader info details
// Copies the attached reader's info struct into this clip;
// does nothing when no reader has been set.
	if (reader) {
		// Init rotation (if any)

		// Initialize info struct
		info = reader->info;

		// Init cache
	}
}
150 
	// Only apply metadata rotation if clip rotation has not been explicitly set.
	if (rotation.GetCount() > 0 || !reader)
		return;

	// If the reader already bakes orientation metadata into its output frames,
	// the clip must not rotate a second time — pin rotation at 0.
	if (reader->ApplyOrientationMetadata()) {
		rotation = Keyframe(0.0f);
		return;
	}

	const auto rotate_meta = reader->info.metadata.find("rotate");
	if (rotate_meta == reader->info.metadata.end()) {
		// Ensure rotation keyframes always start with a default 0° point.
		rotation = Keyframe(0.0f);
		return;
	}

	float rotate_angle = 0.0f;
	try {
		rotate_angle = strtof(rotate_meta->second.c_str(), nullptr);
	} catch (const std::exception& e) {
		// NOTE(review): strtof() reports failure via its end pointer / errno and
		// does not throw, so this handler is likely dead code — confirm.
		return; // ignore invalid metadata
	}

	rotation = Keyframe(rotate_angle);

	// Do not overwrite user-authored scale curves.
	auto has_default_scale = [](const Keyframe& kf) {
		return kf.GetCount() == 1 && fabs(kf.GetPoint(0).co.Y - 1.0) < 0.00001;
	};
	if (!has_default_scale(scale_x) || !has_default_scale(scale_y))
		return;

	// No need to adjust scaling when the metadata rotation is effectively zero.
	if (fabs(rotate_angle) < 0.0001f)
		return;

	float w = static_cast<float>(reader->info.width);
	float h = static_cast<float>(reader->info.height);
	if (w <= 0.0f || h <= 0.0f)
		return;

	float rad = rotate_angle * static_cast<float>(M_PI) / 180.0f;

	// Axis-aligned bounding box of the frame after rotation
	float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
	float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));
	if (new_width <= 0.0f || new_height <= 0.0f)
		return;

	// Uniform scale that keeps the rotated bounding box inside the original frame
	float uniform_scale = std::min(w / new_width, h / new_height);

	scale_x = Keyframe(uniform_scale);
	scale_y = Keyframe(uniform_scale);
}
205 
206 // Default Constructor for a clip
207 Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
208 {
209  // Init all default settings
210  init_settings();
211 }
212 
// Constructor with reader
// NOTE: the clip does NOT take ownership of new_reader (allocated_reader
// stays NULL), so the caller remains responsible for deleting it.
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	init_settings();

	// Open and Close the reader (to set the duration of the clip)
	Open();
	Close();

	// Update duration and set parent
	if (reader) {
		ClipBase::End(reader->info.duration);
		reader->ParentClip(this);
		// Init reader info struct
	}
}
231 
// Constructor with filepath
// Creates a reader appropriate for the file type via CreateReader(); the
// clip OWNS this reader (tracked in allocated_reader) and deletes it in ~Clip.
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	init_settings();
	reader = CreateReader(path);

	// Update duration and set parent
	if (reader) {
		ClipBase::End(reader->info.duration);
		reader->ParentClip(this);
		// Record ownership so the destructor can free this reader
		allocated_reader = reader;
		// Init reader info struct
	}
}
248 
249 ReaderBase* Clip::CreateReader(std::string path, bool inspect_reader)
250 {
251  // Get file extension (and convert to lower case)
252  std::string ext = get_file_extension(path);
253  std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
254 
255  // Determine if common video formats (or image sequences)
256  if (ext=="avi" || ext=="flac" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
257  ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || ext=="gif" || path.find("%") != std::string::npos)
258  {
259  try
260  {
261  return new openshot::FFmpegReader(path, inspect_reader);
262  } catch(...) { }
263  }
264  if (ext=="osp")
265  {
266  try
267  {
268  return new openshot::Timeline(path, true);
269  } catch(...) { }
270  }
271 
272  // If no video found, try each reader
273  try
274  {
275  return new openshot::QtImageReader(path, inspect_reader);
276  } catch(...) {
277  try
278  {
279  return new openshot::FFmpegReader(path, inspect_reader);
280  } catch(...) { }
281  }
282 
283  return NULL;
284 }
285 
// Destructor
{
	// Delete the reader only when this clip allocated it (filepath constructor
	// / CreateReader path); externally supplied readers belong to the caller.
	if (allocated_reader) {
		delete allocated_reader;
		allocated_reader = NULL;
		reader = NULL;
	}

	// Close the resampler
	if (resampler) {
		delete resampler;
		resampler = NULL;
	}

	// Close clip (clears the frame cache; reader is already NULL here
	// whenever it was clip-owned)
	Close();
}
305 
306 // Attach clip to bounding box
307 void Clip::AttachToObject(std::string object_id)
308 {
309  // Search for the tracked object on the timeline
310  Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
311 
312  if (parentTimeline) {
313  // Create a smart pointer to the tracked object from the timeline
314  std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
315  Clip* clipObject = parentTimeline->GetClip(object_id);
316 
317  // Check for valid tracked object
318  if (trackedObject){
319  SetAttachedObject(trackedObject);
320  parentClipObject = NULL;
321  }
322  else if (clipObject) {
323  SetAttachedClip(clipObject);
324  parentTrackedObject = nullptr;
325  }
326  }
327 }
328 
329 // Set the pointer to the trackedObject this clip is attached to
330 void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
331  parentTrackedObject = trackedObject;
332 }
333 
334 // Set the pointer to the clip this clip is attached to
335 void Clip::SetAttachedClip(Clip* clipObject){
336  parentClipObject = clipObject;
337 }
338 
// Set the current reader, releasing any clip-owned reader it replaces.
void Clip::Reader(ReaderBase* new_reader)
{
	// Delete previously allocated reader (if not related to new reader)
	// FrameMappers that point to the same allocated reader are ignored
	bool is_same_reader = false;
	if (new_reader && allocated_reader) {
		if (new_reader->Name() == "FrameMapper") {
			// Determine if FrameMapper is pointing at the same allocated reader
			FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
			if (allocated_reader == clip_mapped_reader->Reader()) {
				is_same_reader = true;
			}
		}
	}
	// Clear existing allocated reader (if different)
	if (allocated_reader && !is_same_reader) {
		reader->Close();
		allocated_reader->Close();
		delete allocated_reader;
		reader = NULL;
		allocated_reader = NULL;
	}

	// set reader pointer
	reader = new_reader;

	// set parent
	if (reader) {
		// Let the reader bake orientation metadata into its frames only when
		// this clip is configured for Reader-side orientation handling
		reader->ApplyOrientationMetadata(reader_orientation_mode == ReaderOrientationMode::Reader);
		reader->ParentClip(this);

		// Init reader info struct
	}
}
375 
// Get the current reader (throws ReaderClosed when none has been set)
{
	if (reader)
		return reader;
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
385 
// Open the internal reader
// Also copies the reader's info into this clip, and derives End() from the
// reader duration when End() was never set (still 0.0).
{
	if (reader)
	{
		// Open the reader
		reader->Open();
		is_open = true;

		// Copy Reader info to Clip
		info = reader->info;

		// Set some clip properties from the file reader
		if (end == 0.0)
			ClipBase::End(reader->info.duration);
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
406 
// Close the internal reader
// Safe to call repeatedly; always clears the frame cache and the open flag,
// even when the reader was never opened.
{
	if (is_open && reader) {
		ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

		// Close the reader
		reader->Close();
	}

	// Clear cache
	final_cache.Clear();
	is_open = false;
}
421 
422 // Get end position of clip (trim end of video), which can be affected by the time curve.
423 float Clip::End() const
424 {
425  // if a time curve is present, use its length
426  if (time.GetCount() > 1)
427  {
428  // Determine the FPS fo this clip
429  float fps = 24.0;
430  if (reader)
431  // file reader
432  fps = reader->info.fps.ToFloat();
433  else
434  // Throw error if reader not initialized
435  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
436 
437  return float(time.GetLength()) / fps;
438  }
439  else
440  // just use the duration (as detected by the reader)
441  return end;
442 }
443 
// Override End() position (seconds), bypassing the time-curve logic in End() const
void Clip::End(float value) {
	ClipBase::End(value);
}
448 
// Set associated Timeline pointer
	timeline = new_timeline;

	// Clear cache (rendered frames depend on the timeline, so they may be stale)
	final_cache.Clear();
}
456 
457 // Create an openshot::Frame object for a specific frame number of this reader.
458 std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
459 {
460  // Call override of GetFrame
461  return GetFrame(NULL, clip_frame_number, NULL);
462 }
463 
464 // Create an openshot::Frame object for a specific frame number of this reader.
465 // NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
466 std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
467 {
468  // Call override of GetFrame
469  return GetFrame(background_frame, clip_frame_number, NULL);
470 }
471 
// Use an existing openshot::Frame object and draw this Clip's frame onto it.
// Pipeline order: time-mapping -> waveform -> pre-keyframe effects ->
// keyframe transforms -> post-keyframe effects -> composite onto background.
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

	if (reader)
	{
		// Get frame object
		std::shared_ptr<Frame> frame = NULL;

		// Generate clip frame
		frame = GetOrCreateFrame(clip_frame_number);

		// Get frame size and frame # — the background frame, when present,
		// defines the timeline's frame number and canvas size
		int64_t timeline_frame_number = clip_frame_number;
		QSize timeline_size(frame->GetWidth(), frame->GetHeight());
		if (background_frame) {
			// If a background frame is provided, use it instead
			timeline_frame_number = background_frame->number;
			timeline_size.setWidth(background_frame->GetWidth());
			timeline_size.setHeight(background_frame->GetHeight());
		}

		// Get time mapped frame object (used to increase speed, change direction, etc...)
		apply_timemapping(frame);

		// Apply waveform image (if any)
		apply_waveform(frame, timeline_size);

		// Apply effects BEFORE applying keyframes (if any local or global effects are used)
		apply_effects(frame, timeline_frame_number, options, true);

		// Apply keyframe / transforms to current clip image
		apply_keyframes(frame, timeline_size);

		// Apply effects AFTER applying keyframes (if any local or global effects are used)
		apply_effects(frame, timeline_frame_number, options, false);

		// Timeline composition can paint directly into the timeline-owned background
		// without mutating the cached clip frame.
		if (options) {
			if (!background_frame) {
				// Synthesize a fully transparent background matching this frame
				background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
														   "#00000000", frame->GetAudioSamplesCount(),
														   frame->GetAudioChannelsCount());
			}
			apply_background(frame, background_frame, false);
			return frame;
		}

		// No background: return the frame directly.
		if (!background_frame) {
			return frame;
		}

		// Always composite on a copy so cached frame pixels remain immutable.
		auto output = std::make_shared<Frame>(*frame.get());
		apply_background(output, background_frame, true);
		return output;
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
538 
539 // Look up an effect by ID
540 openshot::EffectBase* Clip::GetEffect(const std::string& id)
541 {
542  // Find the matching effect (if any)
543  for (const auto& effect : effects) {
544  if (effect->Id() == id) {
545  return effect;
546  }
547  }
548  return nullptr;
549 }
550 
// Return the associated ParentClip (if any)
// Lazily resolves parentObjectId to a clip or tracked object on first access.
	if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
		// Attach parent clip OR object to this clip
		AttachToObject(parentObjectId);
	}
	return parentClipObject;
}
559 
560 // Return the associated Parent Tracked Object (if any)
561 std::shared_ptr<openshot::TrackedObjectBase> Clip::GetParentTrackedObject() {
562  if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
563  // Attach parent clip OR object to this clip
564  AttachToObject(parentObjectId);
565  }
566  return parentTrackedObject;
567 }
568 
569 // Get file extension
570 std::string Clip::get_file_extension(std::string path)
571 {
572  // Return last part of path safely (handle filenames without a dot)
573  const auto dot_pos = path.find_last_of('.');
574  if (dot_pos == std::string::npos || dot_pos + 1 >= path.size()) {
575  return std::string();
576  }
577 
578  return path.substr(dot_pos + 1);
579 }
580 
// Adjust the audio and image of a time mapped frame
// Rebuilds the frame's audio by pulling samples from the (possibly remapped
// and/or reversed) source frames, then resampling to the new playback speed.
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

	// Check for a valid time map curve
	if (time.GetLength() > 1)
	{
		// Serialize with other frame requests; resampler state is shared
		const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

		int64_t clip_frame_number = frame->number;
		int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

		// create buffer
		juce::AudioBuffer<float> *source_samples = nullptr;

		// Get delta (difference from this frame to the next time mapped frame: Y value)
		double delta = time.GetDelta(clip_frame_number + 1);
		const bool prev_is_increasing = time.IsIncreasing(clip_frame_number);
		const bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

		// Determine length of source audio (in samples)
		// A delta of 1.0 == normal expected samples
		// A delta of 0.5 == 50% of normal expected samples
		// A delta of 2.0 == 200% of normal expected samples
		// NOTE(review): the 4-argument GetSamplesPerFrame overload used in
		// GetOrCreateFrame also passes info.sample_rate — confirm this call
		// isn't missing that argument.
		int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
											Reader()->info.channels);
		int source_sample_count = round(target_sample_count * fabs(delta));

		// Determine starting audio location
		AudioLocation location;
		if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2 || prev_is_increasing != is_increasing) {
			// No previous location OR gap detected
			location.frame = new_frame_number;
			location.sample_start = 0;

			// Create / Reset resampler
			// We don't want to interpolate between unrelated audio data
			if (resampler) {
				delete resampler;
				resampler = nullptr;
			}
			// Init resampler with # channels from Reader (should match the timeline)
			resampler = new AudioResampler(Reader()->info.channels);

			// Allocate buffer of silence to initialize some data inside the resampler
			// To prevent it from becoming input limited
			juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
			init_samples.clear();
			resampler->SetBuffer(&init_samples, 1.0);
			resampler->GetResampledBuffer();

		} else {
			// Use previous location
			location = previous_location;
		}

		if (source_sample_count <= 0) {
			// Add silence and bail (we don't need any samples)
			frame->AddAudioSilence(target_sample_count);
			return;
		}

		// Allocate a new sample buffer for these delta frames
		source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
		source_samples->clear();

		// Determine ending audio location
		int remaining_samples = source_sample_count;
		int source_pos = 0;
		while (remaining_samples > 0) {
			std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
			int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

			// Inform FrameMapper of the direction for THIS mapper frame
			if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
				fm->SetDirectionHint(is_increasing);
			}
			source_frame->SetAudioDirection(is_increasing);

			if (frame_sample_count == 0) {
				// No samples found in source frame (fill with silence)
				// NOTE: breaking here leaves the rest of source_samples silent
				if (is_increasing) {
					location.frame++;
				} else {
					location.frame--;
				}
				location.sample_start = 0;
				break;
			}
			if (remaining_samples - frame_sample_count >= 0) {
				// Use all frame samples & increment location
				for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
					source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
				}
				if (is_increasing) {
					location.frame++;
				} else {
					location.frame--;
				}
				location.sample_start = 0;
				remaining_samples -= frame_sample_count;
				source_pos += frame_sample_count;

			} else {
				// Use just what is needed (and reverse samples)
				for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
					source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
				}
				location.sample_start += remaining_samples;
				// NOTE(review): remaining_samples is zeroed BEFORE being added to
				// source_pos, so source_pos is not advanced here. Harmless since
				// the loop exits immediately, but likely unintended ordering — confirm.
				remaining_samples = 0;
				source_pos += remaining_samples;
			}

		}

		// Resize audio for current frame object + fill with silence
		// We are fixing to clobber this with actual audio data (possibly resampled)
		frame->AddAudioSilence(target_sample_count);

		if (source_sample_count != target_sample_count) {
			// Resample audio (if needed)
			double resample_ratio = double(source_sample_count) / double(target_sample_count);
			resampler->SetBuffer(source_samples, resample_ratio);

			// Resample the data
			juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

			// Fill the frame with resampled data
			for (int channel = 0; channel < Reader()->info.channels; channel++) {
				// Add new (slower) samples, to the frame object
				frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
			}
		} else {
			// Fill the frame
			for (int channel = 0; channel < Reader()->info.channels; channel++) {
				// Add new (slower) samples, to the frame object
				frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
			}
		}

		// Clean up
		delete source_samples;

		// Set previous location
		previous_location = location;
	}
}
733 
734 // Adjust frame number minimum value
735 int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
736 {
737  // Never return a frame number 0 or below
738  if (frame_number < 1)
739  return 1;
740  else
741  return frame_number;
742 
743 }
744 
// Get or generate a blank frame
// Returns a COPY of the reader's frame (so the clip can safely mutate pixels
// and audio), honoring has_video/has_audio overrides; falls back to a silent
// black frame when the reader fails or throws.
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
	try {
		// Init to requested frame
		int64_t clip_frame_number = adjust_frame_number_minimum(number);
		bool is_increasing = true;

		// Adjust for time-mapping (if any)
		if (enable_time && time.GetLength() > 1) {
			is_increasing = time.IsIncreasing(clip_frame_number + 1);
			const int64_t time_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
			if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
				// Inform FrameMapper which direction this mapper frame is being requested
				fm->SetDirectionHint(is_increasing);
			}
			clip_frame_number = time_frame_number;
		}

		// Debug output
			"Clip::GetOrCreateFrame (from reader)",
			"number", number, "clip_frame_number", clip_frame_number);

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		auto reader_frame = reader->GetFrame(clip_frame_number);
		if (reader_frame) {
			// Override frame # (due to time-mapping might change it)
			reader_frame->number = number;
			reader_frame->SetAudioDirection(is_increasing);

			// Return real frame
			// Create a new copy of reader frame
			// This allows a clip to modify the pixels and audio of this frame without
			// changing the underlying reader's frame data
			auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
			if (has_video.GetInt(number) == 0) {
				// No video, so add transparent pixels
				reader_copy->AddColor(QColor(Qt::transparent));
			}
			if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
				// No audio, so include silence (also, mute audio if past end of reader)
				reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
			}
			return reader_copy;
		}

	} catch (const ReaderClosed & e) {
		// Reader was closed mid-request; fall through to the blank frame below
	} catch (const OutOfBoundsFrame & e) {
		// Requested frame is outside the reader's range; fall through as well
	}

	// Estimate # of samples needed for this frame
	int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

	// Debug output
		"Clip::GetOrCreateFrame (create blank)",
		"number", number,
		"estimated_samples_in_frame", estimated_samples_in_frame);

	// Create blank frame
	auto new_frame = std::make_shared<Frame>(
		number, reader->info.width, reader->info.height,
		"#000000", estimated_samples_in_frame, reader->info.channels);
	new_frame->SampleRate(reader->info.sample_rate);
	new_frame->ChannelsLayout(reader->info.channel_layout);
	new_frame->AddAudioSilence(estimated_samples_in_frame);
	return new_frame;
}
816 
817 // Generate JSON string of this object
818 std::string Clip::Json() const {
819 
820  // Return formatted string
821  return JsonValue().toStyledString();
822 }
823 
824 // Get all properties for a specific frame
825 std::string Clip::PropertiesJSON(int64_t requested_frame) const {
826 
827  // Generate JSON properties list
828  Json::Value root;
829  root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
830  root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
831  root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
832  root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
833  root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
834  root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
835  root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
836  root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
837  root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
838  root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
839  root["composite"] = add_property_json("Composite", composite, "int", "", NULL, 0, composite_choices_count - 1, false, requested_frame);
840  root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
841  root["waveform_mode"] = add_property_json("Waveform Mode", waveform_mode, "int", "", NULL, 0, AUDIO_VISUALIZATION_RADIAL_BARS, false, requested_frame);
842  root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
843 
844  // Add gravity choices (dropdown style)
845  root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
846  root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
847  root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
848  root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
849  root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
850  root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
851  root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
852  root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
853  root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));
854 
855  // Add scale choices (dropdown style)
856  root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
857  root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
858  root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
859  root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));
860 
861  // Add frame number display choices (dropdown style)
862  root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
863  root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
864  root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
865  root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));
866 
867  // Add volume mixing choices (dropdown style)
868  root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
869  root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
870  root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));
871 
872  // Add composite choices (dropdown style)
873  for (int i = 0; i < composite_choices_count; ++i)
874  root["composite"]["choices"].append(add_property_choice_json(composite_choices[i].name, composite_choices[i].value, composite));
875 
876  // Add waveform choices (dropdown style)
877  root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
878  root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));
879 
880  // Add waveform mode choices (dropdown style)
881  root["waveform_mode"]["choices"].append(add_property_choice_json("Waveform", AUDIO_VISUALIZATION_WAVEFORM, waveform_mode));
882  root["waveform_mode"]["choices"].append(add_property_choice_json("Filled Waveform", AUDIO_VISUALIZATION_FILLED_WAVEFORM, waveform_mode));
883  root["waveform_mode"]["choices"].append(add_property_choice_json("Bars", AUDIO_VISUALIZATION_BARS, waveform_mode));
884  root["waveform_mode"]["choices"].append(add_property_choice_json("Radial", AUDIO_VISUALIZATION_RADIAL, waveform_mode));
885  root["waveform_mode"]["choices"].append(add_property_choice_json("Radial Bars", AUDIO_VISUALIZATION_RADIAL_BARS, waveform_mode));
886  root["waveform_mode"]["choices"].append(add_property_choice_json("Spectrum", AUDIO_VISUALIZATION_SPECTRUM, waveform_mode));
887  root["waveform_mode"]["choices"].append(add_property_choice_json("Phase Scope", AUDIO_VISUALIZATION_PHASE_SCOPE, waveform_mode));
888  root["waveform_mode"]["choices"].append(add_property_choice_json("Particles", AUDIO_VISUALIZATION_PARTICLES, waveform_mode));
889  root["waveform_mode"]["choices"].append(add_property_choice_json("VU Meter", AUDIO_VISUALIZATION_VU_METER, waveform_mode));
890 
891  // Add the parentClipObject's properties
892  if (parentClipObject)
893  {
894  // Convert Clip's frame position to Timeline's frame position
895  long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
896  long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
897  double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
898 
899  // Correct the parent Clip Object properties by the clip's reference system
900  float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
901  float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
902  float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
903  float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
904  float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
905  float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
906  float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
907 
908  // Add the parent Clip Object properties to JSON
909  root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
910  root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
911  root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
912  root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
913  root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
914  root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
915  root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
916  }
917  else
918  {
919  // Add this own clip's properties to JSON
920  root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
921  root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
922  root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
923  root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
924  root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
925  root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
926  root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
927  }
928 
929  // Keyframes
930  root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
931  root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
932  root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
933  root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
934  root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
935  root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
936  root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
937  root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
938  root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);
939 
940  // Add enable audio/video choices (dropdown style)
941  root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
942  root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
943  root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
944  root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
945  root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
946  root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));
947 
948  root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
949  root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
950  root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
951  root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);
952  root["wave_color"]["alpha"] = add_property_json("Alpha", wave_color.alpha.GetValue(requested_frame), "float", "", &wave_color.alpha, 0, 255, false, requested_frame);
953 
954  // Return formatted string
955  return root.toStyledString();
956 }
957 
958 // Generate Json::Value for this object
959 Json::Value Clip::JsonValue() const {
960 
961  // Create root json object
962  Json::Value root = ClipBase::JsonValue(); // get parent properties
963  root["parentObjectId"] = parentObjectId;
964  root["gravity"] = gravity;
965  root["scale"] = scale;
966  root["anchor"] = anchor;
967  root["display"] = display;
968  root["mixing"] = mixing;
969  root["composite"] = composite;
970  root["waveform"] = waveform;
971  root["waveform_mode"] = waveform_mode;
972  switch (reader_orientation_mode) {
973  case ReaderOrientationMode::LegacyClipTransform:
974  root["reader_orientation_mode"] = "legacy_clip_transform";
975  break;
976  case ReaderOrientationMode::Reader:
977  default:
978  root["reader_orientation_mode"] = "reader";
979  break;
980  }
981  root["scale_x"] = scale_x.JsonValue();
982  root["scale_y"] = scale_y.JsonValue();
983  root["location_x"] = location_x.JsonValue();
984  root["location_y"] = location_y.JsonValue();
985  root["alpha"] = alpha.JsonValue();
986  root["rotation"] = rotation.JsonValue();
987  root["time"] = time.JsonValue();
988  root["volume"] = volume.JsonValue();
989  root["wave_color"] = wave_color.JsonValue();
990  root["shear_x"] = shear_x.JsonValue();
991  root["shear_y"] = shear_y.JsonValue();
992  root["origin_x"] = origin_x.JsonValue();
993  root["origin_y"] = origin_y.JsonValue();
994  root["channel_filter"] = channel_filter.JsonValue();
995  root["channel_mapping"] = channel_mapping.JsonValue();
996  root["has_audio"] = has_audio.JsonValue();
997  root["has_video"] = has_video.JsonValue();
998  root["perspective_c1_x"] = perspective_c1_x.JsonValue();
999  root["perspective_c1_y"] = perspective_c1_y.JsonValue();
1000  root["perspective_c2_x"] = perspective_c2_x.JsonValue();
1001  root["perspective_c2_y"] = perspective_c2_y.JsonValue();
1002  root["perspective_c3_x"] = perspective_c3_x.JsonValue();
1003  root["perspective_c3_y"] = perspective_c3_y.JsonValue();
1004  root["perspective_c4_x"] = perspective_c4_x.JsonValue();
1005  root["perspective_c4_y"] = perspective_c4_y.JsonValue();
1006 
1007  // Add array of effects
1008  root["effects"] = Json::Value(Json::arrayValue);
1009 
1010  // loop through effects
1011  for (auto existing_effect : effects)
1012  {
1013  root["effects"].append(existing_effect->JsonValue());
1014  }
1015 
1016  if (reader)
1017  root["reader"] = reader->JsonValue();
1018  else
1019  root["reader"] = Json::Value(Json::objectValue);
1020 
1021  // return JsonValue
1022  return root;
1023 }
1024 
1025 // Load JSON string into this object
1026 void Clip::SetJson(const std::string value) {
1027 
1028  // Parse JSON string into JSON objects
1029  try
1030  {
1031  const Json::Value root = openshot::stringToJson(value);
1032  // Set all values that match
1033  SetJsonValue(root);
1034  }
1035  catch (const std::exception& e)
1036  {
1037  // Error parsing JSON (or missing keys)
1038  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
1039  }
1040 }
1041 
1042 // Load Json::Value into this object
1043 void Clip::SetJsonValue(const Json::Value root) {
1044  auto ensure_default_keyframe = [](Keyframe& kf, double default_value) {
1045  if (kf.GetCount() == 0) {
1046  kf = Keyframe(default_value);
1047  }
1048  };
1049 
1050  // Set parent data
1051  ClipBase::SetJsonValue(root);
1052 
1053  // Older project files predate reader-applied orientation metadata and stored
1054  // phone/camera rotation as ordinary clip rotation/scale keyframes.
1055  if (root["reader_orientation_mode"].isNull()) {
1056  reader_orientation_mode = ReaderOrientationMode::LegacyClipTransform;
1057  } else {
1058  const std::string mode = root["reader_orientation_mode"].asString();
1059  if (mode == "legacy_clip_transform") {
1060  reader_orientation_mode = ReaderOrientationMode::LegacyClipTransform;
1061  } else {
1062  reader_orientation_mode = ReaderOrientationMode::Reader;
1063  }
1064  }
1065 
1066  // Set data from Json (if key is found)
1067  if (!root["parentObjectId"].isNull()){
1068  parentObjectId = root["parentObjectId"].asString();
1069  if (parentObjectId.size() > 0 && parentObjectId != ""){
1070  AttachToObject(parentObjectId);
1071  } else{
1072  parentTrackedObject = nullptr;
1073  parentClipObject = NULL;
1074  }
1075  }
1076  if (!root["gravity"].isNull())
1077  gravity = (GravityType) root["gravity"].asInt();
1078  if (!root["scale"].isNull())
1079  scale = (ScaleType) root["scale"].asInt();
1080  if (!root["anchor"].isNull())
1081  anchor = (AnchorType) root["anchor"].asInt();
1082  if (!root["display"].isNull())
1083  display = (FrameDisplayType) root["display"].asInt();
1084  if (!root["mixing"].isNull())
1085  mixing = (VolumeMixType) root["mixing"].asInt();
1086  if (!root["composite"].isNull())
1087  composite = (CompositeType) root["composite"].asInt();
1088  if (!root["waveform"].isNull())
1089  waveform = root["waveform"].asBool();
1090  if (!root["waveform_mode"].isNull())
1091  waveform_mode = root["waveform_mode"].asInt();
1092  if (!root["scale_x"].isNull())
1093  scale_x.SetJsonValue(root["scale_x"]);
1094  if (!root["scale_y"].isNull())
1095  scale_y.SetJsonValue(root["scale_y"]);
1096  if (!root["location_x"].isNull())
1097  location_x.SetJsonValue(root["location_x"]);
1098  if (!root["location_y"].isNull())
1099  location_y.SetJsonValue(root["location_y"]);
1100  if (!root["alpha"].isNull())
1101  alpha.SetJsonValue(root["alpha"]);
1102  if (!root["rotation"].isNull())
1103  rotation.SetJsonValue(root["rotation"]);
1104  if (!root["time"].isNull())
1105  time.SetJsonValue(root["time"]);
1106  if (!root["volume"].isNull())
1107  volume.SetJsonValue(root["volume"]);
1108  if (!root["wave_color"].isNull())
1109  wave_color.SetJsonValue(root["wave_color"]);
1110  if (!root["shear_x"].isNull())
1111  shear_x.SetJsonValue(root["shear_x"]);
1112  if (!root["shear_y"].isNull())
1113  shear_y.SetJsonValue(root["shear_y"]);
1114  if (!root["origin_x"].isNull())
1115  origin_x.SetJsonValue(root["origin_x"]);
1116  if (!root["origin_y"].isNull())
1117  origin_y.SetJsonValue(root["origin_y"]);
1118  if (!root["channel_filter"].isNull())
1119  channel_filter.SetJsonValue(root["channel_filter"]);
1120  if (!root["channel_mapping"].isNull())
1121  channel_mapping.SetJsonValue(root["channel_mapping"]);
1122  if (!root["has_audio"].isNull())
1123  has_audio.SetJsonValue(root["has_audio"]);
1124  if (!root["has_video"].isNull())
1125  has_video.SetJsonValue(root["has_video"]);
1126  if (!root["perspective_c1_x"].isNull())
1127  perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
1128  if (!root["perspective_c1_y"].isNull())
1129  perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
1130  if (!root["perspective_c2_x"].isNull())
1131  perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
1132  if (!root["perspective_c2_y"].isNull())
1133  perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
1134  if (!root["perspective_c3_x"].isNull())
1135  perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
1136  if (!root["perspective_c3_y"].isNull())
1137  perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
1138  if (!root["perspective_c4_x"].isNull())
1139  perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
1140  if (!root["perspective_c4_y"].isNull())
1141  perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
1142 
1143  // Core clip transforms should never remain empty after load. Empty JSON
1144  // point arrays can be produced by editing flows that remove every keyframe.
1145  ensure_default_keyframe(scale_x, 1.0);
1146  ensure_default_keyframe(scale_y, 1.0);
1147  ensure_default_keyframe(location_x, 0.0);
1148  ensure_default_keyframe(location_y, 0.0);
1149  ensure_default_keyframe(origin_x, 0.5);
1150  ensure_default_keyframe(origin_y, 0.5);
1151  ensure_default_keyframe(rotation, 0.0);
1152  if (!root["effects"].isNull()) {
1153 
1154  // Clear existing effects
1155  effects.clear();
1156 
1157  // loop through effects
1158  for (const auto existing_effect : root["effects"]) {
1159  // Skip NULL nodes
1160  if (existing_effect.isNull()) {
1161  continue;
1162  }
1163 
1164  // Create Effect
1165  EffectBase *e = NULL;
1166  if (!existing_effect["type"].isNull()) {
1167 
1168  // Create instance of effect
1169  if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
1170 
1171  // Load Json into Effect
1172  e->SetJsonValue(existing_effect);
1173 
1174  // Add Effect to Timeline
1175  AddEffect(e);
1176  }
1177  }
1178  }
1179  }
1180  if (!root["reader"].isNull()) // does Json contain a reader?
1181  {
1182  if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
1183  {
1184  // Close previous reader (if any)
1185  bool already_open = false;
1186  if (reader)
1187  {
1188  // Track if reader was open
1189  already_open = reader->IsOpen();
1190 
1191  // Close and delete existing allocated reader (if any)
1192  Reader(NULL);
1193  }
1194 
1195  // Create new reader (and load properties)
1196  std::string type = root["reader"]["type"].asString();
1197 
1198  if (type == "FFmpegReader") {
1199 
1200  // Create new reader
1201  reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
1202  reader->SetJsonValue(root["reader"]);
1203 
1204  } else if (type == "QtImageReader") {
1205 
1206  // Create new reader
1207  reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
1208  reader->SetJsonValue(root["reader"]);
1209 
1210 #ifdef USE_IMAGEMAGICK
1211  } else if (type == "ImageReader") {
1212 
1213  // Create new reader
1214  reader = new ImageReader(root["reader"]["path"].asString(), false);
1215  reader->SetJsonValue(root["reader"]);
1216 
1217  } else if (type == "TextReader") {
1218 
1219  // Create new reader
1220  reader = new TextReader();
1221  reader->SetJsonValue(root["reader"]);
1222 #endif
1223 
1224  } else if (type == "ChunkReader") {
1225 
1226  // Create new reader
1227  reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
1228  reader->SetJsonValue(root["reader"]);
1229 
1230  } else if (type == "DummyReader") {
1231 
1232  // Create new reader
1233  reader = new openshot::DummyReader();
1234  reader->SetJsonValue(root["reader"]);
1235 
1236  } else if (type == "Timeline") {
1237 
1238  // Create new reader (always load from file again)
1239  // This prevents FrameMappers from being loaded on accident
1240  reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
1241  }
1242 
1243  // mark as managed reader and set parent
1244  if (reader) {
1245  reader->ApplyOrientationMetadata(reader_orientation_mode == ReaderOrientationMode::Reader);
1246  reader->ParentClip(this);
1247  allocated_reader = reader;
1248  }
1249 
1250  // Re-Open reader (if needed)
1251  if (already_open) {
1252  reader->Open();
1253  }
1254  }
1255  }
1256 
1257  // Clear cache (it might have changed)
1258  final_cache.Clear();
1259 }
1260 
1261 // Sort effects by order
1262 void Clip::sort_effects()
1263 {
1264  // sort clips
1265  effects.sort(CompareClipEffects());
1266 }
1267 
// Add an effect to the clip. Parents the effect to this clip, inserts it into
// the order-sorted effect list, propagates the parent timeline (when one
// exists), and registers any tracked objects the effect carries. The clip's
// frame cache is cleared since rendered output may change.
// NOTE(review): signature not visible in this rendering — the parameter used
// below is `effect` (an effect pointer owned/managed by the caller).
{
	// Set parent clip pointer
	effect->ParentClip(this);

	// Add effect to list
	effects.push_back(effect);

	// Sort effects
	sort_effects();

	// Get the parent timeline of this clip
	Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

	if (parentTimeline)
		effect->ParentTimeline(parentTimeline);

	#ifdef USE_OPENCV
	// Add Tracked Object to Timeline
	if (effect->info.has_tracked_object){

		// Check if this clip has a parent timeline
		if (parentTimeline){

			effect->ParentTimeline(parentTimeline);

			// Iterate through effect's vector of Tracked Objects
			for (auto const& trackedObject : effect->trackedObjects){

				// Cast the Tracked Object as TrackedObjectBBox
				std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

				// Set the Tracked Object's parent clip to this
				trackedObjectBBox->ParentClip(this);

				// Add the Tracked Object to the timeline
				parentTimeline->AddTrackedObject(trackedObjectBBox);
			}
		}
	}
	#endif

	// Clear cache (it might have changed)
	final_cache.Clear();
}
1314 
// Remove an effect from the clip's effect list. Only unlinks the effect from
// this clip; the effect object itself is not deleted here.
{
	effects.remove(effect);

	// Clear cache (it might have changed)
	final_cache.Clear();
}
1323 
1324 // Apply background image to the current clip image (i.e. flatten this image onto previous layer)
1325 void Clip::apply_background(std::shared_ptr<openshot::Frame> frame,
1326  std::shared_ptr<openshot::Frame> background_frame,
1327  bool update_frame_image) {
1328  // Add background canvas
1329  std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
1330  QPainter painter(background_canvas.get());
1331 
1332  // Composite a new layer onto the image
1333  painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));
1334  painter.drawImage(0, 0, *frame->GetImage());
1335  painter.end();
1336 
1337  // Standalone clip requests update frame->image, but timeline composition
1338  // draws onto the timeline-owned background frame only.
1339  if (update_frame_image)
1340  frame->AddImage(background_canvas);
1341 }
1342 
1343 // Apply effects to the source frame (if any)
1344 void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
1345 {
1346  for (auto effect : effects)
1347  {
1348  // Apply the effect to this frame
1349  if (effect->info.apply_before_clip && before_keyframes) {
1350  effect->ProcessFrame(frame, frame->number);
1351  } else if (!effect->info.apply_before_clip && !before_keyframes) {
1352  effect->ProcessFrame(frame, frame->number);
1353  }
1354  }
1355 
1356  if (timeline != NULL && options != NULL) {
1357  // Apply global timeline effects (i.e. transitions & masks... if any)
1358  Timeline* timeline_instance = static_cast<Timeline*>(timeline);
1359  options->is_before_clip_keyframes = before_keyframes;
1360  timeline_instance->apply_effects(frame, timeline_frame_number, Layer(), options);
1361  }
1362 }
1363 
1364 // Compare 2 floating point numbers for equality
1365 bool Clip::isNear(double a, double b)
1366 {
1367  return fabs(a - b) < 0.000001;
1368 }
1369 
// Apply keyframes to the source frame (if any). Renders the clip's image onto
// a transparent, timeline-sized canvas using the transform built from the
// clip's keyframes, applies opacity and the composite mode, optionally draws
// frame-number overlays, then replaces the frame's image with the canvas.
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
	// Skip out if video was disabled or only an audio frame (no visualisation in use)
	if (!frame->has_image_data) {
		// Skip the rest of the image processing for performance reasons
		return;
	}

	// Get image from clip, and create transparent background image
	std::shared_ptr<QImage> source_image = frame->GetImage();
	std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
	                                                                    timeline_size.height(),
	                                                                    QImage::Format_RGBA8888_Premultiplied);
	background_canvas->fill(QColor(Qt::transparent));

	// Get transform from clip's keyframes
	QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

	// Load timeline's new frame image into a QPainter
	QPainter painter(background_canvas.get());
	painter.setRenderHint(QPainter::TextAntialiasing, true);
	if (!transform.isIdentity()) {
		// Only pay for smooth (filtered) pixmap sampling when a real transform exists
		painter.setRenderHint(QPainter::SmoothPixmapTransform, true);
	}
	// Apply transform (translate, rotate, scale)
	painter.setTransform(transform);

	// Composite a new layer onto the image
	painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));

	// Apply opacity via painter instead of per-pixel alpha manipulation
	const float alpha_value = alpha.GetValue(frame->number);
	if (alpha_value != 1.0f) {
		painter.setOpacity(alpha_value);
		painter.drawImage(0, 0, *source_image);
		// Reset so any subsequent drawing (e.g., overlays) isn't faded
		painter.setOpacity(1.0);
	} else {
		painter.drawImage(0, 0, *source_image);
	}

	if (timeline) {
		Timeline *t = static_cast<Timeline *>(timeline);

		// Draw frame #'s on top of image (if needed)
		if (display != FRAME_DISPLAY_NONE) {
			std::stringstream frame_number_str;
			switch (display) {
				case (FRAME_DISPLAY_NONE):
					// This is only here to prevent unused-enum warnings
					break;

				case (FRAME_DISPLAY_CLIP):
					frame_number_str << frame->number;
					break;

				case (FRAME_DISPLAY_TIMELINE):
					// Translate the clip-local frame # into the timeline's frame #
					frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
					break;

				case (FRAME_DISPLAY_BOTH):
					frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
					break;
			}

			// Draw frame number on top of image
			painter.setPen(QColor("#ffffff"));
			painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
		}
	}
	painter.end();

	// Add new QImage to frame
	frame->AddImage(background_canvas);
}
1445 
// Apply apply_waveform image to the source frame (if any). Renders an audio
// visualization (using the clip's keyframed wave color and the configured
// waveform_mode) onto a transparent timeline-sized canvas, then replaces the
// frame's image with the rendered result. No-op when Waveform() is false.
void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {

	if (!Waveform()) {
		// Exit if no waveform is needed
		return;
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_waveform (Generate Waveform Image)",
		"frame->number", frame->number,
		"Waveform()", Waveform(),
		"width", timeline_size.width(),
		"height", timeline_size.height());

	// Get the color of the waveform (evaluated at this frame, since the
	// color channels are keyframable)
	int red = wave_color.red.GetInt(frame->number);
	int green = wave_color.green.GetInt(frame->number);
	int blue = wave_color.blue.GetInt(frame->number);
	int alpha = wave_color.alpha.GetInt(frame->number);

	// Render the waveform through the audio visualization effect so clip shortcuts
	// and explicit effects share the same rendering path.
	auto visual_frame = std::make_shared<Frame>(*frame.get());
	visual_frame->AddImage(std::make_shared<QImage>(
		timeline_size.width(), timeline_size.height(), QImage::Format_RGBA8888_Premultiplied));
	visual_frame->GetImage()->fill(Qt::transparent);

	// Configure a one-shot visualization effect with fixed, minimal styling
	AudioVisualization visualization;
	visualization.visualization_type = waveform_mode;
	visualization.style = AUDIO_VISUALIZATION_STYLE_MINIMAL;
	visualization.color = Color(
		static_cast<unsigned char>(red),
		static_cast<unsigned char>(green),
		static_cast<unsigned char>(blue),
		static_cast<unsigned char>(alpha));
	visualization.intensity = Keyframe(1.0);
	visualization.smoothing = Keyframe(0.25);
	visualization.detail = Keyframe(0.75);
	visualization.glow = Keyframe(0.0);
	visualization.color_spread = Keyframe(0.0);
	visualization.frequency_low = Keyframe(0.0);
	visualization.frequency_high = Keyframe(1.0);
	visualization.GetFrame(visual_frame, frame->number);

	// Replace the clip frame's image with the rendered visualization
	frame->AddImage(visual_frame->GetImage());
}
1496 
1497 // Scale a source size to a target size (given a specific scale-type)
1498 QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
1499  switch (source_scale)
1500  {
1501  case (SCALE_FIT): {
1502  source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
1503  break;
1504  }
1505  case (SCALE_STRETCH): {
1506  source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
1507  break;
1508  }
1509  case (SCALE_CROP): {
1510  source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);;
1511  break;
1512  }
1513  }
1514 
1515  return source_size;
1516 }
1517 
// Get QTransform from keyframes. Builds the painter transform for this frame:
// resize by scale mode, inherit properties from an attached parent Clip or
// Tracked Object, position by gravity + location curves, then compose
// translate / rotate / shear / scale into a single QTransform.
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
	// Get image from clip
	std::shared_ptr<QImage> source_image = frame->GetImage();

	/* RESIZE SOURCE IMAGE - based on scale type */
	QSize source_size = scale_size(source_image->size(), scale, width, height);

	// Initialize parent object's properties (Clip or Tracked Object)
	float parentObject_location_x = 0.0;
	float parentObject_location_y = 0.0;
	float parentObject_scale_x = 1.0;
	float parentObject_scale_y = 1.0;
	float parentObject_shear_x = 0.0;
	float parentObject_shear_y = 0.0;
	float parentObject_rotation = 0.0;

	// Get the parentClipObject properties
	if (GetParentClip()){
		// Get the start trim position of the parent clip
		long parent_start_offset = parentClipObject->Start() * info.fps.ToDouble();
		long parent_frame_number = frame->number + parent_start_offset;

		// Get parent object's properties (Clip)
		parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
		parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
		parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
		parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
		parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
		parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
		parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
	}

	// Get the parentTrackedObject properties
	if (GetParentTrackedObject()){
		// Get the attached object's parent clip's properties
		Clip* parentClip = (Clip*) parentTrackedObject->ParentClip();
		if (parentClip)
		{
			// Get the start trim position of the parent clip
			long parent_start_offset = parentClip->Start() * info.fps.ToDouble();
			long parent_frame_number = frame->number + parent_start_offset;

			// Access the parentTrackedObject's properties
			// (assumes keys "w"/"h"/"sx"/"sy"/"cx"/"cy"/"r" — matches usage below)
			std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);

			// Get actual scaled parent size
			QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
			                               parentClip->scale, width, height);

			// Get actual scaled tracked object size
			int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
			                   parentClip->scale_x.GetValue(parent_frame_number);
			int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
			                    parentClip->scale_y.GetValue(parent_frame_number);

			// Scale the clip source_size based on the actual tracked object size
			source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);

			// Update parentObject's properties based on the tracked object's properties and parent clip's scale
			parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
			parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
			parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);
		}
	}

	/* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
	float x = 0.0; // left
	float y = 0.0; // top

	// Adjust size for scale x and scale y
	float sx = scale_x.GetValue(frame->number); // percentage X scale
	float sy = scale_y.GetValue(frame->number); // percentage Y scale

	// Change clip's scale to parentObject's scale (skipped when either parent
	// scale is zero, to avoid collapsing the clip)
	if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
		sx*= parentObject_scale_x;
		sy*= parentObject_scale_y;
	}

	float scaled_source_width = source_size.width() * sx;
	float scaled_source_height = source_size.height() * sy;

	// Place the (scaled) clip inside the canvas according to its gravity
	switch (gravity)
	{
	case (GRAVITY_TOP_LEFT):
		// This is only here to prevent unused-enum warnings
		break;
	case (GRAVITY_TOP):
		x = (width - scaled_source_width) / 2.0; // center
		break;
	case (GRAVITY_TOP_RIGHT):
		x = width - scaled_source_width; // right
		break;
	case (GRAVITY_LEFT):
		y = (height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_CENTER):
		x = (width - scaled_source_width) / 2.0; // center
		y = (height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_RIGHT):
		x = width - scaled_source_width; // right
		y = (height - scaled_source_height) / 2.0; // center
		break;
	case (GRAVITY_BOTTOM_LEFT):
		y = (height - scaled_source_height); // bottom
		break;
	case (GRAVITY_BOTTOM):
		x = (width - scaled_source_width) / 2.0; // center
		y = (height - scaled_source_height); // bottom
		break;
	case (GRAVITY_BOTTOM_RIGHT):
		x = width - scaled_source_width; // right
		y = (height - scaled_source_height); // bottom
		break;
	}

	// Debug output
		"Clip::get_transform (Gravity)",
		"frame->number", frame->number,
		"source_clip->gravity", gravity,
		"scaled_source_width", scaled_source_width,
		"scaled_source_height", scaled_source_height);

	QTransform transform;

	/* LOCATION, ROTATION, AND SCALE */
	float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
	float location_x_value = location_x.GetValue(frame->number) + parentObject_location_x;
	float location_y_value = location_y.GetValue(frame->number) + parentObject_location_y;
	// Location keyframes are relative: negative values scale by the distance
	// back toward/past the anchored edge, positive values scale by the
	// remaining canvas space past the anchor
	auto location_offset = [](float location, float anchored_position, float canvas_size, float clip_size) {
		if (location < 0.0f) {
			return location * (anchored_position + clip_size);
		}
		return location * (canvas_size - anchored_position);
	};
	x += location_offset(location_x_value, x, width, scaled_source_width);
	y += location_offset(location_y_value, y, height, scaled_source_height);
	float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
	float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
	float origin_x_value = origin_x.GetValue(frame->number);
	float origin_y_value = origin_y.GetValue(frame->number);

	// Transform source image (if needed)
		"Clip::get_transform (Build QTransform - if needed)",
		"frame->number", frame->number,
		"x", x, "y", y,
		"r", r,
		"sx", sx, "sy", sy);

	if (!isNear(x, 0) || !isNear(y, 0)) {
		// TRANSLATE/MOVE CLIP
		transform.translate(x, y);
	}
	if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
		// ROTATE CLIP (around origin_x, origin_y)
		float origin_x_offset = (scaled_source_width * origin_x_value);
		float origin_y_offset = (scaled_source_height * origin_y_value);
		transform.translate(origin_x_offset, origin_y_offset);
		transform.rotate(r);
		transform.shear(shear_x_value, shear_y_value);
		transform.translate(-origin_x_offset,-origin_y_offset);
	}
	// SCALE CLIP (if needed)
	float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
	float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
	if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
		transform.scale(source_width_scale, source_height_scale);
	}

	return transform;
}
1694 
1695 // Adjust frame number for Clip position and start (which can result in a different number)
1696 int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1697 
1698  // Get clip position from parent clip (if any)
1699  float position = 0.0;
1700  float start = 0.0;
1701  Clip *parent = static_cast<Clip *>(ParentClip());
1702  if (parent) {
1703  position = parent->Position();
1704  start = parent->Start();
1705  }
1706 
1707  // Adjust start frame and position based on parent clip.
1708  // This ensures the same frame # is used by mapped readers and clips,
1709  // when calculating samples per frame.
1710  // Thus, this prevents gaps and mismatches in # of samples.
1711  int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1712  int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1713  int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1714 
1715  return frame_number;
1716 }
openshot::ClipBase::add_property_json
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
Definition: ClipBase.cpp:96
openshot::stringToJson
const Json::Value stringToJson(const std::string value)
Definition: Json.cpp:16
openshot::CacheMemory::Clear
void Clear()
Clear the cache of all frames.
Definition: CacheMemory.cpp:224
openshot::Clip::Open
void Open() override
Open the internal reader.
Definition: Clip.cpp:387
openshot::Keyframe::IsIncreasing
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:292
openshot::AudioVisualization::background
int background
Definition: AudioVisualization.h:84
openshot::ReaderInfo::sample_rate
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:60
openshot::ClipBase::timeline
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any)
Definition: ClipBase.h:40
openshot::AUDIO_VISUALIZATION_STYLE_MINIMAL
@ AUDIO_VISUALIZATION_STYLE_MINIMAL
Definition: AudioVisualization.h:43
openshot::EffectInfo
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:28
openshot::FRAME_DISPLAY_BOTH
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
Definition: Enums.h:56
openshot::Fraction::ToFloat
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:35
openshot::AudioVisualization::visualization_type
int visualization_type
Definition: AudioVisualization.h:72
openshot::EffectBase
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:56
openshot::EffectBase::info
EffectInfoStruct info
Information about the current effect.
Definition: EffectBase.h:110
openshot::ReaderBase::JsonValue
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ReaderBase.cpp:110
openshot::AUDIO_VISUALIZATION_RADIAL
@ AUDIO_VISUALIZATION_RADIAL
Definition: AudioVisualization.h:31
openshot::COMPOSITE_MULTIPLY
@ COMPOSITE_MULTIPLY
Definition: Enums.h:91
openshot::Clip::anchor
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:187
Clip.h
Header file for Clip class.
openshot::Keyframe::GetLong
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:287
openshot::Clip::CreateReader
static openshot::ReaderBase * CreateReader(std::string path, bool inspect_reader=true)
Definition: Clip.cpp:249
openshot::ChunkReader
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:78
openshot::ReaderBase::GetFrame
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
openshot::Clip::previous_location
AudioLocation previous_location
Previous time-mapped audio location.
Definition: Clip.h:95
openshot::AudioVisualization::channel_layout
int channel_layout
Definition: AudioVisualization.h:81
openshot::FRAME_DISPLAY_CLIP
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
Definition: Enums.h:54
openshot::FRAME_DISPLAY_TIMELINE
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
Definition: Enums.h:55
openshot::ReaderBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ReaderBase.cpp:161
openshot::Clip::GetEffect
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
Definition: Clip.cpp:540
openshot::ReaderBase::ApplyOrientationMetadata
void ApplyOrientationMetadata(bool value)
Set whether readers should apply source orientation metadata to returned frames.
Definition: ReaderBase.cpp:270
openshot::ClipBase::End
virtual void End(float value)
Set end position (in seconds) of clip (trim end of video)
Definition: ClipBase.cpp:53
openshot
This namespace is the default namespace for all code in the openshot library.
Definition: AnimatedCurve.h:24
openshot::Clip::scale_y
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:332
openshot::Clip::PropertiesJSON
std::string PropertiesJSON(int64_t requested_frame) const override
Definition: Clip.cpp:825
openshot::EffectBase::ParentClip
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
Definition: EffectBase.cpp:549
openshot::AudioLocation
This struct holds the associated video frame and starting sample # for an audio packet.
Definition: AudioLocation.h:25
openshot::Keyframe::GetDelta
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:399
TextReader.h
Header file for TextReader class.
openshot::AUDIO_VISUALIZATION_SPECTRUM
@ AUDIO_VISUALIZATION_SPECTRUM
Definition: AudioVisualization.h:32
openshot::Clip::time
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Definition: Clip.h:345
openshot::CompositeType
CompositeType
This enumeration determines how clips are composited onto lower layers.
Definition: Enums.h:75
openshot::ClipBase::add_property_choice_json
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:132
openshot::AudioLocation::frame
int64_t frame
Definition: AudioLocation.h:26
openshot::Clip
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:89
juce::AudioBuffer< float >
openshot::AudioLocation::sample_start
int sample_start
Definition: AudioLocation.h:27
openshot::Clip::alpha
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:335
openshot::Clip::End
float End() const override
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
Definition: Clip.cpp:423
openshot::AudioVisualization::GetFrame
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number) override
This method is required for all derived classes of ClipBase, and returns a new openshot::Frame object...
Definition: AudioVisualization.h:89
openshot::ReaderBase::info
openshot::ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:91
openshot::COMPOSITE_SCREEN
@ COMPOSITE_SCREEN
Definition: Enums.h:92
openshot::GRAVITY_TOP_LEFT
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
Definition: Enums.h:23
Timeline.h
Header file for Timeline class.
openshot::Clip::origin_x
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right))
Definition: Clip.h:341
openshot::Clip::ParentTimeline
void ParentTimeline(openshot::TimelineBase *new_timeline) override
Set associated Timeline pointer.
Definition: Clip.cpp:450
openshot::Clip::GetFrame
std::shared_ptr< openshot::Frame > GetFrame(int64_t clip_frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
Definition: Clip.cpp:458
openshot::Clip::Close
void Close() override
Close the internal reader.
Definition: Clip.cpp:408
AudioResampler.h
Header file for AudioResampler class.
openshot::Clip::location_y
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
Definition: Clip.h:334
openshot::DummyReader
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
Definition: DummyReader.h:85
openshot::GRAVITY_TOP_RIGHT
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
Definition: Enums.h:25
openshot::Keyframe::SetJsonValue
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: KeyFrame.cpp:372
openshot::GravityType
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:21
openshot::Clip::origin_y
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom))
Definition: Clip.h:342
openshot::Clip::GetParentTrackedObject
std::shared_ptr< openshot::TrackedObjectBase > GetParentTrackedObject()
Return the associated Parent Tracked Object (if any)
Definition: Clip.cpp:561
openshot::ReaderInfo::duration
float duration
Length of time (in seconds)
Definition: ReaderBase.h:43
openshot::EffectBase::trackedObjects
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Object's by their indices (used by Effects that track objects on clips)
Definition: EffectBase.h:107
openshot::Clip::channel_mapping
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition: Clip.h:363
openshot::Clip::AddEffect
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:1269
openshot::ReaderBase::Name
virtual std::string Name()=0
Return the type name of the class.
openshot::Clip::~Clip
virtual ~Clip()
Destructor.
Definition: Clip.cpp:287
openshot::ReaderInfo::width
int width
The width of the video (in pixels)
Definition: ReaderBase.h:46
openshot::Clip::Json
std::string Json() const override
Generate JSON string of this object.
Definition: Clip.cpp:818
openshot::ClipBase::Position
void Position(float value)
Set the Id of this clip object
Definition: ClipBase.cpp:19
openshot::AUDIO_VISUALIZATION_BARS
@ AUDIO_VISUALIZATION_BARS
Definition: AudioVisualization.h:30
openshot::GRAVITY_RIGHT
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:28
openshot::FRAME_DISPLAY_NONE
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Definition: Enums.h:53
openshot::COMPOSITE_SOFT_LIGHT
@ COMPOSITE_SOFT_LIGHT
Definition: Enums.h:99
openshot::CompareClipEffects
Definition: Clip.h:48
openshot::Clip::SetJsonValue
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition: Clip.cpp:1043
openshot::AudioVisualization::color_spread
Keyframe color_spread
Definition: AudioVisualization.h:79
openshot::AudioVisualization::smoothing
Keyframe smoothing
Definition: AudioVisualization.h:76
openshot::OutOfBoundsFrame
Exception for frames that are out of bounds.
Definition: Exceptions.h:306
openshot::Fraction::ToDouble
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:40
openshot::Timeline::apply_effects
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
Definition: Timeline.cpp:559
openshot::Keyframe::JsonValue
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: KeyFrame.cpp:339
openshot::AUDIO_VISUALIZATION_PARTICLES
@ AUDIO_VISUALIZATION_PARTICLES
Definition: AudioVisualization.h:34
openshot::COMPOSITE_LIGHTEN
@ COMPOSITE_LIGHTEN
Definition: Enums.h:95
FrameMapper.h
Header file for the FrameMapper class.
openshot::GRAVITY_TOP
@ GRAVITY_TOP
Align clip to the top center of its parent.
Definition: Enums.h:24
openshot::COMPOSITE_OVERLAY
@ COMPOSITE_OVERLAY
Definition: Enums.h:93
openshot::AUDIO_VISUALIZATION_PHASE_SCOPE
@ AUDIO_VISUALIZATION_PHASE_SCOPE
Definition: AudioVisualization.h:33
openshot::CacheBase::SetMaxBytesFromInfo
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Definition: CacheBase.cpp:28
openshot::Color
This class represents a color (used on the timeline and clips)
Definition: Color.h:27
openshot::Clip::display
openshot::FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:188
openshot::AUDIO_VISUALIZATION_BACKGROUND_TRANSPARENT
@ AUDIO_VISUALIZATION_BACKGROUND_TRANSPARENT
Definition: AudioVisualization.h:54
openshot::ClipBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ClipBase.cpp:80
openshot::Clip::perspective_c2_y
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:355
openshot::Clip::scale_x
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:331
openshot::QtImageReader
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:74
openshot::ClipBase::JsonValue
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ClipBase.cpp:64
openshot::ReaderInfo::video_length
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:53
openshot::AudioResampler
This class is used to resample audio data for many sequential frames.
Definition: AudioResampler.h:30
openshot::AudioResampler::SetBuffer
void SetBuffer(juce::AudioBuffer< float > *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
Definition: AudioResampler.cpp:60
openshot::ReaderInfo::height
int height
The height of the video (in pixels)
Definition: ReaderBase.h:45
openshot::VOLUME_MIX_REDUCE
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition: Enums.h:71
openshot::ClipBase::position
float position
The position on the timeline where this clip should start playing.
Definition: ClipBase.h:35
openshot::Timeline::GetClip
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
Definition: Timeline.cpp:418
openshot::Clip::perspective_c3_y
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:357
openshot::Clip::perspective_c4_y
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:359
ZmqLogger.h
Header file for ZeroMQ-based Logger class.
openshot::Clip::has_video
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:367
openshot::Keyframe
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition: KeyFrame.h:53
AudioVisualization.h
Header file for AudioVisualization effect class.
openshot::Clip::gravity
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition: Clip.h:185
openshot::Color::SetJsonValue
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Color.cpp:117
openshot::TimelineInfoStruct::is_before_clip_keyframes
bool is_before_clip_keyframes
Is this before clip keyframes are applied.
Definition: TimelineBase.h:35
openshot::ReaderBase::Open
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
openshot::GRAVITY_BOTTOM
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
Definition: Enums.h:30
openshot::ReaderBase::IsOpen
virtual bool IsOpen()=0
Determine if reader is open or closed.
openshot::AUDIO_VISUALIZATION_VU_METER
@ AUDIO_VISUALIZATION_VU_METER
Definition: AudioVisualization.h:35
openshot::InvalidJSON
Exception for invalid JSON.
Definition: Exceptions.h:223
openshot::AudioVisualization::frequency_low
Keyframe frequency_low
Definition: AudioVisualization.h:82
openshot::Timeline
This class represents a timeline.
Definition: Timeline.h:153
openshot::ImageReader
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:55
openshot::Clip::composite
openshot::CompositeType composite
How this clip is composited onto lower layers.
Definition: Clip.h:190
openshot::Clip::perspective_c1_x
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:352
openshot::SCALE_CROP
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
Definition: Enums.h:37
openshot::Color::green
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:31
openshot::COMPOSITE_HARD_LIGHT
@ COMPOSITE_HARD_LIGHT
Definition: Enums.h:98
openshot::Clip::init_settings
void init_settings()
Init default settings for a clip.
Definition: Clip.cpp:70
openshot::TimelineInfoStruct
This struct contains info about the current Timeline clip instance.
Definition: TimelineBase.h:32
openshot::AudioVisualization::intensity
Keyframe intensity
Definition: AudioVisualization.h:75
openshot::EffectInfoStruct::has_tracked_object
bool has_tracked_object
Determines if this effect track objects through the clip.
Definition: EffectBase.h:45
openshot::ReaderInfo::metadata
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
Definition: ReaderBase.h:65
openshot::ClipBase::end
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:38
openshot::ClipBase::Start
void Start(float value)
Set start position (in seconds) of clip (trim start of video)
Definition: ClipBase.cpp:42
openshot::FFmpegReader
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:103
path
path
Definition: FFmpegWriter.cpp:1481
openshot::COMPOSITE_PLUS
@ COMPOSITE_PLUS
Definition: Enums.h:90
openshot::FrameMapper
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:193
openshot::Frame::GetSamplesPerFrame
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:484
openshot::AudioVisualization::detail
Keyframe detail
Definition: AudioVisualization.h:77
ChunkReader.h
Header file for ChunkReader class.
openshot::COMPOSITE_DIFFERENCE
@ COMPOSITE_DIFFERENCE
Definition: Enums.h:100
openshot::AudioVisualization::style
int style
Definition: AudioVisualization.h:73
openshot::ZmqLogger::Instance
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition: ZmqLogger.cpp:35
openshot::ClipBase::start
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:37
openshot::Clip::Reader
openshot::ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:377
openshot::SCALE_FIT
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:38
openshot::GRAVITY_BOTTOM_LEFT
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
Definition: Enums.h:29
openshot::Clip::perspective_c2_x
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:354
openshot::ZmqLogger::AppendDebugMethod
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
Definition: ZmqLogger.cpp:178
openshot::Clip::volume
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:346
openshot::COMPOSITE_DARKEN
@ COMPOSITE_DARKEN
Definition: Enums.h:94
openshot::Timeline::AddTrackedObject
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
Definition: Timeline.cpp:229
openshot::Clip::SetJson
void SetJson(const std::string value) override
Load JSON string into this object.
Definition: Clip.cpp:1026
openshot::GRAVITY_BOTTOM_RIGHT
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Definition: Enums.h:31
openshot::Color::JsonValue
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: Color.cpp:86
openshot::Keyframe::GetLength
int64_t GetLength() const
Definition: KeyFrame.cpp:417
openshot::Keyframe::GetInt
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:282
openshot::AudioVisualization::color_mode
int color_mode
Definition: AudioVisualization.h:80
openshot::ANCHOR_CANVAS
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
Definition: Enums.h:46
openshot::Clip::SetAttachedClip
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
Definition: Clip.cpp:335
openshot::AUDIO_VISUALIZATION_RADIAL_BARS
@ AUDIO_VISUALIZATION_RADIAL_BARS
Definition: AudioVisualization.h:36
openshot::Clip::perspective_c4_x
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:358
openshot::ReaderClosed
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:369
openshot::ReaderInfo::channel_layout
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:62
openshot::Clip::perspective_c1_y
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:353
openshot::COMPOSITE_EXCLUSION
@ COMPOSITE_EXCLUSION
Definition: Enums.h:101
openshot::AUDIO_VISUALIZATION_COLOR_SEED
@ AUDIO_VISUALIZATION_COLOR_SEED
Definition: AudioVisualization.h:62
openshot::Clip::channel_filter
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
Definition: Clip.h:362
openshot::Clip::init_reader_rotation
void init_reader_rotation()
Update default rotation from reader.
Definition: Clip.cpp:151
openshot::ClipBase::Id
void Id(std::string value)
Definition: ClipBase.h:94
openshot::Clip::init_reader_settings
void init_reader_settings()
Init reader info details.
Definition: Clip.cpp:138
openshot::TimelineBase
This class represents a timeline (used for building generic timeline implementations)
Definition: TimelineBase.h:41
MagickUtilities.h
Header file for MagickUtilities (IM6/IM7 compatibility overlay)
openshot::GRAVITY_LEFT
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:26
openshot::Keyframe::GetCount
int64_t GetCount() const
Get the number of points (i.e. # of points)
Definition: KeyFrame.cpp:424
openshot::ReaderInfo::fps
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:48
openshot::Clip::Clip
Clip()
Default Constructor.
Definition: Clip.cpp:207
openshot::ReaderBase
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:75
openshot::Clip::SetAttachedObject
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
Definition: Clip.cpp:330
openshot::AUDIO_VISUALIZATION_CHANNEL_AUTO
@ AUDIO_VISUALIZATION_CHANNEL_AUTO
Definition: AudioVisualization.h:47
openshot::ClipBase::previous_properties
std::string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:39
openshot::Clip::scale
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:186
openshot::COMPOSITE_SOURCE_OVER
@ COMPOSITE_SOURCE_OVER
Definition: Enums.h:76
openshot::AudioResampler::GetResampledBuffer
juce::AudioBuffer< float > * GetResampledBuffer()
Get the resampled audio buffer.
Definition: AudioResampler.cpp:106
openshot::VOLUME_MIX_AVERAGE
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
Definition: Enums.h:70
openshot::ReaderBase::Close
virtual void Close()=0
Close the reader (and any resources it was consuming)
openshot::AudioVisualization
Definition: AudioVisualization.h:66
openshot::AnchorType
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:44
openshot::ScaleType
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:35
openshot::Clip::AttachToObject
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
Definition: Clip.cpp:307
openshot::AudioVisualization::glow
Keyframe glow
Definition: AudioVisualization.h:78
openshot::Color::alpha
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Definition: Color.h:33
openshot::Clip::has_audio
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:366
openshot::Clip::GetParentClip
openshot::Clip * GetParentClip()
Return the associated ParentClip (if any)
Definition: Clip.cpp:552
openshot::Clip::rotation
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:338
openshot::SCALE_NONE
@ SCALE_NONE
Do not scale the clip.
Definition: Enums.h:40
openshot::TextReader
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:62
QtImageReader.h
Header file for QtImageReader class.
openshot::GRAVITY_CENTER
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:27
openshot::AUDIO_VISUALIZATION_WAVEFORM
@ AUDIO_VISUALIZATION_WAVEFORM
Definition: AudioVisualization.h:28
openshot::Clip::JsonValue
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition: Clip.cpp:959
openshot::Color::red
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:30
openshot::SCALE_STRETCH
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:39
ImageReader.h
Header file for ImageReader class.
openshot::FrameMapper::Reader
ReaderBase * Reader()
Get the current reader.
Definition: FrameMapper.cpp:67
openshot::AudioVisualization::frequency_high
Keyframe frequency_high
Definition: AudioVisualization.h:83
openshot::Clip::perspective_c3_x
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:356
openshot::COMPOSITE_COLOR_BURN
@ COMPOSITE_COLOR_BURN
Definition: Enums.h:97
openshot::VOLUME_MIX_NONE
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
Definition: Enums.h:69
openshot::AudioVisualization::color
Color color
Definition: AudioVisualization.h:74
openshot::ChunkVersion
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
Definition: ChunkReader.h:49
openshot::ClipBase::Layer
void Layer(int value)
Set layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.cpp:31
openshot::ReaderInfo::channels
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:61
openshot::VolumeMixType
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition: Enums.h:67
openshot::Clip::wave_color
openshot::Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:349
openshot::Clip::shear_y
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:340
openshot::Clip::RemoveEffect
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:1316
DummyReader.h
Header file for DummyReader class.
openshot::AUDIO_VISUALIZATION_FILLED_WAVEFORM
@ AUDIO_VISUALIZATION_FILLED_WAVEFORM
Definition: AudioVisualization.h:29
openshot::Color::blue
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:32
openshot::Timeline::GetTrackedObject
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by it's id.
Definition: Timeline.cpp:247
Exceptions.h
Header file for all Exception classes.
openshot::Clip::mixing
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:189
FFmpegReader.h
Header file for FFmpegReader class.
openshot::COMPOSITE_COLOR_DODGE
@ COMPOSITE_COLOR_DODGE
Definition: Enums.h:96
openshot::Clip::shear_x
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:339
openshot::EffectBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: EffectBase.cpp:139
openshot::Keyframe::GetValue
double GetValue(int64_t index) const
Get the value at a specific index.
Definition: KeyFrame.cpp:258
openshot::Clip::location_x
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
Definition: Clip.h:333
openshot::Clip::getFrameMutex
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
Definition: Clip.h:92
openshot::FrameDisplayType
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
Definition: Enums.h:51
openshot::ReaderBase::ParentClip
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
Definition: ReaderBase.cpp:244