// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include "Clip.h"

#include "AudioResampler.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#ifdef USE_IMAGEMAGICK
    #include "MagickUtilities.h"
    #include "ImageReader.h"
    #include "TextReader.h"
#endif

#include <Qt>

using namespace openshot;

// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    ClipBase::End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    waveform = false;
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}
// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;

        // Init cache
        final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
    }
}
// Init reader's rotation (if any)
void Clip::init_reader_rotation() {
    // Don't init rotation if clip has keyframes
    if (rotation.GetCount() > 0)
        return;

    // Init rotation
    if (reader && reader->info.metadata.count("rotate") > 0) {
        // Use reader metadata rotation (if any)
        // This is typical with cell phone videos filmed in different orientations
        try {
            float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
            rotation = Keyframe(rotate_metadata);
        } catch (const std::exception& e) {}
    }
    else
        // Default no rotation
        rotation = Keyframe(0.0);
}
// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}
// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video formats (or image sequences)
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || path.find("%") != std::string::npos)
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot JSON project file
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }

    // If no video found, try each reader
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}
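// Usage sketch (illustrative only; the file names below are hypothetical): the
// extension checks above select a reader automatically, so a caller can simply
// construct a Clip from a path:
//
//     openshot::Clip video_clip("input.mp4");     // handled by FFmpegReader
//     openshot::Clip image_clip("overlay.png");   // handled by QtImageReader
//     openshot::Clip project_clip("project.osp"); // handled by Timeline
//
// Paths containing a printf-style "%" pattern are treated as image sequences
// and are also routed to FFmpegReader.
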
// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
        reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }
}
// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject){
            SetAttachedObject(trackedObject);
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
        }
    }
}
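// Usage sketch (illustrative only; both ids below are hypothetical): a clip is
// normally attached by setting its "parentObjectId" JSON property, which calls
// AttachToObject() during SetJsonValue(), but it can also be called directly:
//
//     clip->AttachToObject("tracked-object-id"); // follow a TrackedObjectBase
//     clip->AttachToObject("other-clip-id");     // follow another Clip
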
// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
    parentTrackedObject = trackedObject;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
    parentClipObject = clipObject;
}
// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // Delete previously allocated reader (if not related to new reader)
    // FrameMappers that point to the same allocated reader are ignored
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            // Determine if FrameMapper is pointing at the same allocated reader
            FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;
            }
        }
    }
    // Clear existing allocated reader (if different)
    if (allocated_reader && !is_same_reader) {
        reader->Close();
        allocated_reader->Close();
        delete allocated_reader;
        reader = NULL;
        allocated_reader = NULL;
    }

    // set reader pointer
    reader = new_reader;

    // set parent
    if (reader) {
        reader->ParentClip(this);

        // Init reader info struct
        init_reader_settings();
    }
}
// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
// Open the internal reader
void Clip::Open()
{
    if (reader)
    {
        // Open the reader
        reader->Open();
        is_open = true;

        // Copy Reader info to Clip
        info = reader->info;

        // Set some clip properties from the file reader
        if (end == 0.0)
            ClipBase::End(reader->info.duration);
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
// Close the internal reader
void Clip::Close()
{
    is_open = false;
    if (reader) {
        ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

        // Close the reader
        reader->Close();
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
// Get end position of clip (trim end of video), which can be affected by the time curve
float Clip::End() const
{
    // if a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

        return float(time.GetLength()) / fps;
    }
    else
        // just use the duration (as detected by the reader)
        return end;
}
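// Worked example (illustrative figures): with a time curve whose keyframes span
// 48 frames and a 24 fps reader, End() returns 48 / 24.0 = 2.0 seconds,
// regardless of the reader's duration; with no time curve (a single keyframe
// point), the trimmed `end` value is returned unchanged.
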
// Override End() position
void Clip::End(float value) {
    ClipBase::End(value);
}

// Set associated Timeline pointer
void Clip::ParentTimeline(openshot::TimelineBase* new_timeline) {
    timeline = new_timeline;

    // Clear cache (it might have changed)
    final_cache.Clear();
}
// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(NULL, clip_frame_number, NULL);
}

// Create an openshot::Frame object for a specific frame number of this reader.
// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(background_frame, clip_frame_number, NULL);
}
// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Get frame object
        std::shared_ptr<Frame> frame = NULL;

        // Check cache
        frame = final_cache.GetFrame(clip_frame_number);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::GetFrame (Cached frame found)",
                "requested_frame", clip_frame_number);

            // Return cached frame
            return frame;
        }

        // Generate clip frame
        frame = GetOrCreateFrame(clip_frame_number);

        if (!background_frame) {
            // Create missing background_frame w/ transparent color (if needed)
            background_frame = std::make_shared<Frame>(clip_frame_number, frame->GetWidth(), frame->GetHeight(),
                                                       "#00000000", frame->GetAudioSamplesCount(),
                                                       frame->GetAudioChannelsCount());
        }

        // Get time mapped frame object (used to increase speed, change direction, etc...)
        apply_timemapping(frame);

        // Apply waveform image (if any)
        apply_waveform(frame, background_frame->GetImage());

        // Apply local effects to the frame (if any)
        apply_effects(frame);

        // Apply global timeline effects (i.e. transitions & masks... if any)
        if (timeline != NULL && options != NULL) {
            if (options->is_top_clip) {
                // Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
                Timeline* timeline_instance = static_cast<Timeline*>(timeline);
                frame = timeline_instance->apply_effects(frame, background_frame->number, Layer());
            }
        }

        // Apply keyframe / transforms
        apply_keyframes(frame, background_frame->GetImage());

        // Add final frame to cache
        final_cache.Add(frame);

        // Return processed 'frame'
        return frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // return last part of path
    return path.substr(path.find_last_of(".") + 1);
}
// Reverse an audio buffer
void Clip::reverse_buffer(juce::AudioBuffer<float>* buffer)
{
    int number_of_samples = buffer->getNumSamples();
    int channels = buffer->getNumChannels();

    // Reverse array (create new buffer to hold the reversed version)
    auto *reversed = new juce::AudioBuffer<float>(channels, number_of_samples);
    reversed->clear();

    for (int channel = 0; channel < channels; channel++)
    {
        int n = 0;
        for (int s = number_of_samples - 1; s >= 0; s--, n++)
            reversed->getWritePointer(channel)[n] = buffer->getReadPointer(channel)[s];
    }

    // Copy the samples back to the original array
    buffer->clear();
    // Loop through channels, and get audio samples
    for (int channel = 0; channel < channels; channel++)
        // Get the audio samples for this channel
        buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

    delete reversed;
}
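// Behavior sketch (illustrative only): reverse_buffer() reverses each channel
// in place, e.g. samples [s0, s1, s2] become [s2, s1, s0], which is the sample
// order needed when time-mapped audio plays backwards (decreasing frame
// numbers in the time curve below).
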
// Adjust the audio and image of a time mapped frame
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        int64_t clip_frame_number = frame->number;
        int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

        // create buffer
        juce::AudioBuffer<float> *source_samples = nullptr;

        // Get delta (difference from this frame to the next time mapped frame: Y value)
        double delta = time.GetDelta(clip_frame_number + 1);
        bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

        // Determine length of source audio (in samples)
        // A delta of 1.0 == normal expected samples
        // A delta of 0.5 == 50% of normal expected samples
        // A delta of 2.0 == 200% of normal expected samples
        int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
                                                            Reader()->info.sample_rate,
                                                            Reader()->info.channels);
        int source_sample_count = round(target_sample_count * fabs(delta));
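
        // Worked example (illustrative figures, assuming 44100 Hz / 24 fps): a
        // frame then carries ~1837-1838 samples. A delta of 2.0 (2x speed)
        // gathers ~3675 source samples, which the resampler compresses back to
        // the ~1837-sample target; a delta of 0.5 gathers ~919 samples and
        // stretches them out instead.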

        // Determine starting audio location
        AudioLocation location;
        if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2) {
            // No previous location OR gap detected
            location.frame = new_frame_number;
            location.sample_start = 0;

            // Create / Reset resampler
            // We don't want to interpolate between unrelated audio data
            if (resampler) {
                delete resampler;
            }
            // Init resampler with # channels from Reader (should match the timeline)
            resampler = new AudioResampler(Reader()->info.channels);

            // Allocate buffer of silence to initialize some data inside the resampler
            // To prevent it from becoming input limited
            juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
            init_samples.clear();
            resampler->SetBuffer(&init_samples, 1.0);
            resampler->GetResampledBuffer();

        } else {
            // Use previous location
            location = previous_location;
        }

        if (source_sample_count <= 0) {
            // Add silence and bail (we don't need any samples)
            frame->AddAudioSilence(target_sample_count);
            return;
        }

        // Allocate a new sample buffer for these delta frames
        source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
        source_samples->clear();

        // Determine ending audio location
        int remaining_samples = source_sample_count;
        int source_pos = 0;
        while (remaining_samples > 0) {
            std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
            int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

            if (frame_sample_count == 0) {
                // No samples found in source frame (fill with silence)
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                break;
            }
            if (remaining_samples - frame_sample_count >= 0) {
                // Use all frame samples & increment location
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
                }
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                remaining_samples -= frame_sample_count;
                source_pos += frame_sample_count;

            } else {
                // Use just what is needed
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
                }
                location.sample_start += remaining_samples;
                source_pos += remaining_samples;
                remaining_samples = 0;
            }

        }

        // Resize audio for current frame object + fill with silence
        // We are about to overwrite this with actual audio data (possibly resampled)
        frame->AddAudioSilence(target_sample_count);

        if (source_sample_count != target_sample_count) {
            // Resample audio (if needed)
            double resample_ratio = double(source_sample_count) / double(target_sample_count);
            resampler->SetBuffer(source_samples, resample_ratio);

            // Resample the data
            juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

            // Fill the frame with resampled data
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add new (resampled) samples to the frame object
                frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
            }
        } else {
            // Fill the frame with the source samples (no resampling needed)
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
            }
        }

        // Clean up
        delete source_samples;

        // Set previous location
        previous_location = location;
    }
}
// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;
}
// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
    try {
        // Init to requested frame
        int64_t clip_frame_number = adjust_frame_number_minimum(number);

        // Adjust for time-mapping (if any)
        if (enable_time && time.GetLength() > 1) {
            clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::GetOrCreateFrame (from reader)",
            "number", number, "clip_frame_number", clip_frame_number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(clip_frame_number);

        // Return real frame
        if (reader_frame) {
            // Override frame # (since time-mapping might have changed it)
            reader_frame->number = number;

            // Create a new copy of reader frame
            // This allows a clip to modify the pixels and audio of this frame without
            // changing the underlying reader's frame data
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            if (has_video.GetInt(number) == 0) {
                // No video, so add transparent pixels
                reader_copy->AddColor(QColor(Qt::transparent));
            }
            if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
                // No audio, so include silence (also, mute audio if past end of reader)
                reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
            }
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "number", number,
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}
// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}
// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    if (!parentObjectId.empty()) {
        root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
    } else {
        root["parentObjectId"] = add_property_json("Parent", 0.0, "string", "", NULL, -1, -1, false, requested_frame);
    }

    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentTrackedObject's properties
    if (parentTrackedObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
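
        // Worked example (illustrative figures): at 24 fps with Position() = 10s
        // and Start() = 2s, clip_start_position = 241 and clip_start_frame = 49,
        // so clip frame 1 maps to timeline frame 1 + 241 - 49 = 193.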

        // Get attached object's parent clip properties
        std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
        double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
        // Get attached object properties
        std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

        // Correct the parent Tracked Object properties by the clip's reference system
        float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
        float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
        float parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
        float parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
        float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

        // Add the parent Tracked Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    // Add the parentClipObject's properties
    else if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}
// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // return JsonValue
    return root;
}
// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}
// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()){
        parentObjectId = root["parentObjectId"].asString();
        if (!parentObjectId.empty()){
            AttachToObject(parentObjectId);
        } else {
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const auto& existing_effect : root["effects"]) {
            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to this clip
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing allocated reader (if any)
                Reader(NULL);
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded by accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed)
            if (reader && already_open) {
                reader->Open();
            }
        }
    }

    // Clear cache (it might have changed)
    final_cache.Clear();
}
// Sort effects by order
void Clip::sort_effects()
{
    // sort effects
    effects.sort(CompareClipEffects());
}
// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object){

        // Check if this clip has a parent timeline
        if (parentTimeline){

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects){

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif

    // Clear cache (it might have changed)
    final_cache.Clear();
}
// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);

    // Clear cache (it might have changed)
    final_cache.Clear();
}
// Apply local effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame)
{
    // Loop through this clip's effects, in order
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        frame = effect->GetFrame(frame, frame->number);

    } // end effect loop
}
// Compare 2 floating point numbers for equality
bool Clip::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}
// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
    // Skip out if video was disabled or only an audio frame (no visualisation in use)
    if (!frame->has_image_data) {
        // Skip the rest of the image processing for performance reasons
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
        "frame->number", frame->number,
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    if (timeline) {
        Timeline *t = static_cast<Timeline *>(timeline);

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}
// Apply the waveform image to the source frame (if any)
void Clip::apply_waveform(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {

    if (!Waveform()) {
        // Exit if no waveform is needed
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::apply_waveform (Generate Waveform Image)",
        "frame->number", frame->number,
        "Waveform()", Waveform(),
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    // Get the color of the waveform
    int red = wave_color.red.GetInt(frame->number);
    int green = wave_color.green.GetInt(frame->number);
    int blue = wave_color.blue.GetInt(frame->number);
    int alpha = wave_color.alpha.GetInt(frame->number);

    // Generate Waveform Dynamically (the size of the timeline)
    source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
    frame->AddImage(source_image);
}
// Build the transform for this frame (based on the clip's keyframes)
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    /* ALPHA & OPACITY */
    if (alpha.GetValue(frame->number) != 1.0)
    {
        float alpha_value = alpha.GetValue(frame->number);

        // Get source image's pixels
        unsigned char *pixels = source_image->bits();

        // Loop through pixels
        for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
        {
            // Apply alpha to pixel values (since we use a premultiplied value, we must
            // multiply the alpha with all colors).
            pixels[byte_index + 0] *= alpha_value;
            pixels[byte_index + 1] *= alpha_value;
            pixels[byte_index + 2] *= alpha_value;
            pixels[byte_index + 3] *= alpha_value;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::get_transform (Set Alpha & Opacity)",
            "alpha_value", alpha_value,
            "frame->number", frame->number);
    }
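
    // Worked example (illustrative figures): with alpha_value = 0.5, a
    // premultiplied pixel (200, 100, 50, 255) becomes (100, 50, 25, 127); all
    // four bytes are scaled (not just the alpha byte) because the color
    // channels are stored premultiplied by alpha.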

    /* RESIZE SOURCE IMAGE - based on scale type */
    QSize source_size = source_image->size();

    // Apply stretch scale to correctly fit the bounding-box
    if (parentTrackedObject){
        scale = SCALE_STRETCH;
    }

    switch (scale)
    {
        case (SCALE_FIT): {
            source_size.scale(width, height, Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_FIT)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(width, height, Qt::IgnoreAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_STRETCH)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_CROP)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
        case (SCALE_NONE): {
            // Image is already the original size (i.e. no scaling mode) relative
            // to the preview window size (i.e. timeline / preview ratio). No further
            // scaling is needed here.
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod(
                "Clip::get_transform (Scale: SCALE_NONE)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        }
    }
1412  // Initialize parent object's properties (Clip or Tracked Object)
1413  float parentObject_location_x = 0.0;
1414  float parentObject_location_y = 0.0;
1415  float parentObject_scale_x = 1.0;
1416  float parentObject_scale_y = 1.0;
1417  float parentObject_shear_x = 0.0;
1418  float parentObject_shear_y = 0.0;
1419  float parentObject_rotation = 0.0;
1420 
1421  // Get the parentClipObject properties
1422  if (parentClipObject){
1423 
1424  // Convert Clip's frame position to Timeline's frame position
1425  long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
1426  long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
1427  double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
1428 
1429  // Get parent object's properties (Clip)
1430  parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
1431  parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
1432  parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
1433  parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
1434  parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
1435  parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
1436  parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
1437  }
1438 
1439  // Get the parentTrackedObject properties
1440  if (parentTrackedObject){
1441 
1442  // Convert Clip's frame position to Timeline's frame position
1443  long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
1444  long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
1445  double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
1446 
1447  // Get parentTrackedObject's parent clip's properties
1448  std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
1449 
1450  // Get the attached object's parent clip's properties
1451  if (!trackedObjectParentClipProperties.empty())
1452  {
1453  // Get parent object's properties (Tracked Object)
1454  float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
1455 
1456  // Access the parentTrackedObject's properties
1457  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);
1458 
1459  // Get the Tracked Object's properties and correct them by the clip's reference system
1460  parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
1461  parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
1462  parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
1463  parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
1464  parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
1465  }
1466  else
1467  {
1468  // Access the parentTrackedObject's properties
1469  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);
1470 
1471  // Get the Tracked Object's properties and correct them by the clip's reference system
1472  parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
1473  parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
1474  parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
1475  parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
1476  parentObject_rotation = trackedObjectProperties["r"];
1477  }
1478  }
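	// Editor's note -- hypothetical numeric sketch of the branch above (not part of Clip.cpp):
	// a tracked box centred at cx = 0.75, cy = 0.50 with w = 0.20 and sx = 1.0 yields
	//   parentObject_location_x = 0.75 - 0.5 = 0.25  (a quarter frame right of centre)
	//   parentObject_location_y = 0.50 - 0.5 = 0.0
	//   parentObject_scale_x    = 0.20 * 1.0 = 0.20  (clip shrinks to the box width)
	// i.e. the box centre ("cx"/"cy") appears to be normalized to [0, 1], and the
	// -0.5 offset re-bases it to the clip's centre-relative location convention.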
1479 
1480  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
1481  float x = 0.0; // left
1482  float y = 0.0; // top
1483 
1484  // Adjust size for scale x and scale y
1485  float sx = scale_x.GetValue(frame->number); // percentage X scale
1486  float sy = scale_y.GetValue(frame->number); // percentage Y scale
1487 
1488  // Change clip's scale to parentObject's scale
1489 	if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
1490 		sx *= parentObject_scale_x;
1491 		sy *= parentObject_scale_y;
1492  }
1493 
1494  float scaled_source_width = source_size.width() * sx;
1495  float scaled_source_height = source_size.height() * sy;
1496 
1497  switch (gravity)
1498  {
1499  case (GRAVITY_TOP_LEFT):
1500  // This is only here to prevent unused-enum warnings
1501  break;
1502  case (GRAVITY_TOP):
1503  x = (width - scaled_source_width) / 2.0; // center
1504  break;
1505  case (GRAVITY_TOP_RIGHT):
1506  x = width - scaled_source_width; // right
1507  break;
1508  case (GRAVITY_LEFT):
1509  y = (height - scaled_source_height) / 2.0; // center
1510  break;
1511  case (GRAVITY_CENTER):
1512  x = (width - scaled_source_width) / 2.0; // center
1513  y = (height - scaled_source_height) / 2.0; // center
1514  break;
1515  case (GRAVITY_RIGHT):
1516  x = width - scaled_source_width; // right
1517  y = (height - scaled_source_height) / 2.0; // center
1518  break;
1519  case (GRAVITY_BOTTOM_LEFT):
1520  y = (height - scaled_source_height); // bottom
1521  break;
1522  case (GRAVITY_BOTTOM):
1523  x = (width - scaled_source_width) / 2.0; // center
1524  y = (height - scaled_source_height); // bottom
1525  break;
1526  case (GRAVITY_BOTTOM_RIGHT):
1527  x = width - scaled_source_width; // right
1528  y = (height - scaled_source_height); // bottom
1529  break;
1530  }
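	// Editor's note -- worked example with hypothetical values (not part of Clip.cpp):
	// on a 1920x1080 canvas with a 960x540 scaled source, GRAVITY_CENTER gives
	//   x = (1920 - 960) / 2.0 = 480,  y = (1080 - 540) / 2.0 = 270
	// while GRAVITY_BOTTOM_RIGHT gives
	//   x = 1920 - 960 = 960,          y = 1080 - 540 = 540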
1531 
1532 	// Debug output
1533 	ZmqLogger::Instance()->AppendDebugMethod(
1534 		"Clip::get_transform (Gravity)",
1535  "frame->number", frame->number,
1536  "source_clip->gravity", gravity,
1537  "scaled_source_width", scaled_source_width,
1538  "scaled_source_height", scaled_source_height);
1539 
1540  QTransform transform;
1541 
1542  /* LOCATION, ROTATION, AND SCALE */
1543  float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1544  x += (width * (location_x.GetValue(frame->number) + parentObject_location_x )); // move in percentage of final width
1545  y += (height * (location_y.GetValue(frame->number) + parentObject_location_y )); // move in percentage of final height
1546  float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1547  float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1548  float origin_x_value = origin_x.GetValue(frame->number);
1549  float origin_y_value = origin_y.GetValue(frame->number);
1550 
1551 	// Transform source image (if needed)
1552 	ZmqLogger::Instance()->AppendDebugMethod(
1553 		"Clip::get_transform (Build QTransform - if needed)",
1554  "frame->number", frame->number,
1555  "x", x, "y", y,
1556  "r", r,
1557  "sx", sx, "sy", sy);
1558 
1559  if (!isEqual(x, 0) || !isEqual(y, 0)) {
1560  // TRANSLATE/MOVE CLIP
1561  transform.translate(x, y);
1562  }
1563  if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
1564  // ROTATE CLIP (around origin_x, origin_y)
1565  float origin_x_offset = (scaled_source_width * origin_x_value);
1566  float origin_y_offset = (scaled_source_height * origin_y_value);
1567  transform.translate(origin_x_offset, origin_y_offset);
1568  transform.rotate(r);
1569  transform.shear(shear_x_value, shear_y_value);
1570 	transform.translate(-origin_x_offset, -origin_y_offset);
1571  }
1572  // SCALE CLIP (if needed)
1573  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1574  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1575  if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
1576  transform.scale(source_width_scale, source_height_scale);
1577  }
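	// Editor's note -- worked example with hypothetical values (not part of Clip.cpp):
	// if the raw source image is 1920x1080 but source_size (the requested display
	// size) is 960x540, then with sx = sy = 1.0:
	//   source_width_scale  = (960 / 1920) * 1.0 = 0.5
	//   source_height_scale = (540 / 1080) * 1.0 = 0.5
	// so the raw pixels are scaled down to the gravity-corrected display size.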
1578 
1579  return transform;
1580 }
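// Editor's note: a minimal, self-contained sketch (not part of Clip.cpp) of the
// translate -> rotate/shear-about-a-pivot -> scale composition built above, using
// hypothetical literal values in place of the keyframe curves. It mirrors the call
// order used in get_transform(): the pivot offset is translated in, the rotation
// and shear are applied, and the pivot is translated back out.
#include <QTransform>

static QTransform example_transform()
{
	const float x = 480.0f, y = 270.0f;            // gravity + location offset (hypothetical)
	const float r = 15.0f;                         // rotation in degrees (hypothetical)
	const float shear_x_value = 0.1f;              // X shear (hypothetical)
	const float shear_y_value = 0.0f;              // Y shear (hypothetical)
	const float origin_x_offset = 480.0f;          // scaled_source_width  * origin_x
	const float origin_y_offset = 270.0f;          // scaled_source_height * origin_y
	const float width_scale = 0.5f;                // source-to-display X scale (hypothetical)
	const float height_scale = 0.5f;               // source-to-display Y scale (hypothetical)

	QTransform t;
	t.translate(x, y);                                  // move the clip into place
	t.translate(origin_x_offset, origin_y_offset);      // shift the pivot to the origin point
	t.rotate(r);                                        // rotate about that pivot
	t.shear(shear_x_value, shear_y_value);              // shear about the same pivot
	t.translate(-origin_x_offset, -origin_y_offset);    // shift the pivot back
	t.scale(width_scale, height_scale);                 // finally scale the raw source pixels
	return t;
}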
1581 
1582 // Adjust frame number for Clip position and start (which can result in a different number)
1583 int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1584 
1585  // Get clip position from parent clip (if any)
1586  float position = 0.0;
1587  float start = 0.0;
1588  Clip *parent = static_cast<Clip *>(ParentClip());
1589  if (parent) {
1590  position = parent->Position();
1591  start = parent->Start();
1592  }
1593 
1594 	// Adjust the start frame and position based on the parent clip,
1595 	// so that mapped readers and clips use the same frame number
1596 	// when calculating samples per frame. This prevents gaps and
1597 	// mismatches in the number of samples.
1598  int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1599  int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1600  int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1601 
1602  return frame_number;
1603 }
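// Editor's note: a standalone sketch (hypothetical helper, not part of libopenshot)
// of the same position/start adjustment, handy for sanity-checking the arithmetic
// above with plain numbers.
#include <cmath>
#include <cstdint>

static int64_t example_adjust(int64_t clip_frame_number, double position,
                              double start, double fps)
{
	// Same formula as Clip::adjust_timeline_framenumber(): note that the start
	// term truncates while the position term rounds, matching the code above.
	int64_t clip_start_frame = static_cast<int64_t>(start * fps) + 1;
	int64_t clip_start_position = static_cast<int64_t>(std::round(position * fps)) + 1;
	return clip_frame_number + clip_start_position - clip_start_frame;
}
// e.g. example_adjust(10, 2.0, 0.5, 30.0) == 10 + 61 - 16 == 55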