#ifdef USE_IMAGEMAGICK
// ...
wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
parentTrackedObject = nullptr;
parentClipObject = NULL;
if (reader && reader->info.metadata.count("rotate") > 0) {
    // Use the reader's rotation metadata (if any); malformed values are ignored
    try {
        float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
        // ...
    } catch (const std::exception& e) {}
}
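// A minimal, self-contained sketch of the metadata lookup above, assuming a
// std::map<std::string, std::string> shaped like ReaderInfo::metadata. Note
// that strtof() never throws; std::stof() does, which is what a try/catch
// like the one above can actually intercept. RotationFromMetadata is
// illustrative, not a libopenshot API.
#include <map>
#include <stdexcept>
#include <string>

static float RotationFromMetadata(const std::map<std::string, std::string>& metadata) {
    auto it = metadata.find("rotate");
    if (it == metadata.end())
        return 0.0f;                  // no rotation tag present
    try {
        return std::stof(it->second); // e.g. "90" -> 90.0f
    } catch (const std::exception&) {
        return 0.0f;                  // malformed value: fall back to no rotation
    }
}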
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
// ...
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
// ...
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
std::string ext = get_file_extension(path);
std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

// ...
if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
    ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || path.find("%") != std::string::npos)
allocated_reader = reader;
// ...
if (allocated_reader) {
    delete allocated_reader;
    allocated_reader = NULL;
}
if (parentTimeline) {
    // Look up both potential parents by ID on the timeline
    std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
    Clip* clipObject = parentTimeline->GetClip(object_id);

    if (trackedObject) {
        // ...
    }
    else if (clipObject) {
        // ...
    }
}
// ...
parentTrackedObject = trackedObject;
// ...
parentClipObject = clipObject;
bool is_same_reader = false;
if (new_reader && allocated_reader) {
    if (new_reader->Name() == "FrameMapper") {
        // ...
        if (allocated_reader == clip_mapped_reader->Reader()) {
            is_same_reader = true;
        }
    }
}
// ...
if (allocated_reader && !is_same_reader) {
    // Close and free the previously allocated reader (unless it is being reused)
    allocated_reader->Close();
    delete allocated_reader;
    // ...
    allocated_reader = NULL;
}
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
// ...
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
// ...
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
// ...
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
return GetFrame(NULL, clip_frame_number, NULL);
// ...
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    return GetFrame(background_frame, clip_frame_number, NULL);
}
throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");
// ...
std::shared_ptr<Frame> frame = NULL;
// Check the cache first
frame = final_cache.GetFrame(clip_frame_number);
// ...
    "Clip::GetFrame (Cached frame found)",
    "requested_frame", clip_frame_number);
// ...
frame = GetOrCreateFrame(clip_frame_number);
// ...
if (!background_frame) {
    // Create a transparent background canvas matching the source frame
    background_frame = std::make_shared<Frame>(clip_frame_number, frame->GetWidth(), frame->GetHeight(),
                                               "#00000000", frame->GetAudioSamplesCount(),
                                               frame->GetAudioChannelsCount());
}
// ...
apply_timemapping(frame);
// ...
apply_waveform(frame, background_frame->GetImage());
// ...
apply_effects(frame);
// ...
if (timeline != NULL && options != NULL) {
// ...
apply_keyframes(frame, background_frame->GetImage());
// ...
final_cache.Add(frame);
// ...
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
for (const auto& effect : effects) {
    if (effect->Id() == id) {
        return effect;
    }
}
std::string Clip::get_file_extension(std::string path)
{
    // ...
    return path.substr(path.find_last_of(".") + 1);
}
int number_of_samples = buffer->getNumSamples();
int channels = buffer->getNumChannels();
// ...
for (int channel = 0; channel < channels; channel++)
{
    int n = 0; // write index, reset for each channel
    for (int s = number_of_samples - 1; s >= 0; s--, n++)
        reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
}
// ...
for (int channel = 0; channel < channels; channel++)
    buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
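// Minimal alternative sketch: reversing each channel in place with
// std::reverse avoids the temporary "reversed" buffer used above. Assumes
// <algorithm> and the JUCE audio-basics module are available.
static void ReverseChannelsInPlace(juce::AudioBuffer<float>* buffer) {
    for (int channel = 0; channel < buffer->getNumChannels(); ++channel) {
        float* data = buffer->getWritePointer(channel);
        std::reverse(data, data + buffer->getNumSamples()); // swap ends inward
    }
}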
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
    // ...
    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
    // ...
    const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
    // ...
    int64_t clip_frame_number = frame->number;
    int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
    // ...
    int source_sample_count = round(target_sample_count * fabs(delta));
    // ...
    location.frame = new_frame_number;
    // ...
    init_samples.clear();
    resampler->SetBuffer(&init_samples, 1.0);
    // ...
    if (source_sample_count <= 0) {
        // Nothing to pull from the source: output silence
        frame->AddAudioSilence(target_sample_count);
        // ...
    }
    // ...
    source_samples->clear();
    // ...
    int remaining_samples = source_sample_count;
    // ...
    while (remaining_samples > 0) {
        std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
        int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;
        // ...
        if (frame_sample_count == 0) {
            // ...
        }
        // ...
        if (remaining_samples - frame_sample_count >= 0) {
            // Copy the rest of this source frame, then advance to the next one
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
            }
            // ...
            remaining_samples -= frame_sample_count;
            source_pos += frame_sample_count;
        }
        else {
            // Copy only the samples still needed from this source frame
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
            }
            // ...
            source_pos += remaining_samples;
            remaining_samples = 0;
        }
    }
    // ...
    frame->AddAudioSilence(target_sample_count);
    // ...
    if (source_sample_count != target_sample_count) {
        // Resample the gathered samples to exactly fill one output frame
        double resample_ratio = double(source_sample_count) / double(target_sample_count);
        resampler->SetBuffer(source_samples, resample_ratio);
        // ...
        frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
    }
    else {
        // Counts already match: copy the source samples straight through
        frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
    }
    // ...
    delete source_samples;
}
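// Worked example of the resampling arithmetic above: playing at 2x speed
// (time-curve delta = 2.0) with 800 samples per output frame requires 1600
// source samples, giving a resample ratio of 2.0:
//
//     source_sample_count = round(800 * fabs(2.0));          // = 1600
//     resample_ratio      = 1600.0 / 800.0;                  // = 2.0
//
// A matching count (delta = +/-1.0) skips the resampler entirely and the raw
// source samples are copied straight through, as in the else-branch above.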
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Frame numbers are 1-based; never return anything below 1
    if (frame_number < 1)
        return 1;
    return frame_number;
}
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
    // ...
    int64_t clip_frame_number = adjust_frame_number_minimum(number);
    // ...
    clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
    // ...
        "Clip::GetOrCreateFrame (from reader)",
        "number", number, "clip_frame_number", clip_frame_number);
    // ...
    auto reader_frame = reader->GetFrame(clip_frame_number);
    reader_frame->number = number;
    // ...
    auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
    // ...
    reader_copy->AddColor(QColor(Qt::transparent));
    // ...
    reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
    // ...
        "Clip::GetOrCreateFrame (create blank)",
        "estimated_samples_in_frame", estimated_samples_in_frame);
    // ...
    auto new_frame = std::make_shared<Frame>(
        // ...
        "#000000", estimated_samples_in_frame, reader->info.channels);
    // ...
    new_frame->AddAudioSilence(estimated_samples_in_frame);
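// Minimal sketch of constructing a "blank" frame like the fallback above,
// using the same Frame constructor signature; the 1920x1080 dimensions are
// hypothetical. AddAudioSilence() fills the buffer with zeroed samples so
// downstream mixing still sees a complete frame. Assumes <memory>.
static std::shared_ptr<openshot::Frame> MakeBlankFrame(int64_t number, int samples, int channels) {
    auto blank = std::make_shared<openshot::Frame>(number, 1920, 1080, "#000000", samples, channels);
    blank->AddAudioSilence(samples);
    return blank;
}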
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
// ...
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
// ...
root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
if (!parentObjectId.empty()) {
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
} else {
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", "", NULL, -1, -1, false, requested_frame);
}
if (parentTrackedObject)
{
    // ...
    double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
    // ...
    std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
    double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
    // ...
    std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);
    // ...
    // Offset the tracked box center (cx/cy) by the parent clip's own position
    float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
    float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
    float parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
    float parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
    float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];
    // ...
    root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
    root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
    root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
    root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
}
else if (parentClipObject)
{
    // ...
    double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;
    // ...
    // Evaluate the parent clip's keyframes at the mapped timeline frame
    float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
    float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
    float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
    float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
    float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
    float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
    float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
    // ...
    root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
    root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
    root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
    root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
    root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
    root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
    root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
}
// ...
return root.toStyledString();
// ...
root["parentObjectId"] = parentObjectId;
// ...
root["scale"] = scale;
// ...
root["waveform"] = waveform;
// ...
root["effects"] = Json::Value(Json::arrayValue);
// ...
for (auto existing_effect : effects)
{
    // ...
    root["effects"].append(existing_effect->JsonValue());
}
// ...
root["reader"] = Json::Value(Json::objectValue);
catch (const std::exception& e)
{
    // ...
    throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
}
if (!root["parentObjectId"].isNull()) {
    parentObjectId = root["parentObjectId"].asString();
    if (parentObjectId.size() > 0 && parentObjectId != "") {
        // ...
    }
    else {
        // No parent given: detach from both potential parents
        parentTrackedObject = nullptr;
        parentClipObject = NULL;
    }
}
if (!root["gravity"].isNull())
    gravity = (GravityType) root["gravity"].asInt();
if (!root["scale"].isNull())
    scale = (ScaleType) root["scale"].asInt();
if (!root["anchor"].isNull())
    anchor = (AnchorType) root["anchor"].asInt();
if (!root["display"].isNull())
    display = (FrameDisplayType) root["display"].asInt();
if (!root["mixing"].isNull())
    mixing = (VolumeMixType) root["mixing"].asInt();
if (!root["waveform"].isNull())
    waveform = root["waveform"].asBool();
if (!root["scale_x"].isNull())
    scale_x.SetJsonValue(root["scale_x"]);
if (!root["scale_y"].isNull())
    scale_y.SetJsonValue(root["scale_y"]);
if (!root["location_x"].isNull())
    location_x.SetJsonValue(root["location_x"]);
if (!root["location_y"].isNull())
    location_y.SetJsonValue(root["location_y"]);
if (!root["alpha"].isNull())
    alpha.SetJsonValue(root["alpha"]);
if (!root["rotation"].isNull())
    rotation.SetJsonValue(root["rotation"]);
if (!root["time"].isNull())
    time.SetJsonValue(root["time"]);
if (!root["volume"].isNull())
    volume.SetJsonValue(root["volume"]);
if (!root["wave_color"].isNull())
    wave_color.SetJsonValue(root["wave_color"]);
if (!root["shear_x"].isNull())
    shear_x.SetJsonValue(root["shear_x"]);
if (!root["shear_y"].isNull())
    shear_y.SetJsonValue(root["shear_y"]);
if (!root["origin_x"].isNull())
    origin_x.SetJsonValue(root["origin_x"]);
if (!root["origin_y"].isNull())
    origin_y.SetJsonValue(root["origin_y"]);
if (!root["channel_filter"].isNull())
    channel_filter.SetJsonValue(root["channel_filter"]);
if (!root["channel_mapping"].isNull())
    channel_mapping.SetJsonValue(root["channel_mapping"]);
if (!root["has_audio"].isNull())
    has_audio.SetJsonValue(root["has_audio"]);
if (!root["has_video"].isNull())
    has_video.SetJsonValue(root["has_video"]);
if (!root["perspective_c1_x"].isNull())
    perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
if (!root["perspective_c1_y"].isNull())
    perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
if (!root["perspective_c2_x"].isNull())
    perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
if (!root["perspective_c2_y"].isNull())
    perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
if (!root["perspective_c3_x"].isNull())
    perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
if (!root["perspective_c3_y"].isNull())
    perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
if (!root["perspective_c4_x"].isNull())
    perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
if (!root["perspective_c4_y"].isNull())
    perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
if (!root["effects"].isNull()) {
    // ...
    for (const auto existing_effect : root["effects"]) {
        // ...
        if (!existing_effect["type"].isNull()) {
            // ...
            if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {
                // ...
if (!root["reader"].isNull())
{
    if (!root["reader"]["type"].isNull())
    {
        // ...
        bool already_open = false;
        // ...
        already_open = reader->IsOpen();
        // ...
        std::string type = root["reader"]["type"].asString();
        // ...
        if (type == "FFmpegReader") {
            // ...
        } else if (type == "QtImageReader") {
            // ...
#ifdef USE_IMAGEMAGICK
        } else if (type == "ImageReader") {
            // ...
            reader = new ImageReader(root["reader"]["path"].asString(), false);
            // ...
        } else if (type == "TextReader") {
            // ...
#endif
        } else if (type == "ChunkReader") {
            // ...
        } else if (type == "DummyReader") {
            // ...
        } else if (type == "Timeline") {
            // ...
        }
        // ...
        allocated_reader = reader;
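// Illustrative JSON accepted by SetJsonValue() above; only keys that appear
// in this file are shown, and the "path" value is hypothetical. The
// "reader"/"type" string selects which ReaderBase subclass gets constructed,
// and the new reader is then tracked via allocated_reader for later cleanup.
//
//     {
//       "scale": 1,
//       "waveform": false,
//       "reader": { "type": "FFmpegReader", "path": "video.mp4" }
//     }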
final_cache.Clear();
// ...
void Clip::sort_effects()
// ...
effects.push_back(effect);
// ...
if (parentTimeline) {
    // ...
    std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);
    // ...
}
// ...
final_cache.Clear();
// ...
effects.remove(effect);
// ...
final_cache.Clear();
void Clip::apply_effects(std::shared_ptr<Frame> frame)
{
    // ...
    for (auto effect : effects)
    {
        // ...
        frame = effect->GetFrame(frame, frame->number);
    }
}

// ...
bool Clip::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
    // Skip frames that have no image data to composite
    if (!frame->has_image_data) {
        // ...
    }
    // ...
    std::shared_ptr<QImage> source_image = frame->GetImage();
    // ...
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
    // ...
        "Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
        "frame->number", frame->number,
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());

    // Composite the transformed source image onto the background canvas
    QPainter painter(background_canvas.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
    // ...
    painter.setTransform(transform);
    // ...
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);
    // ...
    std::stringstream frame_number_str;
    // ...
    frame_number_str << frame->number;
    // ...
    painter.setPen(QColor("#ffffff"));
    painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
    // ...
    frame->AddImage(background_canvas);
}
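// Standalone sketch of the QPainter compositing pattern above: paint a
// transformed source image over a canvas with SourceOver blending. File name
// and geometry are hypothetical; only stock Qt APIs are used (assumes
// <QImage>, <QPainter>, <QTransform>).
static void CompositeExample() {
    QImage canvas(1920, 1080, QImage::Format_ARGB32_Premultiplied);
    canvas.fill(Qt::black);
    QImage source("overlay.png");

    QPainter painter(&canvas);
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform, true);
    painter.setTransform(QTransform().translate(100, 50).rotate(15.0));
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, source);
    painter.end();
}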
void Clip::apply_waveform(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
    // ...
    std::shared_ptr<QImage> source_image = frame->GetImage();
    // ...
        "Clip::apply_waveform (Generate Waveform Image)",
        "frame->number", frame->number,
        "background_canvas->width()", background_canvas->width(),
        "background_canvas->height()", background_canvas->height());
    // ...
    source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
    frame->AddImage(source_image);
}
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
    // ...
    std::shared_ptr<QImage> source_image = frame->GetImage();
    // ...
    unsigned char *pixels = source_image->bits();
    // ...
    // Multiply the opacity value into every RGBA byte of the image
    for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
    {
        // ...
        pixels[byte_index + 0] *= alpha_value;
        pixels[byte_index + 1] *= alpha_value;
        pixels[byte_index + 2] *= alpha_value;
        pixels[byte_index + 3] *= alpha_value;
    }
    // ...
        "Clip::get_transform (Set Alpha & Opacity)",
        "alpha_value", alpha_value,
        "frame->number", frame->number);
    QSize source_size = source_image->size();

    // ...
    if (parentTrackedObject) {
        // ...
    }

    switch (scale)
    {
        case (SCALE_FIT):
            source_size.scale(width, height, Qt::KeepAspectRatio);
            // ...
                "Clip::get_transform (Scale: SCALE_FIT)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        case (SCALE_STRETCH):
            source_size.scale(width, height, Qt::IgnoreAspectRatio);
            // ...
                "Clip::get_transform (Scale: SCALE_STRETCH)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        case (SCALE_CROP):
            source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
            // ...
                "Clip::get_transform (Scale: SCALE_CROP)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
        case (SCALE_NONE):
            // ...
                "Clip::get_transform (Scale: SCALE_NONE)",
                "frame->number", frame->number,
                "source_width", source_size.width(),
                "source_height", source_size.height());
            break;
    }
    // Parent object transform defaults (identity)
    float parentObject_location_x = 0.0;
    float parentObject_location_y = 0.0;
    float parentObject_scale_x = 1.0;
    float parentObject_scale_y = 1.0;
    float parentObject_shear_x = 0.0;
    float parentObject_shear_y = 0.0;
    float parentObject_rotation = 0.0;
    if (parentClipObject) {
        // ...
        double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
        // ...
        // Evaluate the parent clip's keyframes at the mapped timeline frame
        parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
    }
    if (parentTrackedObject) {
        // ...
        double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
        // ...
        std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
        // ...
        if (!trackedObjectParentClipProperties.empty())
        {
            // ...
            float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
            // ...
            std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);
            // ...
            parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
            parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
            parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
            parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
            parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
        }
        else
        {
            // No parent clip properties: use the tracked object's box directly
            std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);
            // ...
            parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
            parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
            parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
            parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
            parentObject_rotation = trackedObjectProperties["r"];
        }
    }
    if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
        sx *= parentObject_scale_x;
        sy *= parentObject_scale_y;
    }
    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;

    switch (gravity)
    {
        // ...
        case (GRAVITY_TOP):
            x = (width - scaled_source_width) / 2.0;   // center horizontally
            break;
        case (GRAVITY_TOP_RIGHT):
            x = width - scaled_source_width;           // pin to the right edge
            break;
        case (GRAVITY_LEFT):
            y = (height - scaled_source_height) / 2.0; // center vertically
            break;
        case (GRAVITY_CENTER):
            x = (width - scaled_source_width) / 2.0;
            y = (height - scaled_source_height) / 2.0;
            break;
        case (GRAVITY_RIGHT):
            x = width - scaled_source_width;
            y = (height - scaled_source_height) / 2.0;
            break;
        case (GRAVITY_BOTTOM_LEFT):
            y = (height - scaled_source_height);       // pin to the bottom edge
            break;
        case (GRAVITY_BOTTOM):
            x = (width - scaled_source_width) / 2.0;
            y = (height - scaled_source_height);
            break;
        case (GRAVITY_BOTTOM_RIGHT):
            x = width - scaled_source_width;
            y = (height - scaled_source_height);
            break;
    }

    // ...
        "Clip::get_transform (Gravity)",
        "frame->number", frame->number,
        "source_clip->gravity", gravity,
        "scaled_source_width", scaled_source_width,
        "scaled_source_height", scaled_source_height);
    QTransform transform;

    // ...
    float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
    float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;

    // ...
        "Clip::get_transform (Build QTransform - if needed)",
        "frame->number", frame->number,
        // ...
        "sx", sx, "sy", sy);

    if (!isEqual(x, 0) || !isEqual(y, 0)) {
        // ...
        transform.translate(x, y);
    }
    if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
        // Rotate and shear around the clip's origin point
        float origin_x_offset = (scaled_source_width * origin_x_value);
        float origin_y_offset = (scaled_source_height * origin_y_value);
        transform.translate(origin_x_offset, origin_y_offset);
        transform.rotate(r);
        transform.shear(shear_x_value, shear_y_value);
        transform.translate(-origin_x_offset, -origin_y_offset);
    }
    // ...
    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
    if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
    }
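// Minimal sketch of the rotate-about-origin idiom used above: translate the
// chosen pivot to (0,0), rotate there, then translate back, so the rotation
// pivots around the origin point rather than the image's top-left corner.
static QTransform RotateAbout(float cx, float cy, float degrees) {
    QTransform t;
    t.translate(cx, cy);    // move pivot to the origin
    t.rotate(degrees);      // rotate around it
    t.translate(-cx, -cy);  // and move back
    return t;
}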
int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
    // ...
    start = parent->Start();
    // ...
    int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
    int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;

    return frame_number;
}
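// Worked example of the mapping above, with hypothetical numbers: a clip
// positioned at 2.0s on a 30 fps timeline has
//
//     clip_start_position = round(2.0 * 30) + 1;   // = 61
//
// so if the clip's trimmed start corresponds to clip_start_frame = 31,
// clip frame 1 lands on timeline frame 1 + 61 - 31 = 31.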