You've already forked libopenshot
mirror of
https://github.com/OpenShot/libopenshot.git
synced 2026-03-02 08:53:52 -08:00
Another huge refactor and lots of bug fixes! Time mapping now works... mostly. The jumpy / glitchy images have been solved by copying the AVFrame into an AVPicture, which does not get clobbered on the next decode.
Also, added a new frame number overlay (in the top, left part of the image), and a new AddColor() method, to change the size and color of the frame's image... Padded the end of the FFmpegWriter / WriteTrailer() method, to avoid having a codec ignore the last many frames.
This commit is contained in:
@@ -64,9 +64,6 @@ namespace openshot {
|
||||
/// Display a list of cached frame numbers
|
||||
void Display();
|
||||
|
||||
/// Display the list of cache and clear the cache (mainly for debugging reasons)
|
||||
void DisplayAndClear();
|
||||
|
||||
/// Count the frames in the queue
|
||||
int Count();
|
||||
|
||||
|
||||
@@ -62,7 +62,7 @@ namespace openshot
|
||||
AVCodecContext *pCodecCtx, *aCodecCtx;
|
||||
AVStream *pStream, *aStream;
|
||||
AVPacket *packet;
|
||||
AVFrame *pFrame;
|
||||
AVPicture *pFrame;
|
||||
bool is_open;
|
||||
|
||||
bool check_interlace;
|
||||
@@ -75,7 +75,7 @@ namespace openshot
|
||||
Cache final_cache;
|
||||
Cache working_cache;
|
||||
map<AVPacket*, AVPacket*> packets;
|
||||
map<AVFrame*, AVFrame*> frames;
|
||||
map<AVPicture*, AVPicture*> frames;
|
||||
map<int, int> processing_video_frames;
|
||||
map<int, int> processing_audio_frames;
|
||||
|
||||
@@ -147,7 +147,7 @@ namespace openshot
|
||||
Frame* ReadStream(int requested_frame);
|
||||
|
||||
/// Remove AVFrame from cache (and deallocate it's memory)
|
||||
void RemoveAVFrame(AVFrame*);
|
||||
void RemoveAVFrame(AVPicture*);
|
||||
|
||||
/// Remove AVPacket from cache (and deallocate it's memory)
|
||||
void RemoveAVPacket(AVPacket*);
|
||||
|
||||
@@ -78,6 +78,7 @@ namespace openshot
|
||||
int audio_input_position;
|
||||
AudioResampler *resampler;
|
||||
|
||||
Frame* last_frame;
|
||||
deque<Frame*> spooled_audio_frames;
|
||||
deque<Frame*> spooled_video_frames;
|
||||
|
||||
|
||||
@@ -69,6 +69,9 @@ namespace openshot
|
||||
/// Assignment operator
|
||||
Frame& operator= (const Frame& other);
|
||||
|
||||
/// Add (or replace) pixel data to the frame (based on a solid color)
|
||||
void AddColor(int width, int height, string color);
|
||||
|
||||
/// Add (or replace) pixel data to the frame
|
||||
void AddImage(int width, int height, const string map, const Magick::StorageType type, const void *pixels_);
|
||||
|
||||
@@ -84,6 +87,9 @@ namespace openshot
|
||||
/// Experimental method to add overlay images to this frame
|
||||
void AddOverlay(Frame* frame);
|
||||
|
||||
/// Experimental method to add the frame number on top of the image
|
||||
void AddOverlayNumber(int overlay_number);
|
||||
|
||||
/// Clear the waveform image (and deallocate it's memory)
|
||||
void ClearWaveform();
|
||||
|
||||
|
||||
@@ -169,27 +169,6 @@ void Cache::Display()
|
||||
}
|
||||
}
|
||||
|
||||
// Display the list of cache and clear the cache (mainly for debugging reasons)
|
||||
void Cache::DisplayAndClear()
|
||||
{
|
||||
cout << "----- Cache List (" << frames.size() << ") ------" << endl;
|
||||
int i = 1;
|
||||
while(!frame_numbers.empty())
|
||||
{
|
||||
// Print the frame number
|
||||
int frame_number = frame_numbers.back();
|
||||
cout << " " << i << ") --- Frame " << frame_number << endl;
|
||||
|
||||
// Remove this frame
|
||||
Remove(frame_number);
|
||||
|
||||
// increment counter
|
||||
i++;
|
||||
}
|
||||
|
||||
// Reset total bytes
|
||||
total_bytes = 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -161,8 +161,6 @@ Frame* Clip::GetFrame(int requested_frame) throw(ReaderClosed)
|
||||
// Get time mapped frame number (used to increase speed, change direction, etc...)
|
||||
frame_number = adjust_frame_number_minimum(get_time_mapped_frame(frame_number));
|
||||
|
||||
cout << "Requested frame: " << frame_number << endl;
|
||||
|
||||
// Now that we have re-mapped what frame number is needed, go and get the frame pointer
|
||||
Frame *frame = file_reader->GetFrame(frame_number);
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ using namespace openshot;
|
||||
|
||||
FFmpegReader::FFmpegReader(string path) throw(InvalidFile, NoStreamsFound, InvalidCodec)
|
||||
: last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
|
||||
audio_pts_offset(99999), video_pts_offset(99999), working_cache(0), final_cache(24), path(path),
|
||||
audio_pts_offset(99999), video_pts_offset(99999), working_cache(0), final_cache(8200 * 1024), path(path),
|
||||
is_video_seek(true), check_interlace(false), check_fps(false), enable_seek(true),
|
||||
rescaler_position(0), num_of_rescalers(32), is_open(false) {
|
||||
|
||||
@@ -497,16 +497,22 @@ bool FFmpegReader::GetAVFrame()
|
||||
// is frame finished
|
||||
if (frameFinished)
|
||||
{
|
||||
// AVFrames are clobbered on the each call to avcodec_decode_video, so we
|
||||
// must make a copy of the image data before this method is called again.
|
||||
AVPicture *copyFrame = new AVPicture();
|
||||
avpicture_alloc(copyFrame, pCodecCtx->pix_fmt, info.width, info.height);
|
||||
av_picture_copy(copyFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt, info.width, info.height);
|
||||
|
||||
// add to AVFrame cache (if frame finished)
|
||||
frames[next_frame] = next_frame;
|
||||
pFrame = frames[next_frame];
|
||||
frames[copyFrame] = copyFrame;
|
||||
pFrame = frames[copyFrame];
|
||||
|
||||
// Detect interlaced frame (only once)
|
||||
if (!check_interlace)
|
||||
{
|
||||
check_interlace = true;
|
||||
info.interlaced_frame = pFrame->interlaced_frame;
|
||||
info.top_field_first = pFrame->top_field_first;
|
||||
info.interlaced_frame = next_frame->interlaced_frame;
|
||||
info.top_field_first = next_frame->top_field_first;
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -545,7 +551,7 @@ bool FFmpegReader::CheckSeek(bool is_video)
|
||||
cout << "Woops! Need to seek backwards further..." << endl;
|
||||
|
||||
// Seek again... to the nearest Keyframe
|
||||
Seek(seeking_frame - 5);
|
||||
Seek(seeking_frame - 10);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -591,9 +597,9 @@ void FFmpegReader::ProcessVideoPacket(int requested_frame)
|
||||
long int video_length = info.video_length;
|
||||
Cache *my_cache = &working_cache;
|
||||
AVPacket *my_packet = packets[packet];
|
||||
AVFrame *my_frame = frames[pFrame];
|
||||
AVPicture *my_frame = frames[pFrame];
|
||||
|
||||
// Get a unique rescaler (for this thread)
|
||||
// Get a scaling context
|
||||
SwsContext *img_convert_ctx = image_rescalers[rescaler_position];
|
||||
rescaler_position++;
|
||||
if (rescaler_position == num_of_rescalers)
|
||||
@@ -913,9 +919,6 @@ void FFmpegReader::Seek(int requested_frame) throw(TooManySeeks)
|
||||
{
|
||||
// VIDEO SEEK
|
||||
is_video_seek = true;
|
||||
|
||||
// Flush video buffer
|
||||
avcodec_flush_buffers(pCodecCtx);
|
||||
}
|
||||
|
||||
// Seek audio stream (if not already seeked... and if an audio stream is found)
|
||||
@@ -926,15 +929,19 @@ void FFmpegReader::Seek(int requested_frame) throw(TooManySeeks)
|
||||
} else
|
||||
{
|
||||
// AUDIO SEEK
|
||||
seek_worked = true;
|
||||
is_video_seek = false;
|
||||
|
||||
// Flush audio buffer
|
||||
avcodec_flush_buffers(aCodecCtx);
|
||||
}
|
||||
|
||||
// Was the seek successful?
|
||||
if (seek_worked)
|
||||
{
|
||||
// Flush audio buffer
|
||||
avcodec_flush_buffers(aCodecCtx);
|
||||
|
||||
// Flush video buffer
|
||||
avcodec_flush_buffers(pCodecCtx);
|
||||
|
||||
// init seek flags
|
||||
is_seeking = true;
|
||||
seeking_pts = seek_target;
|
||||
@@ -1152,7 +1159,7 @@ void FFmpegReader::CheckWorkingFrames(bool end_of_stream)
|
||||
void FFmpegReader::CheckFPS()
|
||||
{
|
||||
check_fps = true;
|
||||
pFrame = avcodec_alloc_frame();
|
||||
avpicture_alloc(pFrame, pCodecCtx->pix_fmt, info.width, info.height);
|
||||
|
||||
int first_second_counter = 0;
|
||||
int second_second_counter = 0;
|
||||
@@ -1274,7 +1281,7 @@ void FFmpegReader::CheckFPS()
|
||||
}
|
||||
|
||||
// Remove AVFrame from cache (and deallocate it's memory)
|
||||
void FFmpegReader::RemoveAVFrame(AVFrame* remove_frame)
|
||||
void FFmpegReader::RemoveAVFrame(AVPicture* remove_frame)
|
||||
{
|
||||
// Remove pFrame (if exists)
|
||||
if (frames.count(remove_frame))
|
||||
|
||||
@@ -31,7 +31,7 @@ FFmpegWriter::FFmpegWriter(string path) throw (InvalidFile, InvalidFormat, Inval
|
||||
path(path), fmt(NULL), oc(NULL), audio_st(NULL), video_st(NULL), audio_pts(0), video_pts(0), samples(NULL),
|
||||
audio_outbuf(NULL), audio_outbuf_size(0), audio_input_frame_size(0), audio_input_position(0),
|
||||
initial_audio_input_frame_size(0), resampler(NULL), img_convert_ctx(NULL), cache_size(8),
|
||||
num_of_rescalers(32), rescaler_position(0), video_codec(NULL), audio_codec(NULL), is_writing(false)
|
||||
num_of_rescalers(32), rescaler_position(0), video_codec(NULL), audio_codec(NULL), is_writing(false), last_frame(NULL)
|
||||
{
|
||||
|
||||
// Init FileInfo struct (clear all values)
|
||||
@@ -311,9 +311,10 @@ void FFmpegWriter::WriteFrame(Frame* frame)
|
||||
// Write frames to video file
|
||||
write_queued_frames();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Keep track of the last frame added
|
||||
last_frame = frame;
|
||||
}
|
||||
|
||||
// Write all frames in the queue to the video file.
|
||||
@@ -434,14 +435,24 @@ void FFmpegWriter::WriteFrame(FileReaderBase* reader, int start, int length)
|
||||
// Write the file trailer (after all frames are written)
|
||||
void FFmpegWriter::WriteTrailer()
|
||||
{
|
||||
// YES, WRITING... so wait until it finishes, before writing again
|
||||
while (is_writing)
|
||||
usleep(250 * 1000); // sleep for 250 milliseconds
|
||||
// Experimental: Repeat last frame many times, to pad
|
||||
// the end of the video, to ensure the codec does not
|
||||
// ignore the final frames.
|
||||
if (last_frame)
|
||||
{
|
||||
// Create black frame
|
||||
Frame *padding_frame = new Frame(999999, last_frame->GetWidth(), last_frame->GetHeight(), "#000000", last_frame->GetAudioSamplesCount(), last_frame->GetAudioChannelsCount());
|
||||
padding_frame->AddColor(last_frame->GetWidth(), last_frame->GetHeight(), "#000000");
|
||||
|
||||
// Add the black frame many times
|
||||
for (int p = 0; p < 25; p++)
|
||||
WriteFrame(padding_frame);
|
||||
}
|
||||
|
||||
// Write any remaining queued frames to video file
|
||||
write_queued_frames();
|
||||
|
||||
/* write the trailer, if any. the trailer must be written
|
||||
/* write the trailer, if any. The trailer must be written
|
||||
* before you close the CodecContexts open when you wrote the
|
||||
* header; otherwise write_trailer may try to use memory that
|
||||
* was freed on av_codec_close() */
|
||||
|
||||
@@ -467,6 +467,20 @@ void Frame::Save(string path, float scale)
|
||||
copy.write(path);
|
||||
}
|
||||
|
||||
// Add (or replace) pixel data to the frame (based on a solid color)
|
||||
void Frame::AddColor(int width, int height, string color)
|
||||
{
|
||||
// Deallocate image memory
|
||||
if (image)
|
||||
{
|
||||
delete image;
|
||||
image = NULL;
|
||||
}
|
||||
|
||||
// Create new image object, and fill with pixel data
|
||||
image = new Magick::Image(Magick::Geometry(width, height), Magick::Color(color));
|
||||
}
|
||||
|
||||
// Add (or replace) pixel data to the frame
|
||||
void Frame::AddImage(int width, int height, const string map, const Magick::StorageType type, const void *pixels)
|
||||
{
|
||||
@@ -531,6 +545,28 @@ void Frame::AddOverlay(Frame* frame)
|
||||
image->composite(*overlay, Magick::SouthEastGravity, Magick::OverCompositeOp);
|
||||
}
|
||||
|
||||
// Experimental method to add the frame number on top of the image
|
||||
void Frame::AddOverlayNumber(int overlay_number)
|
||||
{
|
||||
stringstream label;
|
||||
if (overlay_number > 0)
|
||||
label << overlay_number;
|
||||
else
|
||||
label << number;
|
||||
|
||||
// Drawable text
|
||||
list<Magick::Drawable> lines;
|
||||
|
||||
lines.push_back(Magick::DrawableGravity(Magick::NorthWestGravity));
|
||||
lines.push_back(Magick::DrawableStrokeColor("#ffffff"));
|
||||
lines.push_back(Magick::DrawableFillColor("#ffffff"));
|
||||
lines.push_back(Magick::DrawableStrokeWidth(0.1));
|
||||
lines.push_back(Magick::DrawablePointSize(24));
|
||||
lines.push_back(Magick::DrawableText(5, 5, label.str()));
|
||||
|
||||
image->draw(lines);
|
||||
}
|
||||
|
||||
// Get pointer to Magick++ image object
|
||||
Magick::Image* Frame::GetImage()
|
||||
{
|
||||
|
||||
37
src/Main.cpp
37
src/Main.cpp
@@ -18,10 +18,12 @@ int main()
|
||||
Timeline t(640, 360, Framerate(24,1));
|
||||
|
||||
// Add some clips
|
||||
Clip c1("/home/jonathan/Videos/sintel_trailer-720p.mp4");
|
||||
Clip c1("/home/jonathan/Videos/sintel-1024-stereo.mp4");
|
||||
c1.Position(0.0);
|
||||
c1.time.AddPoint(1, 400);
|
||||
c1.time.AddPoint(200, 200, LINEAR);
|
||||
c1.time.AddPoint(1, 700);
|
||||
c1.time.AddPoint(100, 800, LINEAR);
|
||||
c1.time.AddPoint(124, 700);
|
||||
c1.time.AddPoint(324, 800);
|
||||
|
||||
// Add clips
|
||||
t.AddClip(&c1);
|
||||
@@ -44,9 +46,10 @@ int main()
|
||||
// Output stream info
|
||||
w.OutputStreamInfo();
|
||||
|
||||
for (int frame = 1; frame <= 200; frame++)
|
||||
for (int frame = 1; frame <= 324; frame++)
|
||||
{
|
||||
Frame *f = t.GetFrame(frame);
|
||||
f->AddOverlayNumber(0);
|
||||
|
||||
// Write frame
|
||||
cout << "queue frame " << frame << " (" << f << ")" << endl;
|
||||
@@ -102,18 +105,19 @@ int main()
|
||||
// openshot::FFmpegReader r("../../src/examples/piano.wav");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Videos/big-buck-bunny_trailer.webm");
|
||||
|
||||
// openshot::FFmpegReader r("/home/jonathan/Videos/sintel-1024-stereo.mp4");
|
||||
openshot::FFmpegReader r("/home/jonathan/Videos/sintel-1024-stereo.mp4");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Videos/OpenShot_Now_In_3d.mp4");
|
||||
openshot::FFmpegReader r("/home/jonathan/Videos/sintel_trailer-720p.mp4");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Videos/sintel_trailer-720p.mp4");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Aptana Studio Workspace/OpenShotLibrary/src/examples/piano.wav");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Music/Army of Lovers/Crucified/Army of Lovers - Crucified [Single Version].mp3");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Documents/OpenShot Art/test.jpeg");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Videos/60fps.mp4");
|
||||
// openshot::FFmpegReader r("/home/jonathan/Aptana Studio Workspace/OpenShotLibrary/src/examples/asdf.wdf");
|
||||
|
||||
// Display debug info
|
||||
r.DisplayInfo();
|
||||
|
||||
// // Display debug info
|
||||
// r.Open();
|
||||
// r.DisplayInfo();
|
||||
//
|
||||
// // Create a writer
|
||||
// FFmpegWriter w("/home/jonathan/output.webm");
|
||||
// w.DisplayInfo();
|
||||
@@ -149,16 +153,12 @@ int main()
|
||||
//
|
||||
// //Frame *f = r.GetFrame(1);
|
||||
//
|
||||
// for (int frame = 1; frame <= 1000; frame++)
|
||||
// //for (int frame = 800; frame >= 600; frame--)
|
||||
// for (int frame = 1; frame <= 200; frame++)
|
||||
// {
|
||||
// Frame *f = r.GetFrame(frame);
|
||||
// //f->AddOverlay(overlay);
|
||||
//
|
||||
// //if (f->number == 307 || f->number == 308 || f->number == 309 || f->number == 310)
|
||||
// //f->DisplayWaveform();
|
||||
//
|
||||
// // Apply effect
|
||||
// //f->AddEffect("flip");
|
||||
// f->AddOverlayNumber(0);
|
||||
// //f->Display();
|
||||
//
|
||||
// // Write frame
|
||||
// cout << "queue frame " << frame << endl;
|
||||
@@ -170,8 +170,7 @@ int main()
|
||||
//
|
||||
// // Close writer & reader
|
||||
// w.Close();
|
||||
r.Close();
|
||||
//i.Close();
|
||||
// r.Close();
|
||||
|
||||
|
||||
cout << "Successfully executed Main.cpp!" << endl;
|
||||
|
||||
@@ -45,8 +45,6 @@ void Timeline::update_open_clips(Clip *clip, bool is_open)
|
||||
|
||||
// Close the clip's reader
|
||||
clip->Close();
|
||||
|
||||
cout << "-- Remove clip " << clip << " from opened clips map" << endl;
|
||||
}
|
||||
else if (!clip_found && is_open)
|
||||
{
|
||||
@@ -55,8 +53,6 @@ void Timeline::update_open_clips(Clip *clip, bool is_open)
|
||||
|
||||
// Open the clip's reader
|
||||
clip->Open();
|
||||
|
||||
cout << "-- Add clip " << clip << " to opened clips map" << endl;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,8 +94,6 @@ Frame* Timeline::GetFrame(int requested_frame) throw(ReaderClosed)
|
||||
// Calculate time of frame
|
||||
float requested_time = calculate_time(requested_frame, fps);
|
||||
|
||||
//cout << "requested_frame: " << requested_frame << ", requested_time: " << requested_time << endl;
|
||||
|
||||
// Find Clips at this time
|
||||
list<Clip*>::iterator clip_itr;
|
||||
for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
|
||||
|
||||
@@ -27,7 +27,7 @@ TEST(Cache_Max_Bytes_Constructor)
|
||||
Cache c(250 * 1024);
|
||||
|
||||
// Loop 20 times
|
||||
for (int i = 20; i > 0; i--)
|
||||
for (int i = 30; i > 0; i--)
|
||||
{
|
||||
// Add blank frame to the cache
|
||||
Frame *f = new Frame(i, 320, 240, "#000000");
|
||||
@@ -38,7 +38,7 @@ TEST(Cache_Max_Bytes_Constructor)
|
||||
CHECK_EQUAL(20, c.Count());
|
||||
|
||||
// Add 10 frames again
|
||||
for (int i = 30; i > 20; i--)
|
||||
for (int i = 10; i > 0; i--)
|
||||
{
|
||||
// Add blank frame to the cache
|
||||
Frame *f = new Frame(i, 320, 240, "#000000");
|
||||
@@ -49,14 +49,13 @@ TEST(Cache_Max_Bytes_Constructor)
|
||||
CHECK_EQUAL(20, c.Count());
|
||||
|
||||
// Check which items the cache kept
|
||||
CHECK_EQUAL(false, c.Exists(1));
|
||||
CHECK_EQUAL(false, c.Exists(5));
|
||||
CHECK_EQUAL(false, c.Exists(9));
|
||||
CHECK_EQUAL(true, c.Exists(1));
|
||||
CHECK_EQUAL(true, c.Exists(10));
|
||||
CHECK_EQUAL(true, c.Exists(11));
|
||||
CHECK_EQUAL(true, c.Exists(15));
|
||||
CHECK_EQUAL(true, c.Exists(19));
|
||||
CHECK_EQUAL(true, c.Exists(20));
|
||||
CHECK_EQUAL(false, c.Exists(21));
|
||||
CHECK_EQUAL(false, c.Exists(30));
|
||||
}
|
||||
|
||||
TEST(Cache_Clear)
|
||||
|
||||
Reference in New Issue
Block a user