diff --git a/include/AudioBufferSource.h b/include/AudioBufferSource.h
index 99c1a1f6..57826f66 100644
--- a/include/AudioBufferSource.h
+++ b/include/AudioBufferSource.h
@@ -79,13 +79,13 @@ namespace openshot
/// @brief Set the next read position of this source
/// @param newPosition The sample # to start reading from
- void setNextReadPosition (long long newPosition);
+ void setNextReadPosition (int64 newPosition);
/// Get the next read position of this source
- long long getNextReadPosition() const;
+ int64 getNextReadPosition() const;
/// Get the total length (in samples) of this audio source
- long long getTotalLength() const;
+ int64 getTotalLength() const;
/// Determines if this audio source should repeat when it reaches the end
bool isLooping() const;
diff --git a/include/AudioReaderSource.h b/include/AudioReaderSource.h
index 4505426a..31b17d80 100644
--- a/include/AudioReaderSource.h
+++ b/include/AudioReaderSource.h
@@ -61,10 +61,10 @@ namespace openshot
int speed; /// The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
ReaderBase *reader; /// The reader to pull samples from
- int64 original_frame_number; /// The current frame to read from
- int64 frame_number; /// The current frame number
+ int64_t original_frame_number; /// The current frame to read from
+ int64_t frame_number; /// The current frame number
std::shared_ptr<Frame> frame; /// The current frame object that is being read
- long int frame_position; /// The position of the current frame's buffer
+ int64_t frame_position; /// The position of the current frame's buffer
double estimated_frame; /// The estimated frame position of the currently playing buffer
int estimated_samples_per_frame; /// The estimated samples per frame of video
@@ -80,7 +80,7 @@ namespace openshot
/// @param audio_reader This reader provides constant samples from a ReaderBase derived class
/// @param starting_frame_number This is the frame number to start reading samples from the reader.
/// @param buffer_size The max number of samples to keep in the buffer at one time.
- AudioReaderSource(ReaderBase *audio_reader, int64 starting_frame_number, int buffer_size);
+ AudioReaderSource(ReaderBase *audio_reader, int64_t starting_frame_number, int buffer_size);
/// Destructor
~AudioReaderSource();
@@ -97,13 +97,13 @@ namespace openshot
/// @brief Set the next read position of this source
/// @param newPosition The sample # to start reading from
- void setNextReadPosition (long long newPosition);
+ void setNextReadPosition (int64 newPosition);
/// Get the next read position of this source
- long long getNextReadPosition() const;
+ int64 getNextReadPosition() const;
/// Get the total length (in samples) of this audio source
- long long getTotalLength() const;
+ int64 getTotalLength() const;
/// Determines if this audio source should repeat when it reaches the end
bool isLooping() const;
@@ -121,7 +121,7 @@ namespace openshot
std::shared_ptr<Frame> getFrame() const { return frame; }
/// Get the estimate frame that is playing at this moment
- long int getEstimatedFrame() const { return long(estimated_frame); }
+ int64_t getEstimatedFrame() const { return int64_t(estimated_frame); }
/// Set Speed (The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
void setSpeed(int new_speed) { speed = new_speed; }
@@ -134,7 +134,7 @@ namespace openshot
ReaderBase* Reader() const { return reader; }
/// Seek to a specific frame
- void Seek(int64 new_position) { frame_number = new_position; estimated_frame = new_position; }
+ void Seek(int64_t new_position) { frame_number = new_position; estimated_frame = new_position; }
};
diff --git a/include/CacheBase.h b/include/CacheBase.h
index 41c7d1f9..379c734a 100644
--- a/include/CacheBase.h
+++ b/include/CacheBase.h
@@ -46,7 +46,7 @@ namespace openshot {
{
protected:
string cache_type; ///< This is a friendly type name of the derived cache instance
- long long int max_bytes; ///< This is the max number of bytes to cache (0 = no limit)
+ int64_t max_bytes; ///< This is the max number of bytes to cache (0 = no limit)
/// Section lock for multiple threads
CriticalSection *cacheCriticalSection;
@@ -58,7 +58,7 @@ namespace openshot {
/// @brief Constructor that sets the max bytes to cache
/// @param max_bytes The maximum bytes to allow in the cache. Once exceeded, the cache will purge the oldest frames.
- CacheBase(long long int max_bytes);
+ CacheBase(int64_t max_bytes);
/// @brief Add a Frame to the cache
/// @param frame The openshot::Frame object needing to be cached.
@@ -68,33 +68,33 @@ namespace openshot {
virtual void Clear() = 0;
/// Count the frames in the queue
- virtual long int Count() = 0;
+ virtual int64_t Count() = 0;
/// @brief Get a frame from the cache
/// @param frame_number The frame number of the cached frame
- virtual std::shared_ptr<Frame> GetFrame(long int frame_number) = 0;
+ virtual std::shared_ptr<Frame> GetFrame(int64_t frame_number) = 0;
/// Gets the maximum bytes value
- virtual long long int GetBytes() = 0;
+ virtual int64_t GetBytes() = 0;
/// Get the smallest frame number
virtual std::shared_ptr<Frame> GetSmallestFrame() = 0;
/// @brief Remove a specific frame
/// @param frame_number The frame number of the cached frame
- virtual void Remove(long int frame_number) = 0;
+ virtual void Remove(int64_t frame_number) = 0;
/// @brief Remove a range of frames
/// @param start_frame_number The starting frame number of the cached frame
/// @param end_frame_number The ending frame number of the cached frame
- virtual void Remove(long int start_frame_number, long int end_frame_number) = 0;
+ virtual void Remove(int64_t start_frame_number, int64_t end_frame_number) = 0;
/// Gets the maximum bytes value
- long long int GetMaxBytes() { return max_bytes; };
+ int64_t GetMaxBytes() { return max_bytes; };
/// @brief Set maximum bytes to a different amount
/// @param number_of_bytes The maximum bytes to allow in the cache. Once exceeded, the cache will purge the oldest frames.
- void SetMaxBytes(long long int number_of_bytes) { max_bytes = number_of_bytes; };
+ void SetMaxBytes(int64_t number_of_bytes) { max_bytes = number_of_bytes; };
/// @brief Set maximum bytes to a different amount based on a ReaderInfo struct
/// @param number_of_frames The maximum number of frames to hold in cache
@@ -102,7 +102,7 @@ namespace openshot {
/// @param height The height of the frame's image
/// @param sample_rate The sample rate of the frame's audio data
/// @param channels The number of audio channels in the frame
- void SetMaxBytesFromInfo(long int number_of_frames, int width, int height, int sample_rate, int channels);
+ void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels);
/// Get and Set JSON methods
virtual string Json() = 0; ///< Generate JSON string of this object
diff --git a/include/CacheDisk.h b/include/CacheDisk.h
index 3a13dcf2..905eb787 100644
--- a/include/CacheDisk.h
+++ b/include/CacheDisk.h
@@ -50,18 +50,18 @@ namespace openshot {
class CacheDisk : public CacheBase {
private:
QDir path; ///< This is the folder path of the cache directory
- map<long int, long int> frames; ///< This map holds the frame number and Frame objects
- deque<long int> frame_numbers; ///< This queue holds a sequential list of cached Frame numbers
+ map<int64_t, int64_t> frames; ///< This map holds the frame number and Frame objects
+ deque<int64_t> frame_numbers; ///< This queue holds a sequential list of cached Frame numbers
string image_format;
float image_quality;
float image_scale;
- long long int frame_size_bytes; ///< The size of the cached frame in bytes
+ int64_t frame_size_bytes; ///< The size of the cached frame in bytes
bool needs_range_processing; ///< Something has changed, and the range data needs to be re-calculated
string json_ranges; ///< JSON ranges of frame numbers
- vector<long int> ordered_frame_numbers; ///< Ordered list of frame numbers used by cache
- map<long int, long int> frame_ranges; ///< This map holds the ranges of frames, useful for quickly displaying the contents of the cache
- long int range_version; ///< The version of the JSON range data (incremented with each change)
+ vector<int64_t> ordered_frame_numbers; ///< Ordered list of frame numbers used by cache
+ map<int64_t, int64_t> frame_ranges; ///< This map holds the ranges of frames, useful for quickly displaying the contents of the cache
+ int64_t range_version; ///< The version of the JSON range data (incremented with each change)
/// Clean up cached frames that exceed the max number of bytes
void CleanUp();
@@ -86,7 +86,7 @@ namespace openshot {
/// @param quality The quality of the image (1.0=highest quality/slowest speed, 0.0=worst quality/fastest speed)
/// @param scale The scale factor for the preview images (1.0 = original size, 0.5=half size, 0.25=quarter size, etc...)
/// @param max_bytes The maximum bytes to allow in the cache. Once exceeded, the cache will purge the oldest frames.
- CacheDisk(string cache_path, string format, float quality, float scale, long long int max_bytes);
+ CacheDisk(string cache_path, string format, float quality, float scale, int64_t max_bytes);
// Default destructor
~CacheDisk();
@@ -99,30 +99,30 @@ namespace openshot {
void Clear();
/// Count the frames in the queue
- long int Count();
+ int64_t Count();
/// @brief Get a frame from the cache
/// @param frame_number The frame number of the cached frame
- std::shared_ptr<Frame> GetFrame(long int frame_number);
+ std::shared_ptr<Frame> GetFrame(int64_t frame_number);
/// Gets the maximum bytes value
- long long int GetBytes();
+ int64_t GetBytes();
/// Get the smallest frame number
std::shared_ptr<Frame> GetSmallestFrame();
/// @brief Move frame to front of queue (so it lasts longer)
/// @param frame_number The frame number of the cached frame
- void MoveToFront(long int frame_number);
+ void MoveToFront(int64_t frame_number);
/// @brief Remove a specific frame
/// @param frame_number The frame number of the cached frame
- void Remove(long int frame_number);
+ void Remove(int64_t frame_number);
/// @brief Remove a range of frames
/// @param start_frame_number The starting frame number of the cached frame
/// @param end_frame_number The ending frame number of the cached frame
- void Remove(long int start_frame_number, long int end_frame_number);
+ void Remove(int64_t start_frame_number, int64_t end_frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
diff --git a/include/CacheMemory.h b/include/CacheMemory.h
index 7002c36e..8220add3 100644
--- a/include/CacheMemory.h
+++ b/include/CacheMemory.h
@@ -47,14 +47,14 @@ namespace openshot {
*/
class CacheMemory : public CacheBase {
private:
- map<long int, std::shared_ptr<Frame> > frames; ///< This map holds the frame number and Frame objects
- deque<long int> frame_numbers; ///< This queue holds a sequential list of cached Frame numbers
+ map<int64_t, std::shared_ptr<Frame> > frames; ///< This map holds the frame number and Frame objects
+ deque<int64_t> frame_numbers; ///< This queue holds a sequential list of cached Frame numbers
bool needs_range_processing; ///< Something has changed, and the range data needs to be re-calculated
string json_ranges; ///< JSON ranges of frame numbers
- vector<long int> ordered_frame_numbers; ///< Ordered list of frame numbers used by cache
- map<long int, long int> frame_ranges; ///< This map holds the ranges of frames, useful for quickly displaying the contents of the cache
- long int range_version; ///< The version of the JSON range data (incremented with each change)
+ vector<int64_t> ordered_frame_numbers; ///< Ordered list of frame numbers used by cache
+ map<int64_t, int64_t> frame_ranges; ///< This map holds the ranges of frames, useful for quickly displaying the contents of the cache
+ int64_t range_version; ///< The version of the JSON range data (incremented with each change)
/// Clean up cached frames that exceed the max number of bytes
void CleanUp();
@@ -68,7 +68,7 @@ namespace openshot {
/// @brief Constructor that sets the max bytes to cache
/// @param max_bytes The maximum bytes to allow in the cache. Once exceeded, the cache will purge the oldest frames.
- CacheMemory(long long int max_bytes);
+ CacheMemory(int64_t max_bytes);
// Default destructor
~CacheMemory();
@@ -81,30 +81,30 @@ namespace openshot {
void Clear();
/// Count the frames in the queue
- long int Count();
+ int64_t Count();
/// @brief Get a frame from the cache
/// @param frame_number The frame number of the cached frame
- std::shared_ptr<Frame> GetFrame(long int frame_number);
+ std::shared_ptr<Frame> GetFrame(int64_t frame_number);
/// Gets the maximum bytes value
- long long int GetBytes();
+ int64_t GetBytes();
/// Get the smallest frame number
std::shared_ptr<Frame> GetSmallestFrame();
/// @brief Move frame to front of queue (so it lasts longer)
/// @param frame_number The frame number of the cached frame
- void MoveToFront(long int frame_number);
+ void MoveToFront(int64_t frame_number);
/// @brief Remove a specific frame
/// @param frame_number The frame number of the cached frame
- void Remove(long int frame_number);
+ void Remove(int64_t frame_number);
/// @brief Remove a range of frames
/// @param start_frame_number The starting frame number of the cached frame
/// @param end_frame_number The ending frame number of the cached frame
- void Remove(long int start_frame_number, long int end_frame_number);
+ void Remove(int64_t start_frame_number, int64_t end_frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
diff --git a/include/ChunkReader.h b/include/ChunkReader.h
index 5e87837f..e97c0efa 100644
--- a/include/ChunkReader.h
+++ b/include/ChunkReader.h
@@ -58,8 +58,8 @@ namespace openshot
*/
struct ChunkLocation
{
- int number; ///< The chunk number
- int frame; ///< The frame number
+ int64_t number; ///< The chunk number
+ int64_t frame; ///< The frame number
};
/**
@@ -106,7 +106,7 @@ namespace openshot
private:
string path;
bool is_open;
- int chunk_size;
+ int64_t chunk_size;
FFmpegReader *local_reader;
ChunkLocation previous_location;
ChunkVersion version;
@@ -116,10 +116,10 @@ namespace openshot
bool does_folder_exist(string path);
/// Find the location of a frame in a chunk
- ChunkLocation find_chunk_frame(long int requested_frame);
+ ChunkLocation find_chunk_frame(int64_t requested_frame);
/// get a formatted path of a specific chunk
- string get_chunk_path(int chunk_number, string folder, string extension);
+ string get_chunk_path(int64_t chunk_number, string folder, string extension);
/// Load JSON meta data about this chunk folder
void load_json();
@@ -137,11 +137,11 @@ namespace openshot
/// @brief Get the chunk size (number of frames to write in each chunk)
/// @returns The number of frames in this chunk
- int GetChunkSize() { return chunk_size; };
+ int64_t GetChunkSize() { return chunk_size; };
/// @brief Set the chunk size (number of frames to write in each chunk)
/// @param new_size The number of frames per chunk
- void SetChunkSize(int new_size) { chunk_size = new_size; };
+ void SetChunkSize(int64_t new_size) { chunk_size = new_size; };
/// Get the cache object used by this reader (always return NULL for this reader)
CacheMemory* GetCache() { return NULL; };
@@ -149,7 +149,7 @@ namespace openshot
/// @brief Get an openshot::Frame object for a specific frame number of this reader.
/// @returns The requested frame (containing the image and audio)
/// @param requested_frame The frame number you want to retrieve
- std::shared_ptr<Frame> GetFrame(long int requested_frame) throw(ReaderClosed, ChunkNotFound);
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame) throw(ReaderClosed, ChunkNotFound);
/// Determine if reader is open or closed
bool IsOpen() { return is_open; };
diff --git a/include/ChunkWriter.h b/include/ChunkWriter.h
index 80a52986..6e569f8b 100644
--- a/include/ChunkWriter.h
+++ b/include/ChunkWriter.h
@@ -81,9 +81,9 @@ namespace openshot
{
private:
string path;
- int chunk_count;
- int chunk_size;
- int frame_count;
+ int64_t chunk_count;
+ int64_t chunk_size;
+ int64_t frame_count;
bool is_open;
bool is_writing;
ReaderBase *local_reader;
@@ -100,7 +100,7 @@ namespace openshot
void create_folder(string path);
/// get a formatted path of a specific chunk
- string get_chunk_path(int chunk_number, string folder, string extension);
+ string get_chunk_path(int64_t chunk_number, string folder, string extension);
/// check for valid chunk json
bool is_chunk_valid();
@@ -119,7 +119,7 @@ namespace openshot
void Close();
/// Get the chunk size (number of frames to write in each chunk)
- int GetChunkSize() { return chunk_size; };
+ int64_t GetChunkSize() { return chunk_size; };
/// Determine if writer is open or closed
bool IsOpen() { return is_open; };
@@ -129,7 +129,7 @@ namespace openshot
/// @brief Set the chunk size (number of frames to write in each chunk)
/// @param new_size The number of frames to write in this chunk file
- void SetChunkSize(int new_size) { chunk_size = new_size; };
+ void SetChunkSize(int64_t new_size) { chunk_size = new_size; };
/// @brief Add a frame to the stack waiting to be encoded.
/// @param frame The openshot::Frame object that needs to be written to this chunk file.
@@ -138,13 +138,13 @@ namespace openshot
/// @brief Write a block of frames from a reader
/// @param start The starting frame number to write (of the reader passed into the constructor)
/// @param length The number of frames to write (of the reader passed into the constructor)
- void WriteFrame(int start, int length) throw(WriterClosed);
+ void WriteFrame(int64_t start, int64_t length) throw(WriterClosed);
/// @brief Write a block of frames from a reader
/// @param reader The reader containing the frames you need
/// @param start The starting frame number to write
/// @param length The number of frames to write
- void WriteFrame(ReaderBase* reader, int start, int length) throw(WriterClosed);
+ void WriteFrame(ReaderBase* reader, int64_t start, int64_t length) throw(WriterClosed);
};
diff --git a/include/Clip.h b/include/Clip.h
index 4b35a868..086ab74d 100644
--- a/include/Clip.h
+++ b/include/Clip.h
@@ -124,7 +124,7 @@ namespace openshot {
bool manage_reader;
/// Adjust frame number minimum value
- long int adjust_frame_number_minimum(long int frame_number);
+ int64_t adjust_frame_number_minimum(int64_t frame_number);
/// Apply effects to the source frame (if any)
std::shared_ptr<Frame> apply_effects(std::shared_ptr<Frame> frame);
@@ -133,10 +133,10 @@ namespace openshot {
string get_file_extension(string path);
/// Get a frame object or create a blank one
- std::shared_ptr<Frame> GetOrCreateFrame(long int number);
+ std::shared_ptr<Frame> GetOrCreateFrame(int64_t number);
/// Adjust the audio and image of a time mapped frame
- std::shared_ptr<Frame> get_time_mapped_frame(std::shared_ptr<Frame> frame, long int frame_number) throw(ReaderClosed);
+ std::shared_ptr<Frame> get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number) throw(ReaderClosed);
/// Init default settings for a clip
void init_settings();
@@ -181,7 +181,7 @@ namespace openshot {
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested
- std::shared_ptr<Frame> GetFrame(long int requested_frame) throw(ReaderClosed);
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame) throw(ReaderClosed);
/// Open the internal reader
void Open() throw(InvalidFile, ReaderClosed);
@@ -205,7 +205,7 @@ namespace openshot {
/// Get all properties for a specific frame (perfect for a UI to display the current state
/// of all properties at any time)
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
/// @brief Remove an effect from the clip
/// @param effect Remove an effect from the clip.
diff --git a/include/ClipBase.h b/include/ClipBase.h
index fa170bb9..004a1279 100644
--- a/include/ClipBase.h
+++ b/include/ClipBase.h
@@ -62,7 +62,7 @@ namespace openshot {
int max_height; ///< The maximium image height needed by this clip (used for optimizations)
/// Generate JSON for a property
- Json::Value add_property_json(string name, float value, string type, string memo, Keyframe* keyframe, float min_value, float max_value, bool readonly, long int requested_frame);
+ Json::Value add_property_json(string name, float value, string type, string memo, Keyframe* keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame);
/// Generate JSON choice for a property (dropdown properties)
Json::Value add_property_choice_json(string name, int value, int selected_value);
@@ -104,7 +104,7 @@ namespace openshot {
/// Get all properties for a specific frame (perfect for a UI to display the current state
/// of all properties at any time)
- virtual string PropertiesJSON(long int requested_frame) = 0;
+ virtual string PropertiesJSON(int64_t requested_frame) = 0;
};
diff --git a/include/Color.h b/include/Color.h
index b3e5b7c2..440cada9 100644
--- a/include/Color.h
+++ b/include/Color.h
@@ -60,7 +60,7 @@ namespace openshot {
Color(Keyframe Red, Keyframe Green, Keyframe Blue, Keyframe Alpha);
/// Get the HEX value of a color at a specific frame
- string GetColorHex(long int frame_number);
+ string GetColorHex(int64_t frame_number);
/// Get the distance between 2 RGB pairs. (0=identical colors, 10=very close colors, 760=very different colors)
static long GetDistance(long R1, long G1, long B1, long R2, long G2, long B2);
diff --git a/include/DecklinkInput.h b/include/DecklinkInput.h
index 3f2daf87..9964461d 100644
--- a/include/DecklinkInput.h
+++ b/include/DecklinkInput.h
@@ -93,7 +93,7 @@ public:
virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
// Extra methods
- std::shared_ptr<Frame> GetFrame(long int requested_frame);
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame);
unsigned long GetCurrentFrameNumber();
private:
diff --git a/include/DecklinkReader.h b/include/DecklinkReader.h
index d3de4586..7cf211cd 100644
--- a/include/DecklinkReader.h
+++ b/include/DecklinkReader.h
@@ -107,7 +107,7 @@ namespace openshot
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested.
- std::shared_ptr<Frame> GetFrame(long int requested_frame) throw(ReaderClosed);
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame) throw(ReaderClosed);
unsigned long GetCurrentFrameNumber();
/// Determine if reader is open or closed
diff --git a/include/DummyReader.h b/include/DummyReader.h
index ab606696..4d16a675 100644
--- a/include/DummyReader.h
+++ b/include/DummyReader.h
@@ -75,7 +75,7 @@ namespace openshot
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested.
- std::shared_ptr<Frame> GetFrame(long int requested_frame) throw(ReaderClosed);
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame) throw(ReaderClosed);
/// Determine if reader is open or closed
bool IsOpen() { return is_open; };
diff --git a/include/EffectBase.h b/include/EffectBase.h
index e28d2ee8..2b8594a9 100644
--- a/include/EffectBase.h
+++ b/include/EffectBase.h
@@ -84,7 +84,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- virtual std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> frame, long int frame_number) = 0;
+ virtual std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number) = 0;
/// Initialize the values of the EffectInfo struct. It is important for derived classes to call
/// this method, or the EffectInfo struct values will not be initialized.
diff --git a/include/Exceptions.h b/include/Exceptions.h
index 1f879388..fe76d6dc 100644
--- a/include/Exceptions.h
+++ b/include/Exceptions.h
@@ -57,10 +57,10 @@ namespace openshot {
{
public:
string file_path;
- int frame_number;
- int chunk_number;
- int chunk_frame;
- ChunkNotFound(string message, int frame_number, int chunk_number, int chunk_frame)
+ int64_t frame_number;
+ int64_t chunk_number;
+ int64_t chunk_frame;
+ ChunkNotFound(string message, int64_t frame_number, int64_t chunk_number, int64_t chunk_frame)
: BaseException(message), frame_number(frame_number), chunk_number(chunk_number), chunk_frame(chunk_frame) { }
virtual ~ChunkNotFound() throw () {}
};
@@ -80,8 +80,8 @@ namespace openshot {
{
public:
string file_path;
- int frame_number;
- ErrorDecodingAudio(string message, int frame_number)
+ int64_t frame_number;
+ ErrorDecodingAudio(string message, int64_t frame_number)
: BaseException(message), frame_number(frame_number) { }
virtual ~ErrorDecodingAudio() throw () {}
};
@@ -91,8 +91,8 @@ namespace openshot {
{
public:
string file_path;
- int frame_number;
- ErrorEncodingAudio(string message, int frame_number)
+ int64_t frame_number;
+ ErrorEncodingAudio(string message, int64_t frame_number)
: BaseException(message), frame_number(frame_number) { }
virtual ~ErrorEncodingAudio() throw () {}
};
@@ -102,8 +102,8 @@ namespace openshot {
{
public:
string file_path;
- int frame_number;
- ErrorEncodingVideo(string message, int frame_number)
+ int64_t frame_number;
+ ErrorEncodingVideo(string message, int64_t frame_number)
: BaseException(message), frame_number(frame_number) { }
virtual ~ErrorEncodingVideo() throw () {}
};
@@ -202,9 +202,9 @@ namespace openshot {
class OutOfBoundsFrame : public BaseException
{
public:
- int FrameRequested;
- int MaxFrames;
- OutOfBoundsFrame(string message, int frame_requested, int max_frames)
+ int64_t FrameRequested;
+ int64_t MaxFrames;
+ OutOfBoundsFrame(string message, int64_t frame_requested, int64_t max_frames)
: BaseException(message), FrameRequested(frame_requested), MaxFrames(max_frames) { }
virtual ~OutOfBoundsFrame() throw () {}
};
diff --git a/include/FFmpegReader.h b/include/FFmpegReader.h
index ed65ea20..e99cfaaf 100644
--- a/include/FFmpegReader.h
+++ b/include/FFmpegReader.h
@@ -58,9 +58,9 @@ namespace openshot
*/
struct AudioLocation
{
- long int frame;
+ int64_t frame;
int sample_start;
- bool is_near(AudioLocation location, int samples_per_frame, long int amount);
+ bool is_near(AudioLocation location, int samples_per_frame, int64_t amount);
};
/**
@@ -108,39 +108,39 @@ namespace openshot
CacheMemory working_cache;
CacheMemory missing_frames;
- map<long int, long int> processing_video_frames;
- multimap<long int, long int> processing_audio_frames;
- map<long int, long int> processed_video_frames;
- map<long int, long int> processed_audio_frames;
- multimap<long int, long int> missing_video_frames;
- multimap<long int, long int> missing_video_frames_source;
- multimap<long int, long int> missing_audio_frames;
- multimap<long int, long int> missing_audio_frames_source;
- map<long int, int> checked_frames;
+ map<int64_t, int64_t> processing_video_frames;
+ multimap<int64_t, int64_t> processing_audio_frames;
+ map<int64_t, int64_t> processed_video_frames;
+ map<int64_t, int64_t> processed_audio_frames;
+ multimap<int64_t, int64_t> missing_video_frames;
+ multimap<int64_t, int64_t> missing_video_frames_source;
+ multimap<int64_t, int64_t> missing_audio_frames;
+ multimap<int64_t, int64_t> missing_audio_frames_source;
+ map<int64_t, int> checked_frames;
AudioLocation previous_packet_location;
// DEBUG VARIABLES (FOR AUDIO ISSUES)
int prev_samples;
- long int prev_pts;
- long int pts_total;
- long int pts_counter;
- long int num_packets_since_video_frame;
- long int num_checks_since_final;
+ int64_t prev_pts;
+ int64_t pts_total;
+ int64_t pts_counter;
+ int64_t num_packets_since_video_frame;
+ int64_t num_checks_since_final;
std::shared_ptr<Frame> last_video_frame;
bool is_seeking;
- long int seeking_pts;
- long int seeking_frame;
+ int64_t seeking_pts;
+ int64_t seeking_frame;
bool is_video_seek;
int seek_count;
- long int seek_audio_frame_found;
- long int seek_video_frame_found;
+ int64_t seek_audio_frame_found;
+ int64_t seek_video_frame_found;
- long int audio_pts_offset;
- long int video_pts_offset;
- long int last_frame;
- long int largest_frame_processed;
- long int current_video_frame; // can't reliably use PTS of video to determine this
+ int64_t audio_pts_offset;
+ int64_t video_pts_offset;
+ int64_t last_frame;
+ int64_t largest_frame_processed;
+ int64_t current_video_frame; // can't reliably use PTS of video to determine this
/// Check for the correct frames per second value by scanning the 1st few seconds of video packets.
void CheckFPS();
@@ -149,28 +149,28 @@ namespace openshot
bool CheckSeek(bool is_video);
/// Check if a frame is missing and attempt to replace it's frame image (and
- bool CheckMissingFrame(long int requested_frame);
+ bool CheckMissingFrame(int64_t requested_frame);
/// Check the working queue, and move finished frames to the finished queue
- void CheckWorkingFrames(bool end_of_stream, long int requested_frame);
+ void CheckWorkingFrames(bool end_of_stream, int64_t requested_frame);
/// Convert image to RGB format
- void convert_image(long int current_frame, AVPicture *copyFrame, int width, int height, PixelFormat pix_fmt);
+ void convert_image(int64_t current_frame, AVPicture *copyFrame, int width, int height, PixelFormat pix_fmt);
/// Convert Frame Number into Audio PTS
- long int ConvertFrameToAudioPTS(long int frame_number);
+ int64_t ConvertFrameToAudioPTS(int64_t frame_number);
/// Convert Frame Number into Video PTS
- long int ConvertFrameToVideoPTS(long int frame_number);
+ int64_t ConvertFrameToVideoPTS(int64_t frame_number);
/// Convert Video PTS into Frame Number
- long int ConvertVideoPTStoFrame(long int pts);
+ int64_t ConvertVideoPTStoFrame(int64_t pts);
/// Create a new Frame (or return an existing one) and add it to the working queue.
- std::shared_ptr<Frame> CreateFrame(long int requested_frame);
+ std::shared_ptr<Frame> CreateFrame(int64_t requested_frame);
/// Calculate Starting video frame and sample # for an audio PTS
- AudioLocation GetAudioPTSLocation(long int pts);
+ AudioLocation GetAudioPTSLocation(int64_t pts);
/// Get an AVFrame (if any)
bool GetAVFrame();
@@ -179,25 +179,25 @@ namespace openshot
int GetNextPacket();
/// Get the smallest video frame that is still being processed
- long int GetSmallestVideoFrame();
+ int64_t GetSmallestVideoFrame();
/// Get the smallest audio frame that is still being processed
- long int GetSmallestAudioFrame();
+ int64_t GetSmallestAudioFrame();
/// Get the PTS for the current video packet
- long int GetVideoPTS();
+ int64_t GetVideoPTS();
/// Remove partial frames due to seek
- bool IsPartialFrame(long int requested_frame);
+ bool IsPartialFrame(int64_t requested_frame);
/// Process a video packet
- void ProcessVideoPacket(long int requested_frame);
+ void ProcessVideoPacket(int64_t requested_frame);
/// Process an audio packet
- void ProcessAudioPacket(long int requested_frame, long int target_frame, int starting_sample);
+ void ProcessAudioPacket(int64_t requested_frame, int64_t target_frame, int starting_sample);
/// Read the stream until we find the requested Frame
- std::shared_ptr<Frame> ReadStream(long int requested_frame);
+ std::shared_ptr<Frame> ReadStream(int64_t requested_frame);
/// Remove AVFrame from cache (and deallocate it's memory)
void RemoveAVFrame(AVPicture*);
@@ -206,7 +206,7 @@ namespace openshot
void RemoveAVPacket(AVPacket*);
/// Seek to a specific Frame. This is not always frame accurate, it's more of an estimation on many codecs.
- void Seek(long int requested_frame) throw(TooManySeeks);
+ void Seek(int64_t requested_frame) throw(TooManySeeks);
/// Update PTS Offset (if any)
void UpdatePTSOffset(bool is_video);
@@ -247,7 +247,7 @@ namespace openshot
///
/// @returns The requested frame of video
/// @param requested_frame The frame number that is requested.
- std::shared_ptr<Frame> GetFrame(long int requested_frame) throw(OutOfBoundsFrame, ReaderClosed, TooManySeeks);
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame) throw(OutOfBoundsFrame, ReaderClosed, TooManySeeks);
/// Determine if reader is open or closed
bool IsOpen() { return is_open; };
diff --git a/include/FFmpegWriter.h b/include/FFmpegWriter.h
index 2fbb8479..ddb6aa01 100644
--- a/include/FFmpegWriter.h
+++ b/include/FFmpegWriter.h
@@ -147,8 +147,8 @@ namespace openshot
int cache_size;
bool is_writing;
bool is_open;
- int64 write_video_count;
- int64 write_audio_count;
+ int64_t write_video_count;
+ int64_t write_audio_count;
bool prepare_streams;
bool write_header;
@@ -322,7 +322,7 @@ namespace openshot
/// @param reader A openshot::ReaderBase object which will provide frames to be written
/// @param start The starting frame number of the reader
/// @param length The number of frames to write
- void WriteFrame(ReaderBase* reader, long int start, long int length) throw(ErrorEncodingVideo, WriterClosed);
+ void WriteFrame(ReaderBase* reader, int64_t start, int64_t length) throw(ErrorEncodingVideo, WriterClosed);
/// @brief Write the file trailer (after all frames are written). This is called automatically
/// by the Close() method if this method has not yet been called.
diff --git a/include/Frame.h b/include/Frame.h
index 844280e0..599bbffb 100644
--- a/include/Frame.h
+++ b/include/Frame.h
@@ -133,7 +133,7 @@ namespace openshot
int constrain(int color_value);
public:
- long int number; ///< This is the frame number (starting at 1)
+ int64_t number; ///< This is the frame number (starting at 1)
bool has_audio_data; ///< This frame has been loaded with audio data
bool has_image_data; ///< This frame has been loaded with pixel data
@@ -141,13 +141,13 @@ namespace openshot
Frame();
/// Constructor - image only (48kHz audio silence)
- Frame(long int number, int width, int height, string color);
+ Frame(int64_t number, int width, int height, string color);
/// Constructor - audio only (300x200 blank image)
- Frame(long int number, int samples, int channels);
+ Frame(int64_t number, int samples, int channels);
/// Constructor - image & audio
- Frame(long int number, int width, int height, string color, int samples, int channels);
+ Frame(int64_t number, int width, int height, string color, int samples, int channels);
/// Copy constructor
Frame ( const Frame &other );
@@ -227,7 +227,7 @@ namespace openshot
juce::AudioSampleBuffer *GetAudioSampleBuffer();
/// Get the size in bytes of this frame (rough estimate)
- int64 GetBytes();
+ int64_t GetBytes();
/// Get pointer to Qt QImage image object
std::shared_ptr GetImage();
@@ -253,7 +253,7 @@ namespace openshot
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels);
/// Calculate the # of samples per video frame (for a specific frame number and frame rate)
- static int GetSamplesPerFrame(long int frame_number, Fraction fps, int sample_rate, int channels);
+ static int GetSamplesPerFrame(int64_t frame_number, Fraction fps, int sample_rate, int channels);
/// Get an audio waveform image
std::shared_ptr GetWaveform(int width, int height, int Red, int Green, int Blue, int Alpha);
@@ -277,7 +277,7 @@ namespace openshot
void Save(string path, float scale, string format="PNG", int quality=100);
/// Set frame number
- void SetFrameNumber(long int number);
+ void SetFrameNumber(int64_t number);
/// Set Pixel Aspect Ratio
void SetPixelRatio(int num, int den);
diff --git a/include/FrameMapper.h b/include/FrameMapper.h
index 69451f4f..3e0d2c8d 100644
--- a/include/FrameMapper.h
+++ b/include/FrameMapper.h
@@ -72,12 +72,12 @@ namespace openshot
*/
struct Field
{
- long int Frame;
+ int64_t Frame;
bool isOdd;
Field() : Frame(0), isOdd(true) { };
- Field(long int frame, bool isodd)
+ Field(int64_t frame, bool isodd)
{
Frame = frame;
isOdd = isodd;
@@ -92,10 +92,10 @@ namespace openshot
*/
struct SampleRange
{
- int frame_start;
+ int64_t frame_start;
int sample_start;
- int frame_end;
+ int64_t frame_end;
int sample_end;
int total;
@@ -147,14 +147,14 @@ namespace openshot
CacheMemory final_cache; // Cache of actual Frame objects
bool is_dirty; // When this is true, the next call to GetFrame will re-init the mapping
AVAudioResampleContext *avr; // Audio resampling context object
- long int timeline_frame_offset; // Timeline frame offset
+ int64_t timeline_frame_offset; // Timeline frame offset
// Internal methods used by init
- void AddField(long int frame);
+ void AddField(int64_t frame);
void AddField(Field field);
// Get Frame or Generate Blank Frame
- std::shared_ptr GetOrCreateFrame(long int number);
+ std::shared_ptr GetOrCreateFrame(int64_t number);
// Use the original and target frame rates and a pull-down technique to create
// a mapping between the original fields and frames or a video to a new frame rate.
@@ -177,13 +177,13 @@ namespace openshot
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout);
// Set offset relative to parent timeline
- void SetTimelineFrameOffset(long int offset);
+ void SetTimelineFrameOffset(int64_t offset);
/// Close the openshot::FrameMapper and internal reader
void Close();
/// Get a frame based on the target frame rate and the new frame number of a frame
- MappedFrame GetMappedFrame(long int TargetFrameNumber) throw(OutOfBoundsFrame);
+ MappedFrame GetMappedFrame(int64_t TargetFrameNumber) throw(OutOfBoundsFrame);
/// Get the cache object used by this reader
CacheMemory* GetCache() { return &final_cache; };
@@ -194,7 +194,7 @@ namespace openshot
///
/// @returns The requested frame of video
/// @param requested_frame The frame number that is requested.
- std::shared_ptr GetFrame(long int requested_frame) throw(ReaderClosed);
+ std::shared_ptr GetFrame(int64_t requested_frame) throw(ReaderClosed);
/// Determine if reader is open or closed
bool IsOpen();
@@ -218,7 +218,7 @@ namespace openshot
ReaderBase* Reader() throw(ReaderClosed);
/// Resample audio and map channels (if needed)
- void ResampleMappedAudio(std::shared_ptr frame, long int original_frame_number);
+ void ResampleMappedAudio(std::shared_ptr frame, int64_t original_frame_number);
};
}
diff --git a/include/ImageReader.h b/include/ImageReader.h
index ca747d97..2c336599 100644
--- a/include/ImageReader.h
+++ b/include/ImageReader.h
@@ -93,7 +93,7 @@ namespace openshot
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested.
- std::shared_ptr GetFrame(long int requested_frame) throw(ReaderClosed);
+ std::shared_ptr GetFrame(int64_t requested_frame) throw(ReaderClosed);
/// Determine if reader is open or closed
bool IsOpen() { return is_open; };
diff --git a/include/ImageWriter.h b/include/ImageWriter.h
index 003aad3e..9274220b 100644
--- a/include/ImageWriter.h
+++ b/include/ImageWriter.h
@@ -89,7 +89,7 @@ namespace openshot
int cache_size;
bool is_writing;
bool is_open;
- int64 write_video_count;
+ int64_t write_video_count;
vector frames;
int image_quality;
int number_of_loops;
@@ -139,7 +139,7 @@ namespace openshot
/// @param reader A openshot::ReaderBase object which will provide frames to be written
/// @param start The starting frame number of the reader
/// @param length The number of frames to write
- void WriteFrame(ReaderBase* reader, long int start, long int length) throw(WriterClosed);
+ void WriteFrame(ReaderBase* reader, int64_t start, int64_t length) throw(WriterClosed);
};
diff --git a/include/KeyFrame.h b/include/KeyFrame.h
index 4e66036f..f04528c7 100644
--- a/include/KeyFrame.h
+++ b/include/KeyFrame.h
@@ -80,13 +80,13 @@ namespace openshot {
void CreateFactorialTable();
// Get a factorial for a coordinate
- double Factorial(long int n);
+ double Factorial(int64_t n);
// Calculate the factorial function for Bernstein basis
- double Ni(long int n, long int i);
+ double Ni(int64_t n, int64_t i);
// Calculate Bernstein Basis
- double Bernstein(long int n, long int i, double t);
+ double Bernstein(int64_t n, int64_t i, double t);
public:
vector Points; ///< Vector of all Points
@@ -114,25 +114,25 @@ namespace openshot {
void FlipPoints();
/// Get the index of a point by matching a coordinate
- long int FindIndex(Point p) throw(OutOfBoundsPoint);
+ int64_t FindIndex(Point p) throw(OutOfBoundsPoint);
/// Get the value at a specific index
- double GetValue(long int index);
+ double GetValue(int64_t index);
/// Get the rounded INT value at a specific index
- int GetInt(long int index);
+ int GetInt(int64_t index);
/// Get the rounded LONG value at a specific index
- long int GetLong(long int index);
+ int64_t GetLong(int64_t index);
/// Get the fraction that represents how many times this value is repeated in the curve
- Fraction GetRepeatFraction(long int index);
+ Fraction GetRepeatFraction(int64_t index);
/// Get the change in Y value (from the previous Y value)
- double GetDelta(long int index);
+ double GetDelta(int64_t index);
/// Get a point at a specific index
- Point& GetPoint(long int index) throw(OutOfBoundsPoint);
+ Point& GetPoint(int64_t index) throw(OutOfBoundsPoint);
/// Get current point (or closest point to the right) from the X coordinate (i.e. the frame number)
Point GetClosestPoint(Point p);
@@ -148,10 +148,10 @@ namespace openshot {
Point GetMaxPoint();
// Get the number of values (i.e. coordinates on the X axis)
- long int GetLength();
+ int64_t GetLength();
/// Get the number of points (i.e. # of points)
- long int GetCount();
+ int64_t GetCount();
/// Get the direction of the curve at a specific index (increasing or decreasing)
bool IsIncreasing(int index);
@@ -174,14 +174,14 @@ namespace openshot {
void RemovePoint(Point p) throw(OutOfBoundsPoint);
/// Remove a point by index
- void RemovePoint(long int index) throw(OutOfBoundsPoint);
+ void RemovePoint(int64_t index) throw(OutOfBoundsPoint);
/// Scale all points by a percentage (good for evenly lengthening or shortening an openshot::Keyframe)
/// 1.0 = same size, 1.05 = 5% increase, etc...
void ScalePoints(double scale);
/// Replace an existing point with a new point
- void UpdatePoint(long int index, Point p);
+ void UpdatePoint(int64_t index, Point p);
/// Print a list of points
void PrintPoints();
diff --git a/include/PlayerBase.h b/include/PlayerBase.h
index 01e7b148..80cdf708 100644
--- a/include/PlayerBase.h
+++ b/include/PlayerBase.h
@@ -81,7 +81,7 @@ namespace openshot
virtual int Position() = 0;
/// Seek to a specific frame in the player
- virtual void Seek(long int new_frame) = 0;
+ virtual void Seek(int64_t new_frame) = 0;
/// Get the Playback speed
virtual float Speed() = 0;
diff --git a/include/Qt/AudioPlaybackThread.h b/include/Qt/AudioPlaybackThread.h
index fce7b045..9f534749 100644
--- a/include/Qt/AudioPlaybackThread.h
+++ b/include/Qt/AudioPlaybackThread.h
@@ -102,13 +102,13 @@ namespace openshot
std::shared_ptr getFrame();
/// Get the current frame number being played
- long int getCurrentFramePosition();
+ int64_t getCurrentFramePosition();
/// Play the audio
void Play();
/// Seek the audio thread
- void Seek(long int new_position);
+ void Seek(int64_t new_position);
/// Stop the audio playback
void Stop();
diff --git a/include/Qt/PlayerPrivate.h b/include/Qt/PlayerPrivate.h
index 83f5ec48..3311dea9 100644
--- a/include/Qt/PlayerPrivate.h
+++ b/include/Qt/PlayerPrivate.h
@@ -47,15 +47,15 @@ namespace openshot
class PlayerPrivate : Thread
{
std::shared_ptr frame; /// The current frame
- long int video_position; /// The current frame position.
- long int audio_position; /// The current frame position.
+ int64_t video_position; /// The current frame position.
+ int64_t audio_position; /// The current frame position.
ReaderBase *reader; /// The reader which powers this player
AudioPlaybackThread *audioPlayback; /// The audio thread
VideoPlaybackThread *videoPlayback; /// The video thread
VideoCacheThread *videoCache; /// The cache thread
int speed; /// The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
RendererBase *renderer;
- long int last_video_position; /// The last frame actually displayed
+ int64_t last_video_position; /// The last frame actually displayed
/// Constructor
PlayerPrivate(RendererBase *rb);
diff --git a/include/Qt/VideoCacheThread.h b/include/Qt/VideoCacheThread.h
index db48caa4..3f781f62 100644
--- a/include/Qt/VideoCacheThread.h
+++ b/include/Qt/VideoCacheThread.h
@@ -45,8 +45,8 @@ namespace openshot
std::shared_ptr frame;
int speed;
bool is_playing;
- long int position;
- long int current_display_frame;
+ int64_t position;
+ int64_t current_display_frame;
ReaderBase *reader;
int max_frames;
@@ -56,7 +56,7 @@ namespace openshot
~VideoCacheThread();
/// Get the currently playing frame number (if any)
- long int getCurrentFramePosition();
+ int64_t getCurrentFramePosition();
/// Get Speed (The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
int getSpeed() const { return speed; }
@@ -65,10 +65,10 @@ namespace openshot
void Play();
/// Seek the reader to a particular frame number
- void Seek(long int new_position);
+ void Seek(int64_t new_position);
/// Set the currently displaying frame number
- void setCurrentFramePosition(long int current_frame_number);
+ void setCurrentFramePosition(int64_t current_frame_number);
/// Set Speed (The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
void setSpeed(int new_speed) { speed = new_speed; }
diff --git a/include/Qt/VideoPlaybackThread.h b/include/Qt/VideoPlaybackThread.h
index 44d2af78..03ffe6d2 100644
--- a/include/Qt/VideoPlaybackThread.h
+++ b/include/Qt/VideoPlaybackThread.h
@@ -54,7 +54,7 @@ namespace openshot
~VideoPlaybackThread();
/// Get the currently playing frame number (if any)
- long int getCurrentFramePosition();
+ int64_t getCurrentFramePosition();
/// Start the thread
void run();
diff --git a/include/Qt/VideoRenderer.h b/include/Qt/VideoRenderer.h
index 18da9ae7..aaf973ca 100644
--- a/include/Qt/VideoRenderer.h
+++ b/include/Qt/VideoRenderer.h
@@ -45,7 +45,7 @@ public:
~VideoRenderer();
/// Override QWidget which needs to be painted
- void OverrideWidget(long long qwidget_address);
+ void OverrideWidget(int64_t qwidget_address);
signals:
void present(const QImage &image);
diff --git a/include/QtImageReader.h b/include/QtImageReader.h
index c45de4dc..c2180f90 100644
--- a/include/QtImageReader.h
+++ b/include/QtImageReader.h
@@ -96,7 +96,7 @@ namespace openshot
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested.
- std::shared_ptr GetFrame(long int requested_frame) throw(ReaderClosed);
+ std::shared_ptr GetFrame(int64_t requested_frame) throw(ReaderClosed);
/// Determine if reader is open or closed
bool IsOpen() { return is_open; };
diff --git a/include/QtPlayer.h b/include/QtPlayer.h
index 397adced..8774b886 100644
--- a/include/QtPlayer.h
+++ b/include/QtPlayer.h
@@ -75,7 +75,7 @@ namespace openshot
int Position();
/// Seek to a specific frame in the player
- void Seek(long int new_frame);
+ void Seek(int64_t new_frame);
/// Set the source URL/path of this player (which will create an internal Reader)
void SetSource(const std::string &source);
@@ -83,10 +83,10 @@ namespace openshot
/// Set the QWidget which will be used as the display (note: QLabel works well). This does not take a
/// normal pointer, but rather a LONG pointer id (and it re-casts the QWidget pointer inside libopenshot).
/// This is required due to SIP and SWIG incompatibility in the Python bindings.
- void SetQWidget(long long qwidget_address);
+ void SetQWidget(int64_t qwidget_address);
/// Get the Renderer pointer address (for Python to cast back into a QObject)
- long long GetRendererQObject();
+ int64_t GetRendererQObject();
/// Get the Playback speed
float Speed();
diff --git a/include/ReaderBase.h b/include/ReaderBase.h
index 27de85cd..5e26a29c 100644
--- a/include/ReaderBase.h
+++ b/include/ReaderBase.h
@@ -62,7 +62,7 @@ namespace openshot
bool has_audio; ///< Determines if this file has an audio stream
bool has_single_image; ///< Determines if this file only contains a single image
float duration; ///< Length of time (in seconds)
- long long file_size; ///< Size of file (in bytes)
+ int64_t file_size; ///< Size of file (in bytes)
int height; ///< The height of the video (in pixels)
int width; ///< The width of the video (in pixesl)
int pixel_format; ///< The pixel format (i.e. YUV420P, RGB24, etc...)
@@ -71,7 +71,7 @@ namespace openshot
Fraction pixel_ratio; ///< The pixel ratio of the video stream as a fraction (i.e. some pixels are not square)
Fraction display_ratio; ///< The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3)
string vcodec; ///< The name of the video codec used to encode / decode the video stream
- long int video_length; ///< The number of frames in the video stream
+ int64_t video_length; ///< The number of frames in the video stream
int video_stream_index; ///< The index of the video stream
Fraction video_timebase; ///< The video timebase determines how long each frame stays on the screen
bool interlaced_frame; // Are the contents of this frame interlaced
@@ -125,7 +125,7 @@ namespace openshot
///
/// @returns The requested frame of video
/// @param[in] number The frame number that is requested.
- virtual std::shared_ptr GetFrame(long int number) = 0;
+ virtual std::shared_ptr GetFrame(int64_t number) = 0;
/// Determine if reader is open or closed
virtual bool IsOpen() = 0;
diff --git a/include/RendererBase.h b/include/RendererBase.h
index 2bd2d1a9..3f1c0b1c 100644
--- a/include/RendererBase.h
+++ b/include/RendererBase.h
@@ -50,7 +50,7 @@ namespace openshot
void paint(const std::shared_ptr & frame);
/// Allow manual override of the QWidget that is used to display
- virtual void OverrideWidget(long long qwidget_address) = 0;
+ virtual void OverrideWidget(int64_t qwidget_address) = 0;
protected:
RendererBase();
diff --git a/include/TextReader.h b/include/TextReader.h
index 8d6c1200..de1f5a18 100644
--- a/include/TextReader.h
+++ b/include/TextReader.h
@@ -124,7 +124,7 @@ namespace openshot
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested.
- std::shared_ptr GetFrame(long int requested_frame) throw(ReaderClosed);
+ std::shared_ptr GetFrame(int64_t requested_frame) throw(ReaderClosed);
/// Determine if reader is open or closed
bool IsOpen() { return is_open; };
diff --git a/include/Timeline.h b/include/Timeline.h
index 88a89832..64752e41 100644
--- a/include/Timeline.h
+++ b/include/Timeline.h
@@ -153,7 +153,7 @@ namespace openshot {
CacheBase *final_cache; /// new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip);
+ void add_layer(std::shared_ptr new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip);
/// Apply a FrameMapper to a clip which matches the settings of this timeline
void apply_mapper_to_clip(Clip* clip);
@@ -165,7 +165,7 @@ namespace openshot {
void apply_json_to_timeline(Json::Value change) throw(InvalidJSONKey); /// find_intersecting_clips(long int requested_frame, int number_of_frames, bool include);
+ vector find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include);
/// Get or generate a blank frame
- std::shared_ptr GetOrCreateFrame(Clip* clip, long int number);
+ std::shared_ptr GetOrCreateFrame(Clip* clip, int64_t number);
/// Apply effects to the source frame (if any)
- std::shared_ptr apply_effects(std::shared_ptr frame, long int timeline_frame_number, int layer);
+ std::shared_ptr apply_effects(std::shared_ptr frame, int64_t timeline_frame_number, int layer);
/// Compare 2 floating point numbers for equality
bool isEqual(double a, double b);
@@ -243,7 +243,7 @@ namespace openshot {
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested.
- std::shared_ptr GetFrame(long int requested_frame) throw(ReaderClosed, OutOfBoundsFrame);
+ std::shared_ptr GetFrame(int64_t requested_frame) throw(ReaderClosed, OutOfBoundsFrame);
// Curves for the viewport
Keyframe viewport_scale; /// frame) throw(ErrorEncodingVideo, WriterClosed) = 0;
/// This method is required for all derived classes of WriterBase. Write a block of frames from a reader.
- virtual void WriteFrame(ReaderBase* reader, long int start, long int length) throw(ErrorEncodingVideo, WriterClosed) = 0;
+ virtual void WriteFrame(ReaderBase* reader, int64_t start, int64_t length) throw(ErrorEncodingVideo, WriterClosed) = 0;
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
diff --git a/include/effects/Blur.h b/include/effects/Blur.h
index 3ce2ddfc..910d3845 100644
--- a/include/effects/Blur.h
+++ b/include/effects/Blur.h
@@ -98,7 +98,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, long int frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
@@ -108,7 +108,7 @@ namespace openshot
/// Get all properties for a specific frame (perfect for a UI to display the current state
/// of all properties at any time)
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
};
}
diff --git a/include/effects/Brightness.h b/include/effects/Brightness.h
index e8307a32..023e1c2e 100644
--- a/include/effects/Brightness.h
+++ b/include/effects/Brightness.h
@@ -88,7 +88,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, long int frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
@@ -98,7 +98,7 @@ namespace openshot
/// Get all properties for a specific frame (perfect for a UI to display the current state
/// of all properties at any time)
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
};
}
diff --git a/include/effects/ChromaKey.h b/include/effects/ChromaKey.h
index bd4db01f..d6c9d62f 100644
--- a/include/effects/ChromaKey.h
+++ b/include/effects/ChromaKey.h
@@ -82,7 +82,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, long int frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
@@ -91,7 +91,7 @@ namespace openshot
void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object
// Get all properties for a specific frame
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
};
}
diff --git a/include/effects/Deinterlace.h b/include/effects/Deinterlace.h
index 1bf53ef7..22832805 100644
--- a/include/effects/Deinterlace.h
+++ b/include/effects/Deinterlace.h
@@ -78,7 +78,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, long int frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
@@ -87,7 +87,7 @@ namespace openshot
void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object
// Get all properties for a specific frame
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
};
}
diff --git a/include/effects/Mask.h b/include/effects/Mask.h
index 8b9ea3ff..35147c0b 100644
--- a/include/effects/Mask.h
+++ b/include/effects/Mask.h
@@ -101,7 +101,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, long int frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
@@ -111,7 +111,7 @@ namespace openshot
/// Get all properties for a specific frame (perfect for a UI to display the current state
/// of all properties at any time)
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
/// Get the reader object of the mask grayscale image
ReaderBase* Reader() { return reader; };
diff --git a/include/effects/Negate.h b/include/effects/Negate.h
index 4eca024f..e6b9b1ac 100644
--- a/include/effects/Negate.h
+++ b/include/effects/Negate.h
@@ -66,7 +66,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, long int frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
@@ -75,7 +75,7 @@ namespace openshot
void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object
// Get all properties for a specific frame
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
};
}
diff --git a/include/effects/Saturation.h b/include/effects/Saturation.h
index 8d115bf3..b0c7f6d6 100644
--- a/include/effects/Saturation.h
+++ b/include/effects/Saturation.h
@@ -85,7 +85,7 @@ namespace openshot
/// @returns The modified openshot::Frame object
/// @param frame The frame object that needs the effect applied to it
/// @param frame_number The frame number (starting at 1) of the effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, long int frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
/// Get and Set JSON methods
string Json(); ///< Generate JSON string of this object
@@ -95,7 +95,7 @@ namespace openshot
/// Get all properties for a specific frame (perfect for a UI to display the current state
/// of all properties at any time)
- string PropertiesJSON(long int requested_frame);
+ string PropertiesJSON(int64_t requested_frame);
};
}
diff --git a/src/AudioBufferSource.cpp b/src/AudioBufferSource.cpp
index d84f4fc8..3b00f742 100644
--- a/src/AudioBufferSource.cpp
+++ b/src/AudioBufferSource.cpp
@@ -95,7 +95,7 @@ void AudioBufferSource::prepareToPlay(int, double) { }
void AudioBufferSource::releaseResources() { }
// Set the next read position of this source
-void AudioBufferSource::setNextReadPosition (long long newPosition)
+void AudioBufferSource::setNextReadPosition (int64 newPosition)
{
// set position (if the new position is in range)
if (newPosition >= 0 && newPosition < buffer->getNumSamples())
@@ -103,14 +103,14 @@ void AudioBufferSource::setNextReadPosition (long long newPosition)
}
// Get the next read position of this source
-long long AudioBufferSource::getNextReadPosition() const
+int64 AudioBufferSource::getNextReadPosition() const
{
// return the next read position
return position;
}
// Get the total length (in samples) of this audio source
-long long AudioBufferSource::getTotalLength() const
+int64 AudioBufferSource::getTotalLength() const
{
// Get the length
return buffer->getNumSamples();
diff --git a/src/AudioReaderSource.cpp b/src/AudioReaderSource.cpp
index 708f476e..b1bb2cd3 100644
--- a/src/AudioReaderSource.cpp
+++ b/src/AudioReaderSource.cpp
@@ -31,7 +31,7 @@ using namespace std;
using namespace openshot;
// Constructor that reads samples from a reader
-AudioReaderSource::AudioReaderSource(ReaderBase *audio_reader, int64 starting_frame_number, int buffer_size)
+AudioReaderSource::AudioReaderSource(ReaderBase *audio_reader, int64_t starting_frame_number, int buffer_size)
: reader(audio_reader), frame_number(starting_frame_number), original_frame_number(starting_frame_number),
size(buffer_size), position(0), frame_position(0), estimated_frame(0), speed(1) {
@@ -245,7 +245,7 @@ void AudioReaderSource::prepareToPlay(int, double) { }
void AudioReaderSource::releaseResources() { }
// Set the next read position of this source
-void AudioReaderSource::setNextReadPosition (long long newPosition)
+void AudioReaderSource::setNextReadPosition (int64 newPosition)
{
// set position (if the new position is in range)
if (newPosition >= 0 && newPosition < buffer->getNumSamples())
@@ -253,14 +253,14 @@ void AudioReaderSource::setNextReadPosition (long long newPosition)
}
// Get the next read position of this source
-long long AudioReaderSource::getNextReadPosition() const
+int64 AudioReaderSource::getNextReadPosition() const
{
// return the next read position
return position;
}
// Get the total length (in samples) of this audio source
-long long AudioReaderSource::getTotalLength() const
+int64 AudioReaderSource::getTotalLength() const
{
// Get the length
if (reader)
diff --git a/src/CacheBase.cpp b/src/CacheBase.cpp
index c7218105..cffd995d 100644
--- a/src/CacheBase.cpp
+++ b/src/CacheBase.cpp
@@ -37,16 +37,16 @@ CacheBase::CacheBase() : max_bytes(0) {
};
// Constructor that sets the max frames to cache
-CacheBase::CacheBase(long long int max_bytes) : max_bytes(max_bytes) {
+CacheBase::CacheBase(int64_t max_bytes) : max_bytes(max_bytes) {
// Init the critical section
cacheCriticalSection = new CriticalSection();
};
// Set maximum bytes to a different amount based on a ReaderInfo struct
-void CacheBase::SetMaxBytesFromInfo(long int number_of_frames, int width, int height, int sample_rate, int channels)
+void CacheBase::SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
{
// n frames X height X width X 4 colors of chars X audio channels X 4 byte floats
- long long int bytes = number_of_frames * (height * width * 4 + (sample_rate * channels * 4));
+ int64_t bytes = number_of_frames * (height * width * 4 + (sample_rate * channels * 4));
SetMaxBytes(bytes);
}
diff --git a/src/CacheDisk.cpp b/src/CacheDisk.cpp
index 23b82a6e..4175f42d 100644
--- a/src/CacheDisk.cpp
+++ b/src/CacheDisk.cpp
@@ -47,7 +47,7 @@ CacheDisk::CacheDisk(string cache_path, string format, float quality, float scal
};
// Constructor that sets the max bytes to cache
-CacheDisk::CacheDisk(string cache_path, string format, float quality, float scale, long long int max_bytes) : CacheBase(max_bytes) {
+CacheDisk::CacheDisk(string cache_path, string format, float quality, float scale, int64_t max_bytes) : CacheBase(max_bytes) {
// Set cache type name
cache_type = "CacheDisk";
range_version = 0;
@@ -100,19 +100,19 @@ void CacheDisk::CalculateRanges() {
// Increment range version
range_version++;
- vector::iterator itr_ordered;
- long int starting_frame = *ordered_frame_numbers.begin();
- long int ending_frame = *ordered_frame_numbers.begin();
+ vector::iterator itr_ordered;
+ int64_t starting_frame = *ordered_frame_numbers.begin();
+ int64_t ending_frame = *ordered_frame_numbers.begin();
// Loop through all known frames (in sequential order)
for (itr_ordered = ordered_frame_numbers.begin(); itr_ordered != ordered_frame_numbers.end(); ++itr_ordered) {
- long int frame_number = *itr_ordered;
+ int64_t frame_number = *itr_ordered;
if (frame_number - ending_frame > 1) {
// End of range detected
Json::Value range;
// Add JSON object with start/end attributes
- // Use strings, since long ints are supported in JSON
+ // Use strings, since int64_t values are supported in JSON
stringstream start_str;
start_str << starting_frame;
stringstream end_str;
@@ -133,7 +133,7 @@ void CacheDisk::CalculateRanges() {
Json::Value range;
// Add JSON object with start/end attributes
- // Use strings, since long ints are supported in JSON
+ // Use strings, since int64_t values are not supported in JSON
stringstream start_str;
start_str << starting_frame;
stringstream end_str;
@@ -167,7 +167,7 @@ void CacheDisk::Add(std::shared_ptr frame)
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
- long int frame_number = frame->number;
+ int64_t frame_number = frame->number;
// Freshen frame if it already exists
if (frames.count(frame_number))
@@ -222,7 +222,7 @@ void CacheDisk::Add(std::shared_ptr frame)
}
// Get a frame from the cache (or NULL shared_ptr if no frame is found)
-std::shared_ptr<Frame> CacheDisk::GetFrame(long int frame_number)
+std::shared_ptr<Frame> CacheDisk::GetFrame(int64_t frame_number)
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
@@ -299,8 +299,8 @@ std::shared_ptr CacheDisk::GetSmallestFrame()
std::shared_ptr f;
// Loop through frame numbers
- deque<long int>::iterator itr;
- long int smallest_frame = -1;
+ deque<int64_t>::iterator itr;
+ int64_t smallest_frame = -1;
for(itr = frame_numbers.begin(); itr != frame_numbers.end(); ++itr)
{
if (*itr < smallest_frame || smallest_frame == -1)
@@ -314,15 +314,15 @@ std::shared_ptr CacheDisk::GetSmallestFrame()
}
// Gets the maximum bytes value
-long long int CacheDisk::GetBytes()
+int64_t CacheDisk::GetBytes()
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
- long long int total_bytes = 0;
+ int64_t total_bytes = 0;
// Loop through frames, and calculate total bytes
- deque<long int>::reverse_iterator itr;
+ deque<int64_t>::reverse_iterator itr;
for(itr = frame_numbers.rbegin(); itr != frame_numbers.rend(); ++itr)
total_bytes += frame_size_bytes;
@@ -330,22 +330,22 @@ long long int CacheDisk::GetBytes()
}
// Remove a specific frame
-void CacheDisk::Remove(long int frame_number)
+void CacheDisk::Remove(int64_t frame_number)
{
Remove(frame_number, frame_number);
}
// Remove range of frames
-void CacheDisk::Remove(long int start_frame_number, long int end_frame_number)
+void CacheDisk::Remove(int64_t start_frame_number, int64_t end_frame_number)
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
// Loop through frame numbers
- deque<long int>::iterator itr;
+ deque<int64_t>::iterator itr;
for(itr = frame_numbers.begin(); itr != frame_numbers.end();)
{
- //deque<long int>::iterator current = itr++;
+ //deque<int64_t>::iterator current = itr++;
if (*itr >= start_frame_number && *itr <= end_frame_number)
{
// erase frame number
@@ -355,7 +355,7 @@ void CacheDisk::Remove(long int start_frame_number, long int end_frame_number)
}
// Loop through ordered frame numbers
- vector<long int>::iterator itr_ordered;
+ vector<int64_t>::iterator itr_ordered;
for(itr_ordered = ordered_frame_numbers.begin(); itr_ordered != ordered_frame_numbers.end();)
{
if (*itr_ordered >= start_frame_number && *itr_ordered <= end_frame_number)
@@ -385,7 +385,7 @@ void CacheDisk::Remove(long int start_frame_number, long int end_frame_number)
}
// Move frame to front of queue (so it lasts longer)
-void CacheDisk::MoveToFront(long int frame_number)
+void CacheDisk::MoveToFront(int64_t frame_number)
{
// Does frame exists in cache?
if (frames.count(frame_number))
@@ -394,7 +394,7 @@ void CacheDisk::MoveToFront(long int frame_number)
const GenericScopedLock lock(*cacheCriticalSection);
// Loop through frame numbers
- deque<long int>::iterator itr;
+ deque<int64_t>::iterator itr;
for(itr = frame_numbers.begin(); itr != frame_numbers.end(); ++itr)
{
if (*itr == frame_number)
@@ -432,7 +432,7 @@ void CacheDisk::Clear()
}
// Count the frames in the queue
-long int CacheDisk::Count()
+int64_t CacheDisk::Count()
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
@@ -453,7 +453,7 @@ void CacheDisk::CleanUp()
while (GetBytes() > max_bytes && frame_numbers.size() > 20)
{
// Get the oldest frame number.
- long int frame_to_remove = frame_numbers.back();
+ int64_t frame_to_remove = frame_numbers.back();
// Remove frame_number and frame
Remove(frame_to_remove);
diff --git a/src/CacheMemory.cpp b/src/CacheMemory.cpp
index 61cf54e8..d1674fc5 100644
--- a/src/CacheMemory.cpp
+++ b/src/CacheMemory.cpp
@@ -39,7 +39,7 @@ CacheMemory::CacheMemory() : CacheBase(0) {
};
// Constructor that sets the max bytes to cache
-CacheMemory::CacheMemory(long long int max_bytes) : CacheBase(max_bytes) {
+CacheMemory::CacheMemory(int64_t max_bytes) : CacheBase(max_bytes) {
// Set cache type name
cache_type = "CacheMemory";
range_version = 0;
@@ -76,19 +76,19 @@ void CacheMemory::CalculateRanges() {
// Increment range version
range_version++;
- vector<long int>::iterator itr_ordered;
- long int starting_frame = *ordered_frame_numbers.begin();
- long int ending_frame = *ordered_frame_numbers.begin();
+ vector<int64_t>::iterator itr_ordered;
+ int64_t starting_frame = *ordered_frame_numbers.begin();
+ int64_t ending_frame = *ordered_frame_numbers.begin();
// Loop through all known frames (in sequential order)
for (itr_ordered = ordered_frame_numbers.begin(); itr_ordered != ordered_frame_numbers.end(); ++itr_ordered) {
- long int frame_number = *itr_ordered;
+ int64_t frame_number = *itr_ordered;
if (frame_number - ending_frame > 1) {
// End of range detected
Json::Value range;
// Add JSON object with start/end attributes
- // Use strings, since long ints are supported in JSON
+ // Use strings, since int64_t values are not supported in JSON
stringstream start_str;
start_str << starting_frame;
stringstream end_str;
@@ -109,7 +109,7 @@ void CacheMemory::CalculateRanges() {
Json::Value range;
// Add JSON object with start/end attributes
- // Use strings, since long ints are not supported in JSON
+ // Use strings, since int64_t values are not supported in JSON
stringstream start_str;
start_str << starting_frame;
stringstream end_str;
@@ -131,7 +131,7 @@ void CacheMemory::Add(std::shared_ptr frame)
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
- long int frame_number = frame->number;
+ int64_t frame_number = frame->number;
// Freshen frame if it already exists
if (frames.count(frame_number))
@@ -152,7 +152,7 @@ void CacheMemory::Add(std::shared_ptr frame)
}
// Get a frame from the cache (or NULL shared_ptr if no frame is found)
-std::shared_ptr<Frame> CacheMemory::GetFrame(long int frame_number)
+std::shared_ptr<Frame> CacheMemory::GetFrame(int64_t frame_number)
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
@@ -175,8 +175,8 @@ std::shared_ptr CacheMemory::GetSmallestFrame()
std::shared_ptr f;
// Loop through frame numbers
- deque<long int>::iterator itr;
- long int smallest_frame = -1;
+ deque<int64_t>::iterator itr;
+ int64_t smallest_frame = -1;
for(itr = frame_numbers.begin(); itr != frame_numbers.end(); ++itr)
{
if (*itr < smallest_frame || smallest_frame == -1)
@@ -190,15 +190,15 @@ std::shared_ptr CacheMemory::GetSmallestFrame()
}
// Gets the maximum bytes value
-long long int CacheMemory::GetBytes()
+int64_t CacheMemory::GetBytes()
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
- long long int total_bytes = 0;
+ int64_t total_bytes = 0;
// Loop through frames, and calculate total bytes
- deque<long int>::reverse_iterator itr;
+ deque<int64_t>::reverse_iterator itr;
for(itr = frame_numbers.rbegin(); itr != frame_numbers.rend(); ++itr)
{
total_bytes += frames[*itr]->GetBytes();
@@ -208,19 +208,19 @@ long long int CacheMemory::GetBytes()
}
// Remove a specific frame
-void CacheMemory::Remove(long int frame_number)
+void CacheMemory::Remove(int64_t frame_number)
{
Remove(frame_number, frame_number);
}
// Remove range of frames
-void CacheMemory::Remove(long int start_frame_number, long int end_frame_number)
+void CacheMemory::Remove(int64_t start_frame_number, int64_t end_frame_number)
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
// Loop through frame numbers
- deque<long int>::iterator itr;
+ deque<int64_t>::iterator itr;
for(itr = frame_numbers.begin(); itr != frame_numbers.end();)
{
if (*itr >= start_frame_number && *itr <= end_frame_number)
@@ -232,7 +232,7 @@ void CacheMemory::Remove(long int start_frame_number, long int end_frame_number)
}
// Loop through ordered frame numbers
- vector<long int>::iterator itr_ordered;
+ vector<int64_t>::iterator itr_ordered;
for(itr_ordered = ordered_frame_numbers.begin(); itr_ordered != ordered_frame_numbers.end();)
{
if (*itr_ordered >= start_frame_number && *itr_ordered <= end_frame_number)
@@ -249,7 +249,7 @@ void CacheMemory::Remove(long int start_frame_number, long int end_frame_number)
}
// Move frame to front of queue (so it lasts longer)
-void CacheMemory::MoveToFront(long int frame_number)
+void CacheMemory::MoveToFront(int64_t frame_number)
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
@@ -258,7 +258,7 @@ void CacheMemory::MoveToFront(long int frame_number)
if (frames.count(frame_number))
{
// Loop through frame numbers
- deque<long int>::iterator itr;
+ deque<int64_t>::iterator itr;
for(itr = frame_numbers.begin(); itr != frame_numbers.end(); ++itr)
{
if (*itr == frame_number)
@@ -287,7 +287,7 @@ void CacheMemory::Clear()
}
// Count the frames in the queue
-long int CacheMemory::Count()
+int64_t CacheMemory::Count()
{
// Create a scoped lock, to protect the cache from multiple threads
const GenericScopedLock lock(*cacheCriticalSection);
@@ -308,7 +308,7 @@ void CacheMemory::CleanUp()
while (GetBytes() > max_bytes && frame_numbers.size() > 20)
{
// Get the oldest frame number.
- long int frame_to_remove = frame_numbers.back();
+ int64_t frame_to_remove = frame_numbers.back();
// Remove frame_number and frame
Remove(frame_to_remove);
diff --git a/src/ChunkReader.cpp b/src/ChunkReader.cpp
index 3e13d05b..f3160bd8 100644
--- a/src/ChunkReader.cpp
+++ b/src/ChunkReader.cpp
@@ -123,14 +123,14 @@ void ChunkReader::load_json()
}
// Find the location of a frame in a chunk
-ChunkLocation ChunkReader::find_chunk_frame(long int requested_frame)
+ChunkLocation ChunkReader::find_chunk_frame(int64_t requested_frame)
{
// Determine which chunk contains this frame.
- int chunk_number = (requested_frame / chunk_size) + 1;
+ int64_t chunk_number = (requested_frame / chunk_size) + 1;
// Determine which frame in this chunk
- int start_frame_of_chunk = (chunk_number - 1) * chunk_size;
- int chunk_frame_number = (requested_frame - start_frame_of_chunk) + 1; // Add 1 to adjust for the 1st frame of every chunk is just there to "stoke" the audio samples from the previous chunk.
+ int64_t start_frame_of_chunk = (chunk_number - 1) * chunk_size;
+ int64_t chunk_frame_number = (requested_frame - start_frame_of_chunk) + 1; // Add 1 to adjust for the 1st frame of every chunk is just there to "stoke" the audio samples from the previous chunk.
// Prepare chunk location struct
ChunkLocation location = {chunk_number, chunk_frame_number};
@@ -164,7 +164,7 @@ void ChunkReader::Close()
}
// get a formatted path of a specific chunk
-string ChunkReader::get_chunk_path(int chunk_number, string folder, string extension)
+string ChunkReader::get_chunk_path(int64_t chunk_number, string folder, string extension)
{
// Create path of new chunk video
stringstream chunk_count_string;
@@ -187,7 +187,7 @@ string ChunkReader::get_chunk_path(int chunk_number, string folder, string exten
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> ChunkReader::GetFrame(long int requested_frame) throw(ReaderClosed, ChunkNotFound)
+std::shared_ptr<Frame> ChunkReader::GetFrame(int64_t requested_frame) throw(ReaderClosed, ChunkNotFound)
{
// Determine what chunk contains this frame
ChunkLocation location = find_chunk_frame(requested_frame);
@@ -264,7 +264,9 @@ Json::Value ChunkReader::JsonValue() {
Json::Value root = ReaderBase::JsonValue(); // get parent properties
root["type"] = "ChunkReader";
root["path"] = path;
- root["chunk_size"] = chunk_size;
+ stringstream chunk_size_stream;
+ chunk_size_stream << chunk_size;
+ root["chunk_size"] = chunk_size_stream.str();
root["chunk_version"] = version;
// return JsonValue
@@ -304,7 +306,7 @@ void ChunkReader::SetJsonValue(Json::Value root) throw(InvalidFile) {
if (!root["path"].isNull())
path = root["path"].asString();
if (!root["chunk_size"].isNull())
- chunk_size = root["chunk_size"].asInt();
+ chunk_size = atoll(root["chunk_size"].asString().c_str());
if (!root["chunk_version"].isNull())
version = (ChunkVersion) root["chunk_version"].asInt();
diff --git a/src/ChunkWriter.cpp b/src/ChunkWriter.cpp
index aad8d2b1..8ebcff04 100644
--- a/src/ChunkWriter.cpp
+++ b/src/ChunkWriter.cpp
@@ -51,7 +51,7 @@ ChunkWriter::ChunkWriter(string path, ReaderBase *reader) throw (InvalidFile, In
}
// get a formatted path of a specific chunk
-string ChunkWriter::get_chunk_path(int chunk_number, string folder, string extension)
+string ChunkWriter::get_chunk_path(int64_t chunk_number, string folder, string extension)
{
// Create path of new chunk video
stringstream chunk_count_string;
@@ -193,10 +193,10 @@ void ChunkWriter::WriteFrame(std::shared_ptr frame) throw(WriterClosed)
// Write a block of frames from a reader
-void ChunkWriter::WriteFrame(ReaderBase* reader, int start, int length) throw(WriterClosed)
+void ChunkWriter::WriteFrame(ReaderBase* reader, int64_t start, int64_t length) throw(WriterClosed)
{
// Loop through each frame (and encoded it)
- for (int number = start; number <= length; number++)
+ for (int64_t number = start; number <= length; number++)
{
// Get the frame
std::shared_ptr f = reader->GetFrame(number);
@@ -207,10 +207,10 @@ void ChunkWriter::WriteFrame(ReaderBase* reader, int start, int length) throw(Wr
}
// Write a block of frames from the local cached reader
-void ChunkWriter::WriteFrame(int start, int length) throw(WriterClosed)
+void ChunkWriter::WriteFrame(int64_t start, int64_t length) throw(WriterClosed)
{
// Loop through each frame (and encoded it)
- for (int number = start; number <= length; number++)
+ for (int64_t number = start; number <= length; number++)
{
// Get the frame
std::shared_ptr f = local_reader->GetFrame(number);
diff --git a/src/Clip.cpp b/src/Clip.cpp
index 27106bc0..5951bfaa 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -255,7 +255,7 @@ float Clip::End() throw(ReaderClosed)
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> Clip::GetFrame(long int requested_frame) throw(ReaderClosed)
+std::shared_ptr<Frame> Clip::GetFrame(int64_t requested_frame) throw(ReaderClosed)
{
if (reader)
{
@@ -275,8 +275,8 @@ std::shared_ptr Clip::GetFrame(long int requested_frame) throw(ReaderClos
enabled_video = 0;
// Is a time map detected
- long int new_frame_number = requested_frame;
- long int time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
+ int64_t new_frame_number = requested_frame;
+ int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
if (time.Values.size() > 1)
new_frame_number = time_mapped_number;
@@ -347,7 +347,7 @@ void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
}
// Adjust the audio and image of a time mapped frame
-std::shared_ptr<Frame> Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, long int frame_number) throw(ReaderClosed)
+std::shared_ptr<Frame> Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number) throw(ReaderClosed)
{
// Check for valid reader
if (!reader)
@@ -569,7 +569,7 @@ std::shared_ptr Clip::get_time_mapped_frame(std::shared_ptr frame,
}
// Adjust frame number minimum value
-long int Clip::adjust_frame_number_minimum(long int frame_number)
+int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
// Never return a frame number 0 or below
if (frame_number < 1)
@@ -580,7 +580,7 @@ long int Clip::adjust_frame_number_minimum(long int frame_number)
}
// Get or generate a blank frame
-std::shared_ptr<Frame> Clip::GetOrCreateFrame(long int number)
+std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
std::shared_ptr new_frame;
@@ -653,7 +653,7 @@ string Clip::Json() {
}
// Get all properties for a specific frame
-string Clip::PropertiesJSON(long int requested_frame) {
+string Clip::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/ClipBase.cpp b/src/ClipBase.cpp
index a5c24342..80cad87d 100644
--- a/src/ClipBase.cpp
+++ b/src/ClipBase.cpp
@@ -62,7 +62,7 @@ void ClipBase::SetJsonValue(Json::Value root) {
}
// Generate JSON for a property
-Json::Value ClipBase::add_property_json(string name, float value, string type, string memo, Keyframe* keyframe, float min_value, float max_value, bool readonly, long int requested_frame) {
+Json::Value ClipBase::add_property_json(string name, float value, string type, string memo, Keyframe* keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) {
// Requested Point
Point requested_point(requested_frame, requested_frame);
diff --git a/src/Color.cpp b/src/Color.cpp
index d9cd7db9..1bb322cf 100644
--- a/src/Color.cpp
+++ b/src/Color.cpp
@@ -61,7 +61,7 @@ Color::Color(string color_hex)
}
// Get the HEX value of a color at a specific frame
-string Color::GetColorHex(long int frame_number) {
+string Color::GetColorHex(int64_t frame_number) {
int r = red.GetInt(frame_number);
int g = green.GetInt(frame_number);
diff --git a/src/DecklinkInput.cpp b/src/DecklinkInput.cpp
index b447f7e9..99d13341 100644
--- a/src/DecklinkInput.cpp
+++ b/src/DecklinkInput.cpp
@@ -104,7 +104,7 @@ unsigned long DeckLinkInputDelegate::GetCurrentFrameNumber()
return 0;
}
-std::shared_ptr<openshot::Frame> DeckLinkInputDelegate::GetFrame(long int requested_frame)
+std::shared_ptr<openshot::Frame> DeckLinkInputDelegate::GetFrame(int64_t requested_frame)
{
std::shared_ptr f;
diff --git a/src/DecklinkReader.cpp b/src/DecklinkReader.cpp
index b7754255..6df2fc1c 100644
--- a/src/DecklinkReader.cpp
+++ b/src/DecklinkReader.cpp
@@ -231,7 +231,7 @@ unsigned long DecklinkReader::GetCurrentFrameNumber()
}
// Get an openshot::Frame object for the next available LIVE frame
-std::shared_ptr<Frame> DecklinkReader::GetFrame(long int requested_frame) throw(ReaderClosed)
+std::shared_ptr<Frame> DecklinkReader::GetFrame(int64_t requested_frame) throw(ReaderClosed)
{
// Get a frame from the delegate decklink class (which is collecting them on another thread)
std::shared_ptr f = delegate->GetFrame(requested_frame);
diff --git a/src/DummyReader.cpp b/src/DummyReader.cpp
index 9237e6a0..83c5617b 100644
--- a/src/DummyReader.cpp
+++ b/src/DummyReader.cpp
@@ -97,7 +97,7 @@ void DummyReader::Close()
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> DummyReader::GetFrame(long int requested_frame) throw(ReaderClosed)
+std::shared_ptr<Frame> DummyReader::GetFrame(int64_t requested_frame) throw(ReaderClosed)
{
// Check for open reader (or throw exception)
if (!is_open)
diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
index 5fd1bcf0..8862475a 100644
--- a/src/FFmpegReader.cpp
+++ b/src/FFmpegReader.cpp
@@ -83,7 +83,7 @@ FFmpegReader::~FFmpegReader() {
}
// This struct holds the associated video frame and starting sample # for an audio packet.
-bool AudioLocation::is_near(AudioLocation location, int samples_per_frame, long int amount)
+bool AudioLocation::is_near(AudioLocation location, int samples_per_frame, int64_t amount)
{
// Is frame even close to this one?
if (abs(location.frame - frame) >= 2)
@@ -92,7 +92,7 @@ bool AudioLocation::is_near(AudioLocation location, int samples_per_frame, long
// Note that samples_per_frame can vary slightly frame to frame when the
// audio sampling rate is not an integer multiple of the video fps.
- long int diff = samples_per_frame * (location.frame - frame) + location.sample_start - sample_start;
+ int64_t diff = samples_per_frame * (location.frame - frame) + location.sample_start - sample_start;
if (abs(diff) <= amount)
// close
return true;
@@ -393,7 +393,7 @@ void FFmpegReader::UpdateVideoInfo()
}
-std::shared_ptr<Frame> FFmpegReader::GetFrame(long int requested_frame) throw(OutOfBoundsFrame, ReaderClosed, TooManySeeks)
+std::shared_ptr<Frame> FFmpegReader::GetFrame(int64_t requested_frame) throw(OutOfBoundsFrame, ReaderClosed, TooManySeeks)
{
// Check for open reader (or throw exception)
if (!is_open)
@@ -447,7 +447,7 @@ std::shared_ptr FFmpegReader::GetFrame(long int requested_frame) throw(Ou
ReadStream(1);
// Are we within X frames of the requested frame?
- long int diff = requested_frame - last_frame;
+ int64_t diff = requested_frame - last_frame;
if (diff >= 1 && diff <= 20)
{
// Continue walking the stream
@@ -475,7 +475,7 @@ std::shared_ptr FFmpegReader::GetFrame(long int requested_frame) throw(Ou
}
// Read the stream until we find the requested Frame
-std::shared_ptr<Frame> FFmpegReader::ReadStream(long int requested_frame)
+std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame)
{
// Allocate video frame
bool end_of_stream = false;
@@ -723,7 +723,7 @@ bool FFmpegReader::CheckSeek(bool is_video)
return false;
// Determine max seeked frame
- long int max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
+ int64_t max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
if (seek_video_frame_found > max_seeked_frame)
max_seeked_frame = seek_video_frame_found;
@@ -753,10 +753,10 @@ bool FFmpegReader::CheckSeek(bool is_video)
}
// Process a video packet
-void FFmpegReader::ProcessVideoPacket(long int requested_frame)
+void FFmpegReader::ProcessVideoPacket(int64_t requested_frame)
{
// Calculate current frame #
- long int current_frame = ConvertVideoPTStoFrame(GetVideoPTS());
+ int64_t current_frame = ConvertVideoPTStoFrame(GetVideoPTS());
// Track 1st video packet after a successful seek
if (!seek_video_frame_found && is_seeking)
@@ -782,7 +782,7 @@ void FFmpegReader::ProcessVideoPacket(long int requested_frame)
PixelFormat pix_fmt = pCodecCtx->pix_fmt;
int height = info.height;
int width = info.width;
- long int video_length = info.video_length;
+ int64_t video_length = info.video_length;
AVPicture *my_frame = pFrame;
// Add video frame to list of processing video frames
@@ -875,7 +875,7 @@ void FFmpegReader::ProcessVideoPacket(long int requested_frame)
}
// Process an audio packet
-void FFmpegReader::ProcessAudioPacket(long int requested_frame, long int target_frame, int starting_sample)
+void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_frame, int starting_sample)
{
// Track 1st audio packet after a successful seek
if (!seek_audio_frame_found && is_seeking)
@@ -924,7 +924,7 @@ void FFmpegReader::ProcessAudioPacket(long int requested_frame, long int target_
int pts_remaining_samples = packet_samples / info.channels; // Adjust for zero based array
// DEBUG (FOR AUDIO ISSUES) - Get the audio packet start time (in seconds)
- long int adjusted_pts = packet->pts + audio_pts_offset;
+ int64_t adjusted_pts = packet->pts + audio_pts_offset;
double audio_seconds = double(adjusted_pts) * info.audio_timebase.ToDouble();
double sample_seconds = double(pts_total) / info.sample_rate;
@@ -1022,7 +1022,7 @@ void FFmpegReader::ProcessAudioPacket(long int requested_frame, long int target_
av_free(audio_converted->data[0]);
AV_FREE_FRAME(&audio_converted);
- long int starting_frame_number = -1;
+ int64_t starting_frame_number = -1;
bool partial_frame = true;
for (int channel_filter = 0; channel_filter < info.channels; channel_filter++)
{
@@ -1121,7 +1121,7 @@ void FFmpegReader::ProcessAudioPacket(long int requested_frame, long int target_
{
const GenericScopedLock lock(processingCriticalSection);
// Update all frames as completed
- for (long int f = target_frame; f < starting_frame_number; f++) {
+ for (int64_t f = target_frame; f < starting_frame_number; f++) {
// Remove the frame # from the processing list. NOTE: If more than one thread is
// processing this frame, the frame # will be in this list multiple times. We are only
// removing a single instance of it here.
@@ -1150,7 +1150,7 @@ void FFmpegReader::ProcessAudioPacket(long int requested_frame, long int target_
// Seek to a specific frame. This is not always frame accurate, it's more of an estimation on many codecs.
-void FFmpegReader::Seek(long int requested_frame) throw(TooManySeeks)
+void FFmpegReader::Seek(int64_t requested_frame) throw(TooManySeeks)
{
// Adjust for a requested frame that is too small or too large
if (requested_frame < 1)
@@ -1313,9 +1313,9 @@ void FFmpegReader::Seek(long int requested_frame) throw(TooManySeeks)
}
// Get the PTS for the current video packet
-long int FFmpegReader::GetVideoPTS()
+int64_t FFmpegReader::GetVideoPTS()
{
- long int current_pts = 0;
+ int64_t current_pts = 0;
if(packet->dts != AV_NOPTS_VALUE)
current_pts = packet->dts;
@@ -1354,17 +1354,17 @@ void FFmpegReader::UpdatePTSOffset(bool is_video)
}
// Convert PTS into Frame Number
-long int FFmpegReader::ConvertVideoPTStoFrame(long int pts)
+int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts)
{
// Apply PTS offset
pts = pts + video_pts_offset;
- long int previous_video_frame = current_video_frame;
+ int64_t previous_video_frame = current_video_frame;
// Get the video packet start time (in seconds)
double video_seconds = double(pts) * info.video_timebase.ToDouble();
// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
- long int frame = round(video_seconds * info.fps.ToDouble()) + 1;
+ int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;
// Keep track of the expected video frame #
if (current_video_frame == 0)
@@ -1390,8 +1390,8 @@ long int FFmpegReader::ConvertVideoPTStoFrame(long int pts)
while (current_video_frame < frame) {
if (!missing_video_frames.count(current_video_frame)) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ConvertVideoPTStoFrame (tracking missing frame)", "current_video_frame", current_video_frame, "previous_video_frame", previous_video_frame, "", -1, "", -1, "", -1, "", -1);
- missing_video_frames.insert(pair<long int, long int>(current_video_frame, previous_video_frame));
- missing_video_frames_source.insert(pair<long int, long int>(previous_video_frame, current_video_frame));
+ missing_video_frames.insert(pair<int64_t, int64_t>(current_video_frame, previous_video_frame));
+ missing_video_frames_source.insert(pair<int64_t, int64_t>(previous_video_frame, current_video_frame));
}
// Mark this reader as containing missing frames
@@ -1407,33 +1407,33 @@ long int FFmpegReader::ConvertVideoPTStoFrame(long int pts)
}
// Convert Frame Number into Video PTS
-long int FFmpegReader::ConvertFrameToVideoPTS(long int frame_number)
+int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number)
{
// Get timestamp of this frame (in seconds)
double seconds = double(frame_number) / info.fps.ToDouble();
// Calculate the # of video packets in this timestamp
- long int video_pts = round(seconds / info.video_timebase.ToDouble());
+ int64_t video_pts = round(seconds / info.video_timebase.ToDouble());
// Apply PTS offset (opposite)
return video_pts - video_pts_offset;
}
// Convert Frame Number into Video PTS
-long int FFmpegReader::ConvertFrameToAudioPTS(long int frame_number)
+int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number)
{
// Get timestamp of this frame (in seconds)
double seconds = double(frame_number) / info.fps.ToDouble();
// Calculate the # of audio packets in this timestamp
- long int audio_pts = round(seconds / info.audio_timebase.ToDouble());
+ int64_t audio_pts = round(seconds / info.audio_timebase.ToDouble());
// Apply PTS offset (opposite)
return audio_pts - audio_pts_offset;
}
// Calculate Starting video frame and sample # for an audio PTS
-AudioLocation FFmpegReader::GetAudioPTSLocation(long int pts)
+AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts)
{
// Apply PTS offset
pts = pts + audio_pts_offset;
@@ -1445,7 +1445,7 @@ AudioLocation FFmpegReader::GetAudioPTSLocation(long int pts)
double frame = (audio_seconds * info.fps.ToDouble()) + 1;
// Frame # as a whole number (no more decimals)
- long int whole_frame = long(frame);
+ int64_t whole_frame = int64_t(frame);
// Remove the whole number, and only get the decimal of the frame
double sample_start_percentage = frame - double(whole_frame);
@@ -1469,7 +1469,7 @@ AudioLocation FFmpegReader::GetAudioPTSLocation(long int pts)
if (previous_packet_location.frame != -1) {
if (location.is_near(previous_packet_location, samples_per_frame, samples_per_frame))
{
- long int orig_frame = location.frame;
+ int64_t orig_frame = location.frame;
int orig_start = location.sample_start;
// Update sample start, to prevent gaps in audio
@@ -1484,10 +1484,10 @@ AudioLocation FFmpegReader::GetAudioPTSLocation(long int pts)
ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Ignored - too big)", "Previous location frame", previous_packet_location.frame, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts, "", -1, "", -1);
const GenericScopedLock lock(processingCriticalSection);
- for (long int audio_frame = previous_packet_location.frame; audio_frame < location.frame; audio_frame++) {
+ for (int64_t audio_frame = previous_packet_location.frame; audio_frame < location.frame; audio_frame++) {
if (!missing_audio_frames.count(audio_frame)) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (tracking missing frame)", "missing_audio_frame", audio_frame, "previous_audio_frame", previous_packet_location.frame, "new location frame", location.frame, "", -1, "", -1, "", -1);
- missing_audio_frames.insert(pair<long int, long int>(previous_packet_location.frame - 1, audio_frame));
+ missing_audio_frames.insert(pair<int64_t, int64_t>(previous_packet_location.frame - 1, audio_frame));
}
}
}
@@ -1501,7 +1501,7 @@ AudioLocation FFmpegReader::GetAudioPTSLocation(long int pts)
}
// Create a new Frame (or return an existing one) and add it to the working queue.
-std::shared_ptr<Frame> FFmpegReader::CreateFrame(long int requested_frame)
+std::shared_ptr<Frame> FFmpegReader::CreateFrame(int64_t requested_frame)
{
// Check working cache
std::shared_ptr output = working_cache.GetFrame(requested_frame);
@@ -1525,11 +1525,11 @@ std::shared_ptr FFmpegReader::CreateFrame(long int requested_frame)
}
// Determine if frame is partial due to seek
-bool FFmpegReader::IsPartialFrame(long int requested_frame) {
+bool FFmpegReader::IsPartialFrame(int64_t requested_frame) {
// Sometimes a seek gets partial frames, and we need to remove them
bool seek_trash = false;
- long int max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
+ int64_t max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
if (seek_video_frame_found > max_seeked_frame)
max_seeked_frame = seek_video_frame_found;
if ((info.has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||
@@ -1540,7 +1540,7 @@ bool FFmpegReader::IsPartialFrame(long int requested_frame) {
}
// Check if a frame is missing and attempt to replace it's frame image (and
-bool FFmpegReader::CheckMissingFrame(long int requested_frame)
+bool FFmpegReader::CheckMissingFrame(int64_t requested_frame)
{
// Lock
const GenericScopedLock lock(processingCriticalSection);
@@ -1560,12 +1560,12 @@ bool FFmpegReader::CheckMissingFrame(long int requested_frame)
// Missing frames (sometimes frame #'s are skipped due to invalid or missing timestamps)
- map<long int, long int>::iterator itr;
+ map<int64_t, int64_t>::iterator itr;
bool found_missing_frame = false;
// Check if requested frame is a missing frame
if (missing_video_frames.count(requested_frame) || missing_audio_frames.count(requested_frame)) {
- long int missing_source_frame = -1;
+ int64_t missing_source_frame = -1;
if (missing_video_frames.count(requested_frame))
missing_source_frame = missing_video_frames.find(requested_frame)->second;
else if (missing_audio_frames.count(requested_frame))
@@ -1623,7 +1623,7 @@ bool FFmpegReader::CheckMissingFrame(long int requested_frame)
}
// Check the working queue, and move finished frames to the finished queue
-void FFmpegReader::CheckWorkingFrames(bool end_of_stream, long int requested_frame)
+void FFmpegReader::CheckWorkingFrames(bool end_of_stream, int64_t requested_frame)
{
// Loop through all working queue frames
bool checked_count_tripped = false;
@@ -1771,7 +1771,7 @@ void FFmpegReader::CheckFPS()
UpdatePTSOffset(true);
// Get PTS of this packet
- long int pts = GetVideoPTS();
+ int64_t pts = GetVideoPTS();
// Remove pFrame
RemoveAVFrame(pFrame);
@@ -1877,11 +1877,11 @@ void FFmpegReader::RemoveAVPacket(AVPacket* remove_packet)
}
/// Get the smallest video frame that is still being processed
-long int FFmpegReader::GetSmallestVideoFrame()
+int64_t FFmpegReader::GetSmallestVideoFrame()
{
// Loop through frame numbers
- map<long int, long int>::iterator itr;
- long int smallest_frame = -1;
+ map<int64_t, int64_t>::iterator itr;
+ int64_t smallest_frame = -1;
const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
for(itr = processing_video_frames.begin(); itr != processing_video_frames.end(); ++itr)
{
@@ -1894,11 +1894,11 @@ long int FFmpegReader::GetSmallestVideoFrame()
}
/// Get the smallest audio frame that is still being processed
-long int FFmpegReader::GetSmallestAudioFrame()
+int64_t FFmpegReader::GetSmallestAudioFrame()
{
// Loop through frame numbers
- map<long int, long int>::iterator itr;
- long int smallest_frame = -1;
+ map<int64_t, int64_t>::iterator itr;
+ int64_t smallest_frame = -1;
const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
for(itr = processing_audio_frames.begin(); itr != processing_audio_frames.end(); ++itr)
{
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index 55e02b73..4d35b6fb 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -500,12 +500,12 @@ void FFmpegWriter::write_queued_frames() throw (ErrorEncodingVideo)
}
// Write a block of frames from a reader
-void FFmpegWriter::WriteFrame(ReaderBase* reader, long int start, long int length) throw(ErrorEncodingVideo, WriterClosed)
+void FFmpegWriter::WriteFrame(ReaderBase* reader, int64_t start, int64_t length) throw(ErrorEncodingVideo, WriterClosed)
{
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::WriteFrame (from Reader)", "start", start, "length", length, "", -1, "", -1, "", -1, "", -1);
// Loop through each frame (and encoded it)
- for (long int number = start; number <= length; number++)
+ for (int64_t number = start; number <= length; number++)
{
// Get the frame
std::shared_ptr<Frame> f = reader->GetFrame(number);
@@ -896,9 +896,14 @@ AVStream* FFmpegWriter::add_video_stream()
/* Init video encoder options */
c->bit_rate = info.video_bit_rate;
- c->rc_min_rate = info.video_bit_rate - (info.video_bit_rate / 6);
- c->rc_max_rate = info.video_bit_rate;
- c->rc_buffer_size = FFMAX(c->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
+
+ //TODO: Implement variable bitrate feature (which actually works). This implementation throws
+ //invalid bitrate errors and rc buffer underflow errors, etc...
+ //c->rc_min_rate = info.video_bit_rate;
+ //c->rc_max_rate = info.video_bit_rate;
+ //c->rc_buffer_size = FFMAX(c->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
+ //if ( !c->rc_initial_buffer_occupancy )
+ // c->rc_initial_buffer_occupancy = c->rc_buffer_size * 3/4;
c->qmin = 2;
c->qmax = 30;
diff --git a/src/Frame.cpp b/src/Frame.cpp
index cac828f2..3d6bf324 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -42,7 +42,7 @@ Frame::Frame() : number(1), pixel_ratio(1,1), channels(2), width(1), height(1),
};
// Constructor - image only (48kHz audio silence)
-Frame::Frame(long int number, int width, int height, string color)
+Frame::Frame(int64_t number, int width, int height, string color)
: number(number), pixel_ratio(1,1), channels(2), width(width), height(height),
channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL), has_audio_data(false), has_image_data(false)
{
@@ -54,7 +54,7 @@ Frame::Frame(long int number, int width, int height, string color)
};
// Constructor - audio only (300x200 blank image)
-Frame::Frame(long int number, int samples, int channels) :
+Frame::Frame(int64_t number, int samples, int channels) :
number(number), pixel_ratio(1,1), channels(channels), width(1), height(1),
channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL), has_audio_data(false), has_image_data(false)
{
@@ -66,7 +66,7 @@ Frame::Frame(long int number, int samples, int channels) :
};
// Constructor - image & audio
-Frame::Frame(long int number, int width, int height, string color, int samples, int channels)
+Frame::Frame(int64_t number, int width, int height, string color, int samples, int channels)
: number(number), pixel_ratio(1,1), channels(channels), width(width), height(height),
channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL), has_audio_data(false), has_image_data(false)
{
@@ -433,9 +433,9 @@ juce::AudioSampleBuffer *Frame::GetAudioSampleBuffer()
}
// Get the size in bytes of this frame (rough estimate)
-int64 Frame::GetBytes()
+int64_t Frame::GetBytes()
{
- int64 total_bytes = 0;
+ int64_t total_bytes = 0;
if (image)
total_bytes += (width * height * sizeof(char) * 4);
if (audio) {
@@ -474,13 +474,13 @@ void Frame::SetPixelRatio(int num, int den)
}
// Set frame number
-void Frame::SetFrameNumber(long int new_number)
+void Frame::SetFrameNumber(int64_t new_number)
{
number = new_number;
}
// Calculate the # of samples per video frame (for a specific frame number and frame rate)
-int Frame::GetSamplesPerFrame(long int number, Fraction fps, int sample_rate, int channels)
+int Frame::GetSamplesPerFrame(int64_t number, Fraction fps, int sample_rate, int channels)
{
// Get the total # of samples for the previous frame, and the current frame (rounded)
double fps_rate = fps.Reciprocal().ToDouble();
diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
index a1f044b2..7557e14c 100644
--- a/src/FrameMapper.cpp
+++ b/src/FrameMapper.cpp
@@ -76,7 +76,7 @@ ReaderBase* FrameMapper::Reader() throw(ReaderClosed)
throw ReaderClosed("No Reader has been initialized for FrameMapper. Call Reader(*reader) before calling this method.", "");
}
-void FrameMapper::AddField(long int frame)
+void FrameMapper::AddField(int64_t frame)
{
// Add a field, and toggle the odd / even field
AddField(Field(frame, field_toggle));
@@ -136,11 +136,11 @@ void FrameMapper::Init()
// Calculate # of fields to map
- long int frame = 1;
- long int number_of_fields = reader->info.video_length * 2;
+ int64_t frame = 1;
+ int64_t number_of_fields = reader->info.video_length * 2;
// Loop through all fields in the original video file
- for (long int field = 1; field <= number_of_fields; field++)
+ for (int64_t field = 1; field <= number_of_fields; field++)
{
if (difference == 0) // Same frame rate, NO pull-down or special techniques required
@@ -208,7 +208,7 @@ void FrameMapper::Init()
// Map the remaining framerates using a simple Keyframe curve
// Calculate the difference (to be used as a multiplier)
double rate_diff = target.ToDouble() / original.ToDouble();
- long int new_length = reader->info.video_length * rate_diff;
+ int64_t new_length = reader->info.video_length * rate_diff;
// Build curve for framerate mapping
Keyframe rate_curve;
@@ -216,7 +216,7 @@ void FrameMapper::Init()
rate_curve.AddPoint(new_length, reader->info.video_length, LINEAR);
// Loop through curve, and build list of frames
- for (long int frame_num = 1; frame_num <= new_length; frame_num++)
+ for (int64_t frame_num = 1; frame_num <= new_length; frame_num++)
{
// Add 2 fields per frame
AddField(rate_curve.GetInt(frame_num));
@@ -229,10 +229,10 @@ void FrameMapper::Init()
Field Even(0, true); // temp field used to track the EVEN field
// Variables used to remap audio samples
- long int start_samples_frame = 1;
+ int64_t start_samples_frame = 1;
int start_samples_position = 0;
- for (long int field = 1; field <= fields.size(); field++)
+ for (int64_t field = 1; field <= fields.size(); field++)
{
// Get the current field
Field f = fields[field - 1];
@@ -241,7 +241,7 @@ void FrameMapper::Init()
if (field % 2 == 0 && field > 0)
{
// New frame number
- long int frame_number = field / 2 + timeline_frame_offset;
+ int64_t frame_number = field / 2 + timeline_frame_offset;
// Set the bottom frame
if (f.isOdd)
@@ -252,7 +252,7 @@ void FrameMapper::Init()
// Determine the range of samples (from the original rate). Resampling happens in real-time when
// calling the GetFrame() method. So this method only needs to redistribute the original samples with
// the original sample rate.
- long int end_samples_frame = start_samples_frame;
+ int64_t end_samples_frame = start_samples_frame;
int end_samples_position = start_samples_position;
int remaining_samples = Frame::GetSamplesPerFrame(frame_number, target, reader->info.sample_rate, reader->info.channels);
@@ -308,7 +308,7 @@ void FrameMapper::Init()
fields.clear();
}
-MappedFrame FrameMapper::GetMappedFrame(long int TargetFrameNumber) throw(OutOfBoundsFrame)
+MappedFrame FrameMapper::GetMappedFrame(int64_t TargetFrameNumber) throw(OutOfBoundsFrame)
{
// Ignore mapping on single image readers
if (info.has_video and !info.has_audio and info.has_single_image) {
@@ -341,7 +341,7 @@ MappedFrame FrameMapper::GetMappedFrame(long int TargetFrameNumber) throw(OutOfB
}
// Get or generate a blank frame
-std::shared_ptr<Frame> FrameMapper::GetOrCreateFrame(long int number)
+std::shared_ptr<Frame> FrameMapper::GetOrCreateFrame(int64_t number)
{
std::shared_ptr<Frame> new_frame;
@@ -380,7 +380,7 @@ std::shared_ptr FrameMapper::GetOrCreateFrame(long int number)
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> FrameMapper::GetFrame(long int requested_frame) throw(ReaderClosed)
+std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame) throw(ReaderClosed)
{
// Check final cache, and just return the frame (if it's available)
std::shared_ptr<Frame> final_frame = final_cache.GetFrame(requested_frame);
@@ -406,7 +406,7 @@ std::shared_ptr FrameMapper::GetFrame(long int requested_frame) throw(Rea
ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::GetFrame (Loop through frames)", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "", -1, "", -1, "", -1, "", -1);
// Loop through all requested frames
- for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
+ for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
{
// Debug output
@@ -516,7 +516,7 @@ std::shared_ptr FrameMapper::GetFrame(long int requested_frame) throw(Rea
// Copy the samples
int samples_copied = 0;
- long int starting_frame = copy_samples.frame_start;
+ int64_t starting_frame = copy_samples.frame_start;
while (info.has_audio && samples_copied < copy_samples.total)
{
// Init number of samples to copy this iteration
@@ -751,7 +751,7 @@ void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldow
}
// Set offset relative to parent timeline
-void FrameMapper::SetTimelineFrameOffset(long int offset)
+void FrameMapper::SetTimelineFrameOffset(int64_t offset)
{
ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::SetTimelineFrameOffset", "offset", offset, "", -1, "", -1, "", -1, "", -1, "", -1);
@@ -763,7 +763,7 @@ void FrameMapper::SetTimelineFrameOffset(long int offset)
}
// Resample audio and map channels (if needed)
-void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, long int original_frame_number)
+void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t original_frame_number)
{
// Init audio buffers / variables
int total_frame_samples = 0;
diff --git a/src/ImageReader.cpp b/src/ImageReader.cpp
index 5e7e820d..30ff54f2 100644
--- a/src/ImageReader.cpp
+++ b/src/ImageReader.cpp
@@ -113,7 +113,7 @@ void ImageReader::Close()
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> ImageReader::GetFrame(long int requested_frame) throw(ReaderClosed)
+std::shared_ptr<Frame> ImageReader::GetFrame(int64_t requested_frame) throw(ReaderClosed)
{
// Check for open reader (or throw exception)
if (!is_open)
diff --git a/src/ImageWriter.cpp b/src/ImageWriter.cpp
index f8099c87..4d7aa6bb 100644
--- a/src/ImageWriter.cpp
+++ b/src/ImageWriter.cpp
@@ -120,12 +120,12 @@ void ImageWriter::WriteFrame(std::shared_ptr frame) throw(WriterClosed)
}
// Write a block of frames from a reader
-void ImageWriter::WriteFrame(ReaderBase* reader, long int start, long int length) throw(WriterClosed)
+void ImageWriter::WriteFrame(ReaderBase* reader, int64_t start, int64_t length) throw(WriterClosed)
{
ZmqLogger::Instance()->AppendDebugMethod("ImageWriter::WriteFrame (from Reader)", "start", start, "length", length, "", -1, "", -1, "", -1, "", -1);
// Loop through each frame (and encoded it)
- for (long int number = start; number <= length; number++)
+ for (int64_t number = start; number <= length; number++)
{
// Get the frame
std::shared_ptr<Frame> f = reader->GetFrame(number);
diff --git a/src/KeyFrame.cpp b/src/KeyFrame.cpp
index c87d7e53..dd1cc963 100644
--- a/src/KeyFrame.cpp
+++ b/src/KeyFrame.cpp
@@ -35,11 +35,11 @@ using namespace openshot;
// processing the curve, due to all the points going from left to right.
void Keyframe::ReorderPoints() {
// Loop through all coordinates, and sort them by the X attribute
- for (long int x = 0; x < Points.size(); x++) {
- long int compare_index = x;
- long int smallest_index = x;
+ for (int64_t x = 0; x < Points.size(); x++) {
+ int64_t compare_index = x;
+ int64_t smallest_index = x;
- for (long int compare_index = x + 1; compare_index < Points.size(); compare_index++) {
+ for (int64_t compare_index = x + 1; compare_index < Points.size(); compare_index++) {
if (Points[compare_index].co.X < Points[smallest_index].co.X) {
smallest_index = compare_index;
}
@@ -107,9 +107,9 @@ void Keyframe::AddPoint(double x, double y, InterpolationType interpolate)
}
// Get the index of a point by matching a coordinate
-long int Keyframe::FindIndex(Point p) throw(OutOfBoundsPoint) {
+int64_t Keyframe::FindIndex(Point p) throw(OutOfBoundsPoint) {
// loop through points, and find a matching coordinate
- for (long int x = 0; x < Points.size(); x++) {
+ for (int64_t x = 0; x < Points.size(); x++) {
// Get each point
Point existing_point = Points[x];
@@ -127,7 +127,7 @@ long int Keyframe::FindIndex(Point p) throw(OutOfBoundsPoint) {
// Determine if point already exists
bool Keyframe::Contains(Point p) {
// loop through points, and find a matching coordinate
- for (long int x = 0; x < Points.size(); x++) {
+ for (int64_t x = 0; x < Points.size(); x++) {
// Get each point
Point existing_point = Points[x];
@@ -147,7 +147,7 @@ Point Keyframe::GetClosestPoint(Point p, bool useLeft) {
Point closest(-1, -1);
// loop through points, and find a matching coordinate
- for (long int x = 0; x < Points.size(); x++) {
+ for (int64_t x = 0; x < Points.size(); x++) {
// Get each point
Point existing_point = Points[x];
@@ -189,7 +189,7 @@ Point Keyframe::GetPreviousPoint(Point p) {
// Lookup the index of this point
try {
- long int index = FindIndex(p);
+ int64_t index = FindIndex(p);
// If not the 1st point
if (index > 0)
@@ -208,7 +208,7 @@ Point Keyframe::GetMaxPoint() {
Point maxPoint(-1, -1);
// loop through points, and find the largest Y value
- for (long int x = 0; x < Points.size(); x++) {
+ for (int64_t x = 0; x < Points.size(); x++) {
// Get each point
Point existing_point = Points[x];
@@ -223,7 +223,7 @@ Point Keyframe::GetMaxPoint() {
}
// Get the value at a specific index
-double Keyframe::GetValue(long int index)
+double Keyframe::GetValue(int64_t index)
{
// Check if it needs to be processed
if (needs_update)
@@ -245,7 +245,7 @@ double Keyframe::GetValue(long int index)
}
// Get the rounded INT value at a specific index
-int Keyframe::GetInt(long int index)
+int Keyframe::GetInt(int64_t index)
{
// Check if it needs to be processed
if (needs_update)
@@ -267,7 +267,7 @@ int Keyframe::GetInt(long int index)
}
// Get the rounded INT value at a specific index
-long int Keyframe::GetLong(long int index)
+int64_t Keyframe::GetLong(int64_t index)
{
// Check if it needs to be processed
if (needs_update)
@@ -369,7 +369,7 @@ void Keyframe::SetJsonValue(Json::Value root) {
if (!root["Points"].isNull())
// loop through points
- for (long int x = 0; x < root["Points"].size(); x++) {
+ for (int64_t x = 0; x < root["Points"].size(); x++) {
// Get each point
Json::Value existing_point = root["Points"][(Json::UInt) x];
@@ -385,7 +385,7 @@ void Keyframe::SetJsonValue(Json::Value root) {
}
// Get the fraction that represents how many times this value is repeated in the curve
-Fraction Keyframe::GetRepeatFraction(long int index)
+Fraction Keyframe::GetRepeatFraction(int64_t index)
{
// Check if it needs to be processed
if (needs_update)
@@ -407,7 +407,7 @@ Fraction Keyframe::GetRepeatFraction(long int index)
}
// Get the change in Y value (from the previous Y value)
-double Keyframe::GetDelta(long int index)
+double Keyframe::GetDelta(int64_t index)
{
// Check if it needs to be processed
if (needs_update)
@@ -429,7 +429,7 @@ double Keyframe::GetDelta(long int index)
}
// Get a point at a specific index
-Point& Keyframe::GetPoint(long int index) throw(OutOfBoundsPoint) {
+Point& Keyframe::GetPoint(int64_t index) throw(OutOfBoundsPoint) {
// Is index a valid point?
if (index >= 0 && index < Points.size())
return Points[index];
@@ -439,7 +439,7 @@ Point& Keyframe::GetPoint(long int index) throw(OutOfBoundsPoint) {
}
// Get the number of values (i.e. coordinates on the X axis)
-long int Keyframe::GetLength() {
+int64_t Keyframe::GetLength() {
// Check if it needs to be processed
if (needs_update)
Process();
@@ -449,7 +449,7 @@ long int Keyframe::GetLength() {
}
// Get the number of points (i.e. # of points)
-long int Keyframe::GetCount() {
+int64_t Keyframe::GetCount() {
// return the size of the Values vector
return Points.size();
@@ -461,7 +461,7 @@ void Keyframe::RemovePoint(Point p) throw(OutOfBoundsPoint) {
needs_update = true;
// loop through points, and find a matching coordinate
- for (long int x = 0; x < Points.size(); x++) {
+ for (int64_t x = 0; x < Points.size(); x++) {
// Get each point
Point existing_point = Points[x];
@@ -478,7 +478,7 @@ void Keyframe::RemovePoint(Point p) throw(OutOfBoundsPoint) {
}
// Remove a point by index
-void Keyframe::RemovePoint(long int index) throw(OutOfBoundsPoint) {
+void Keyframe::RemovePoint(int64_t index) throw(OutOfBoundsPoint) {
// mark as dirty
needs_update = true;
@@ -493,7 +493,7 @@ void Keyframe::RemovePoint(long int index) throw(OutOfBoundsPoint) {
throw OutOfBoundsPoint("Invalid point requested", index, Points.size());
}
-void Keyframe::UpdatePoint(long int index, Point p) {
+void Keyframe::UpdatePoint(int64_t index, Point p) {
// mark as dirty
needs_update = true;
@@ -551,7 +551,7 @@ void Keyframe::Process() {
Point p1 = Points[0];
if (Points.size() > 1)
// Fill in previous X values (before 1st point)
- for (long int x = 0; x < p1.co.X; x++)
+ for (int64_t x = 0; x < p1.co.X; x++)
Values.push_back(Coordinate(Values.size(), p1.co.Y));
else
// Add a single value (since we only have 1 point)
@@ -560,7 +560,7 @@ void Keyframe::Process() {
// Loop through each pair of points (1 less than the max points). Each
// pair of points is used to process a segment of the keyframe.
Point p2(0, 0);
- for (long int x = 0; x < Points.size() - 1; x++) {
+ for (int64_t x = 0; x < Points.size() - 1; x++) {
p1 = Points[x];
p2 = Points[x + 1];
@@ -572,11 +572,11 @@ void Keyframe::Process() {
// when time mapping, to determine what direction the audio waveforms play.
bool increasing = true;
int repeat_count = 1;
- long int last_value = 0;
+ int64_t last_value = 0;
for (vector<Coordinate>::iterator it = Values.begin() + 1; it != Values.end(); it++) {
int current_value = long(round((*it).Y));
- long int next_value = long(round((*it).Y));
- long int prev_value = long(round((*it).Y));
+ int64_t next_value = long(round((*it).Y));
+ int64_t prev_value = long(round((*it).Y));
if (it + 1 != Values.end())
next_value = long(round((*(it + 1)).Y));
if (it - 1 >= Values.begin())
@@ -584,7 +584,7 @@ void Keyframe::Process() {
// Loop forward and look for the next unique value (to determine direction)
for (vector<Coordinate>::iterator direction_it = it + 1; direction_it != Values.end(); direction_it++) {
- long int next = long(round((*direction_it).Y));
+ int64_t next = long(round((*direction_it).Y));
// Detect direction
if (current_value < next)
@@ -613,7 +613,7 @@ void Keyframe::Process() {
// Detect how many 'more' times it's repeated
int additional_repeats = 0;
for (vector<Coordinate>::iterator repeat_it = it + 1; repeat_it != Values.end(); repeat_it++) {
- long int next = long(round((*repeat_it).Y));
+ int64_t next = long(round((*repeat_it).Y));
if (next == current_value)
// repeated, so increment count
additional_repeats++;
@@ -639,7 +639,7 @@ void Keyframe::Process() {
void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
// Determine the number of values for this segment
- long int number_of_values = round(p2.co.X) - round(p1.co.X);
+ int64_t number_of_values = round(p2.co.X) - round(p1.co.X);
// Exit function if no values
if (number_of_values == 0)
@@ -669,7 +669,7 @@ void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
current_value += value_increment;
// Add each increment to the values vector
- for (long int x = 0; x < number_of_values; x++) {
+ for (int64_t x = 0; x < number_of_values; x++) {
// add value as a coordinate to the "values" vector
Values.push_back(Coordinate(Values.size(), current_value));
@@ -700,8 +700,8 @@ void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
segment_coordinates.push_back(p2.co);
vector<Coordinate> raw_coordinates;
- long int npts = segment_coordinates.size();
- long int icount, jcount;
+ int64_t npts = segment_coordinates.size();
+ int64_t icount, jcount;
double step, t;
double last_x = -1; // small number init, to track the last used x
@@ -711,7 +711,7 @@ void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
step = (double) 1.0 / (number_of_values - 1);
- for (long int i1 = 0; i1 < number_of_values; i1++) {
+ for (int64_t i1 = 0; i1 < number_of_values; i1++) {
if ((1.0 - t) < 5e-6)
t = 1.0;
@@ -720,7 +720,7 @@ void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
double new_x = 0.0f;
double new_y = 0.0f;
- for (long int i = 0; i < npts; i++) {
+ for (int64_t i = 0; i < npts; i++) {
Coordinate co = segment_coordinates[i];
double basis = Bernstein(npts - 1, i, t);
new_x += basis * co.X;
@@ -740,9 +740,9 @@ void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
// Loop through the raw coordinates, and map them correctly to frame numbers. For example,
// we can't have duplicate X values, since X represents our frame numbers.
- long int current_frame = p1.co.X;
+ int64_t current_frame = p1.co.X;
double current_value = p1.co.Y;
- for (long int i = 0; i < raw_coordinates.size(); i++)
+ for (int64_t i = 0; i < raw_coordinates.size(); i++)
{
// Get the raw coordinate
Coordinate raw = raw_coordinates[i];
@@ -753,8 +753,8 @@ void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
else
{
// Missing X values (use last known Y values)
- long int number_of_missing = round(raw.X) - current_frame;
- for (long int missing = 0; missing < number_of_missing; missing++)
+ int64_t number_of_missing = round(raw.X) - current_frame;
+ for (int64_t missing = 0; missing < number_of_missing; missing++)
{
// Add new value to the vector
Coordinate new_coord(current_frame, current_value);
@@ -789,7 +789,7 @@ void Keyframe::ProcessSegment(int Segment, Point p1, Point p2) {
number_of_values++;
// Add each increment to the values vector
- for (long int x = 0; x < number_of_values; x++) {
+ for (int64_t x = 0; x < number_of_values; x++) {
if (x < (number_of_values - 1)) {
// Not the last value of this segment
// add coordinate to "values"
@@ -816,13 +816,13 @@ void Keyframe::CreateFactorialTable() {
}
// Get a factorial for a coordinate
-double Keyframe::Factorial(long int n) {
+double Keyframe::Factorial(int64_t n) {
assert(n >= 0 && n <= 3);
return FactorialLookup[n]; /* returns the value n! as a SUMORealing point number */
}
// Calculate the factorial function for Bernstein basis
-double Keyframe::Ni(long int n, long int i) {
+double Keyframe::Ni(int64_t n, int64_t i) {
double ni;
double a1 = Factorial(n);
double a2 = Factorial(i);
@@ -832,7 +832,7 @@ double Keyframe::Ni(long int n, long int i) {
}
// Calculate Bernstein basis
-double Keyframe::Bernstein(long int n, long int i, double t) {
+double Keyframe::Bernstein(int64_t n, int64_t i, double t) {
double basis;
double ti; /* t^i */
double tni; /* (1 - t)^i */
@@ -858,7 +858,7 @@ double Keyframe::Bernstein(long int n, long int i, double t) {
void Keyframe::ScalePoints(double scale)
{
// Loop through each point (skipping the 1st point)
- for (long int point_index = 0; point_index < Points.size(); point_index++) {
+ for (int64_t point_index = 0; point_index < Points.size(); point_index++) {
// Skip the 1st point
if (point_index == 0)
continue;
@@ -876,7 +876,7 @@ void Keyframe::FlipPoints()
{
// Loop through each point
vector<Point> FlippedPoints;
- for (long int point_index = 0, reverse_index = Points.size() - 1; point_index < Points.size(); point_index++, reverse_index--) {
+ for (int64_t point_index = 0, reverse_index = Points.size() - 1; point_index < Points.size(); point_index++, reverse_index--) {
// Flip the points
Point p = Points[point_index];
p.co.Y = Points[reverse_index].co.Y;
diff --git a/src/Qt/AudioPlaybackThread.cpp b/src/Qt/AudioPlaybackThread.cpp
index 55d40446..2d5d6efb 100644
--- a/src/Qt/AudioPlaybackThread.cpp
+++ b/src/Qt/AudioPlaybackThread.cpp
@@ -110,13 +110,13 @@ namespace openshot
}
// Get the currently playing frame number
- long int AudioPlaybackThread::getCurrentFramePosition()
+ int64_t AudioPlaybackThread::getCurrentFramePosition()
{
return source ? source->getEstimatedFrame() : 0;
}
// Seek the audio thread
- void AudioPlaybackThread::Seek(long int new_position)
+ void AudioPlaybackThread::Seek(int64_t new_position)
{
source->Seek(new_position);
}
diff --git a/src/Qt/PlayerPrivate.cpp b/src/Qt/PlayerPrivate.cpp
index 98b3b7f1..ab21f7ea 100644
--- a/src/Qt/PlayerPrivate.cpp
+++ b/src/Qt/PlayerPrivate.cpp
@@ -90,7 +90,7 @@ namespace openshot
last_video_position = video_position;
// How many frames ahead or behind is the video thread?
- long int video_frame_diff = 0;
+ int64_t video_frame_diff = 0;
if (reader->info.has_audio && reader->info.has_video) {
if (speed != 1)
// Set audio frame again (since we are not in normal speed, and not paused)
@@ -105,7 +105,7 @@ namespace openshot
const Time t2 = Time::getCurrentTime();
// Determine how many milliseconds it took to render the frame
- int64 render_time = t2.toMilliseconds() - t1.toMilliseconds();
+ int64_t render_time = t2.toMilliseconds() - t1.toMilliseconds();
// Calculate the amount of time to sleep (by subtracting the render time)
int sleep_time = int(frame_time - render_time);
diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp
index 896d40a8..5653e84b 100644
--- a/src/Qt/VideoCacheThread.cpp
+++ b/src/Qt/VideoCacheThread.cpp
@@ -42,7 +42,7 @@ namespace openshot
}
// Get the currently playing frame number (if any)
- long int VideoCacheThread::getCurrentFramePosition()
+ int64_t VideoCacheThread::getCurrentFramePosition()
{
if (frame)
return frame->number;
@@ -51,13 +51,13 @@ namespace openshot
}
// Set the currently playing frame number (if any)
- void VideoCacheThread::setCurrentFramePosition(long int current_frame_number)
+ void VideoCacheThread::setCurrentFramePosition(int64_t current_frame_number)
{
current_display_frame = current_frame_number;
}
// Seek the reader to a particular frame number
- void VideoCacheThread::Seek(long int new_position)
+ void VideoCacheThread::Seek(int64_t new_position)
{
position = new_position;
}
diff --git a/src/Qt/VideoPlaybackThread.cpp b/src/Qt/VideoPlaybackThread.cpp
index 2ab3cbf1..cd116162 100644
--- a/src/Qt/VideoPlaybackThread.cpp
+++ b/src/Qt/VideoPlaybackThread.cpp
@@ -43,7 +43,7 @@ namespace openshot
}
// Get the currently playing frame number (if any)
- long int VideoPlaybackThread::getCurrentFramePosition()
+ int64_t VideoPlaybackThread::getCurrentFramePosition()
{
if (frame)
return frame->number;
diff --git a/src/Qt/VideoRenderer.cpp b/src/Qt/VideoRenderer.cpp
index 1b5e47c6..21e2e07b 100644
--- a/src/Qt/VideoRenderer.cpp
+++ b/src/Qt/VideoRenderer.cpp
@@ -38,7 +38,7 @@ VideoRenderer::~VideoRenderer()
}
/// Override QWidget which needs to be painted
-void VideoRenderer::OverrideWidget(long long qwidget_address)
+void VideoRenderer::OverrideWidget(int64_t qwidget_address)
{
// re-cast QWidget pointer (long) as an actual QWidget
override_widget = reinterpret_cast<QWidget*>(qwidget_address);
diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp
index 66597f96..c59e4b85 100644
--- a/src/QtImageReader.cpp
+++ b/src/QtImageReader.cpp
@@ -127,7 +127,7 @@ void QtImageReader::SetMaxSize(int width, int height)
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> QtImageReader::GetFrame(long int requested_frame) throw(ReaderClosed)
+std::shared_ptr<Frame> QtImageReader::GetFrame(int64_t requested_frame) throw(ReaderClosed)
{
// Check for open reader (or throw exception)
if (!is_open)
diff --git a/src/QtPlayer.cpp b/src/QtPlayer.cpp
index b865349e..e95726d1 100644
--- a/src/QtPlayer.cpp
+++ b/src/QtPlayer.cpp
@@ -114,7 +114,7 @@ int QtPlayer::Position()
return p->video_position;
}
-void QtPlayer::Seek(long int new_frame)
+void QtPlayer::Seek(int64_t new_frame)
{
// Check for seek
if (new_frame > 0) {
@@ -164,14 +164,14 @@ ReaderBase* QtPlayer::Reader() {
}
// Set the QWidget pointer to display the video on (as a LONG pointer id)
-void QtPlayer::SetQWidget(long long qwidget_address) {
+void QtPlayer::SetQWidget(int64_t qwidget_address) {
// Update override QWidget address on the video renderer
p->renderer->OverrideWidget(qwidget_address);
}
// Get the Renderer pointer address (for Python to cast back into a QObject)
-long long QtPlayer::GetRendererQObject() {
- return (long long)(VideoRenderer*)p->renderer;
+int64_t QtPlayer::GetRendererQObject() {
+ return (int64_t)(VideoRenderer*)p->renderer;
}
// Get the Playback speed
diff --git a/src/TextReader.cpp b/src/TextReader.cpp
index a6a703aa..dcaef865 100644
--- a/src/TextReader.cpp
+++ b/src/TextReader.cpp
@@ -143,7 +143,7 @@ void TextReader::Close()
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> TextReader::GetFrame(long int requested_frame) throw(ReaderClosed)
+std::shared_ptr<Frame> TextReader::GetFrame(int64_t requested_frame) throw(ReaderClosed)
{
if (image)
{
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index ef928a31..3b8ff45d 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -160,7 +160,7 @@ void Timeline::ApplyMapperToClips()
}
// Calculate time of a frame number, based on a framerate
-double Timeline::calculate_time(long int number, Fraction rate)
+double Timeline::calculate_time(int64_t number, Fraction rate)
{
// Get float version of fps fraction
double raw_fps = rate.ToFloat();
@@ -170,7 +170,7 @@ double Timeline::calculate_time(long int number, Fraction rate)
}
// Apply effects to the source frame (if any)
-std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, long int timeline_frame_number, int layer)
+std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1, "", -1);
@@ -212,7 +212,7 @@ std::shared_ptr Timeline::apply_effects(std::shared_ptr frame, lon
}
// Get or generate a blank frame
-std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, long int number)
+std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
{
std::shared_ptr<Frame> new_frame;
@@ -251,7 +251,7 @@ std::shared_ptr Timeline::GetOrCreateFrame(Clip* clip, long int number)
}
// Process a new layer of video or audio
-void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
+void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip)
{
// Get the clip's frame & image
std::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
@@ -634,7 +634,7 @@ bool Timeline::isEqual(double a, double b)
}
// Get an openshot::Frame object for a specific frame number of this reader.
-std::shared_ptr<Frame> Timeline::GetFrame(long int requested_frame) throw(ReaderClosed, OutOfBoundsFrame)
+std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame) throw(ReaderClosed, OutOfBoundsFrame)
{
// Adjust out of bounds frame number
if (requested_frame < 1)
@@ -684,7 +684,7 @@ std::shared_ptr Timeline::GetFrame(long int requested_frame) throw(Reader
// GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
// Determine all clip frames, and request them in order (to keep resampled audio in sequence)
- for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
+ for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
{
// Loop through clips
for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
@@ -710,7 +710,7 @@ std::shared_ptr Timeline::GetFrame(long int requested_frame) throw(Reader
{
// Loop through all requested frames
#pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
- for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
+ for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);
@@ -805,7 +805,7 @@ std::shared_ptr Timeline::GetFrame(long int requested_frame) throw(Reader
// Find intersecting clips (or non intersecting clips)
-vector<Clip*> Timeline::find_intersecting_clips(long int requested_frame, int number_of_frames, bool include)
+vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
// Find matching clips
vector<Clip*> matching_clips;
@@ -1103,8 +1103,8 @@ void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {
apply_json_to_effects(change, e);
// Calculate start and end frames that this impacts, and remove those frames from the cache
- long int new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
- long int new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
+ int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
+ int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
return; // effect found, don't update clip
@@ -1116,8 +1116,8 @@ void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {
// Calculate start and end frames that this impacts, and remove those frames from the cache
if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
- long int new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
- long int new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
+ int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
+ int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
}
@@ -1135,8 +1135,8 @@ void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {
if (existing_clip) {
// Calculate start and end frames that this impacts, and remove those frames from the cache
- long int old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
- long int old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
+ int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
+ int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
// Remove cache on clip's Reader (if found)
@@ -1164,8 +1164,8 @@ void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {
if (existing_clip) {
// Calculate start and end frames that this impacts, and remove those frames from the cache
- long int old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
- long int old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
+ int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
+ int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
// Remove clip from timeline
@@ -1225,8 +1225,8 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
// Calculate start and end frames that this impacts, and remove those frames from the cache
if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
- long int new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
- long int new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
+ int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
+ int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
}
@@ -1254,8 +1254,8 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
if (existing_effect) {
// Calculate start and end frames that this impacts, and remove those frames from the cache
- long int old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
- long int old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
+ int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
+ int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
// Update effect properties from JSON
@@ -1268,8 +1268,8 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
if (existing_effect) {
// Calculate start and end frames that this impacts, and remove those frames from the cache
- long int old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
- long int old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
+ int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
+ int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
// Remove effect from timeline
diff --git a/src/WriterBase.cpp b/src/WriterBase.cpp
index 65a10f7d..0778c94f 100644
--- a/src/WriterBase.cpp
+++ b/src/WriterBase.cpp
@@ -227,7 +227,7 @@ void WriterBase::SetJsonValue(Json::Value root) {
if (!root["duration"].isNull())
info.duration = root["duration"].asDouble();
if (!root["file_size"].isNull())
- info.file_size = (long long) root["file_size"].asUInt();
+ info.file_size = (int64_t) root["file_size"].asUInt();
if (!root["height"].isNull())
info.height = root["height"].asInt();
if (!root["width"].isNull())
@@ -257,7 +257,7 @@ void WriterBase::SetJsonValue(Json::Value root) {
if (!root["vcodec"].isNull())
info.vcodec = root["vcodec"].asString();
if (!root["video_length"].isNull())
- info.video_length = (long int) root["video_length"].asUInt();
+ info.video_length = (int64_t) root["video_length"].asUInt();
if (!root["video_stream_index"].isNull())
info.video_stream_index = root["video_stream_index"].asInt();
if (!root["video_timebase"].isNull() && root["video_timebase"].isObject()) {
diff --git a/src/bindings/python/openshot.i b/src/bindings/python/openshot.i
index 8a1b6586..4841a00e 100644
--- a/src/bindings/python/openshot.i
+++ b/src/bindings/python/openshot.i
@@ -35,6 +35,7 @@
%include "std_string.i"
%include "std_list.i"
%include "std_vector.i"
+%include <stdint.i>
/* Unhandled STL Exception Handling */
%include <std_except.i>
diff --git a/src/effects/Blur.cpp b/src/effects/Blur.cpp
index b806307b..d9ee6549 100644
--- a/src/effects/Blur.cpp
+++ b/src/effects/Blur.cpp
@@ -60,7 +60,7 @@ void Blur::init_effect_details()
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
-std::shared_ptr<Frame> Blur::GetFrame(std::shared_ptr<Frame> frame, long int frame_number)
+std::shared_ptr<Frame> Blur::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Get the frame's image
std::shared_ptr<QImage> frame_image = frame->GetImage();
@@ -299,7 +299,7 @@ void Blur::SetJsonValue(Json::Value root) {
}
// Get all properties for a specific frame
-string Blur::PropertiesJSON(long int requested_frame) {
+string Blur::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/effects/Brightness.cpp b/src/effects/Brightness.cpp
index 3f119d08..49329460 100644
--- a/src/effects/Brightness.cpp
+++ b/src/effects/Brightness.cpp
@@ -71,7 +71,7 @@ int Brightness::constrain(int color_value)
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
-std::shared_ptr<Frame> Brightness::GetFrame(std::shared_ptr<Frame> frame, long int frame_number)
+std::shared_ptr<Frame> Brightness::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Get the frame's image
std::shared_ptr<QImage> frame_image = frame->GetImage();
@@ -174,7 +174,7 @@ void Brightness::SetJsonValue(Json::Value root) {
}
// Get all properties for a specific frame
-string Brightness::PropertiesJSON(long int requested_frame) {
+string Brightness::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/effects/ChromaKey.cpp b/src/effects/ChromaKey.cpp
index 176524b1..7a8ae4d5 100644
--- a/src/effects/ChromaKey.cpp
+++ b/src/effects/ChromaKey.cpp
@@ -63,7 +63,7 @@ void ChromaKey::init_effect_details()
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
-std::shared_ptr<Frame> ChromaKey::GetFrame(std::shared_ptr<Frame> frame, long int frame_number)
+std::shared_ptr<Frame> ChromaKey::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Determine the current HSL (Hue, Saturation, Lightness) for the Chrome
int threshold = fuzz.GetInt(frame_number);
@@ -154,7 +154,7 @@ void ChromaKey::SetJsonValue(Json::Value root) {
}
// Get all properties for a specific frame
-string ChromaKey::PropertiesJSON(long int requested_frame) {
+string ChromaKey::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/effects/Deinterlace.cpp b/src/effects/Deinterlace.cpp
index 08b180fa..764e6fda 100644
--- a/src/effects/Deinterlace.cpp
+++ b/src/effects/Deinterlace.cpp
@@ -59,7 +59,7 @@ void Deinterlace::init_effect_details()
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
-std::shared_ptr<Frame> Deinterlace::GetFrame(std::shared_ptr<Frame> frame, long int frame_number)
+std::shared_ptr<Frame> Deinterlace::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Get original size of frame's image
int original_width = frame->GetImage()->width();
@@ -146,7 +146,7 @@ void Deinterlace::SetJsonValue(Json::Value root) {
}
// Get all properties for a specific frame
-string Deinterlace::PropertiesJSON(long int requested_frame) {
+string Deinterlace::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/effects/Mask.cpp b/src/effects/Mask.cpp
index de6757d8..a73ff9db 100644
--- a/src/effects/Mask.cpp
+++ b/src/effects/Mask.cpp
@@ -107,7 +107,7 @@ void Mask::set_grayscale_mask(std::shared_ptr mask_frame_image, int widt
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
-std::shared_ptr<Frame> Mask::GetFrame(std::shared_ptr<Frame> frame, long int frame_number)
+std::shared_ptr<Frame> Mask::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Get the mask image (from the mask reader)
std::shared_ptr<QImage> frame_image = frame->GetImage();
@@ -276,7 +276,7 @@ void Mask::SetJsonValue(Json::Value root) {
}
// Get all properties for a specific frame
-string Mask::PropertiesJSON(long int requested_frame) {
+string Mask::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/effects/Negate.cpp b/src/effects/Negate.cpp
index bf6ab2d6..420c7add 100644
--- a/src/effects/Negate.cpp
+++ b/src/effects/Negate.cpp
@@ -45,7 +45,7 @@ Negate::Negate()
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
-std::shared_ptr<Frame> Negate::GetFrame(std::shared_ptr<Frame> frame, long int frame_number)
+std::shared_ptr<Frame> Negate::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Make a negative of the images pixels
frame->GetImage()->invertPixels();
@@ -104,7 +104,7 @@ void Negate::SetJsonValue(Json::Value root) {
}
// Get all properties for a specific frame
-string Negate::PropertiesJSON(long int requested_frame) {
+string Negate::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/effects/Saturation.cpp b/src/effects/Saturation.cpp
index 1c4ac90a..815606c2 100644
--- a/src/effects/Saturation.cpp
+++ b/src/effects/Saturation.cpp
@@ -70,7 +70,7 @@ int Saturation::constrain(int color_value)
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
-std::shared_ptr<Frame> Saturation::GetFrame(std::shared_ptr<Frame> frame, long int frame_number)
+std::shared_ptr<Frame> Saturation::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Get the frame's image
std::shared_ptr<QImage> frame_image = frame->GetImage();
@@ -176,7 +176,7 @@ void Saturation::SetJsonValue(Json::Value root) {
}
// Get all properties for a specific frame
-string Saturation::PropertiesJSON(long int requested_frame) {
+string Saturation::PropertiesJSON(int64_t requested_frame) {
// Generate JSON properties list
Json::Value root;
diff --git a/src/examples/Example.cpp b/src/examples/Example.cpp
index 71b7447e..6c9b2ff8 100644
--- a/src/examples/Example.cpp
+++ b/src/examples/Example.cpp
@@ -50,7 +50,7 @@ int main(int argc, char* argv[]) {
cout << "** Attempt " << attempt << " **" << endl;
// Read every frame in reader as fast as possible
- for (int frame_number = 1; frame_number < r.info.video_length; frame_number++) {
+ for (int64_t frame_number = 1; frame_number < r.info.video_length; frame_number++) {
// Get frame object
std::shared_ptr<Frame> f = r.GetFrame(frame_number);
diff --git a/tests/ReaderBase_Tests.cpp b/tests/ReaderBase_Tests.cpp
index 9277eadf..afa4ccab 100644
--- a/tests/ReaderBase_Tests.cpp
+++ b/tests/ReaderBase_Tests.cpp
@@ -41,7 +41,7 @@ TEST(ReaderBase_Derived_Class)
public:
TestReader() { };
CacheBase* GetCache() { return NULL; };
- std::shared_ptr<Frame> GetFrame(long int number) { std::shared_ptr<Frame> f(new Frame()); return f; }
+ std::shared_ptr<Frame> GetFrame(int64_t number) { std::shared_ptr<Frame> f(new Frame()); return f; }
void Close() { };
void Open() { };
string Json() { };