Bug 1060179 - Use unix eol consistently in content/media/. r=whitespace.

Chris Pearce 2014-10-13 11:53:42 +13:00
parent f68f29db65
commit 2499e01958
9 changed files with 74 additions and 74 deletions

View File

@@ -142,12 +142,12 @@ nsresult FileBlockCache::ReadFromFile(int64_t aOffset,
nsresult res = Seek(aOffset);
if (NS_FAILED(res)) return res;
aBytesRead = PR_Read(mFD, aDest, aBytesToRead);
if (aBytesRead <= 0)
return NS_ERROR_FAILURE;
mFDCurrentPos += aBytesRead;
return NS_OK;
}
@@ -327,7 +327,7 @@ nsresult FileBlockCache::MoveBlock(int32_t aSourceBlockIndex, int32_t aDestBlock
// the block to file yet.
mChangeIndexList.PushBack(aDestBlockIndex);
}
// If the source block hasn't yet been written to file then the dest block
// simply contains that same write. Resolve this as a write instead.
if (sourceBlock && sourceBlock->IsWrite()) {

View File

@@ -21,7 +21,7 @@ namespace mozilla {
// Manages file I/O for the media cache. Data comes in over the network
// via callbacks on the main thread, however we don't want to write the
// incoming data to the media cache on the main thread, as this could block
// causing UI jank.
//
// So FileBlockCache provides an abstraction for a temporary file accessible
// as an array of blocks, which supports a block move operation, and
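The comment above presents FileBlockCache as a temporary file addressed as an array of fixed-size blocks with a block move operation. Below is a minimal standalone sketch of that idea; the class name, the 4096-byte block size, and the std::fstream backing are assumptions made for illustration, and the real FileBlockCache additionally defers writes and performs its I/O off the main thread.

#include <cstdint>
#include <fstream>
#include <vector>

// Illustrative only: a temporary file addressed as fixed-size blocks.
class BlockFileSketch {
public:
  static const uint32_t kBlockSize = 4096; // assumed block size

  explicit BlockFileSketch(const char* aPath)
    : mFile(aPath, std::ios::in | std::ios::out |
                   std::ios::binary | std::ios::trunc) {}

  // Write one whole block at the given block index.
  bool WriteBlock(int32_t aIndex, const uint8_t* aData) {
    mFile.seekp(int64_t(aIndex) * kBlockSize);
    mFile.write(reinterpret_cast<const char*>(aData), kBlockSize);
    return bool(mFile);
  }

  // Read one whole block at the given block index.
  bool ReadBlock(int32_t aIndex, uint8_t* aDest) {
    mFile.seekg(int64_t(aIndex) * kBlockSize);
    mFile.read(reinterpret_cast<char*>(aDest), kBlockSize);
    return bool(mFile);
  }

  // "Move" a block by copying its payload to another block index.
  bool MoveBlock(int32_t aSource, int32_t aDest) {
    std::vector<uint8_t> buf(kBlockSize);
    return ReadBlock(aSource, buf.data()) && WriteBlock(aDest, buf.data());
  }

private:
  std::fstream mFile;
};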

View File

@@ -31,7 +31,7 @@ class ReentrantMonitorAutoEnter;
* and you don't have much control over the rate. Also, transferring data
* over the Internet can be slow and/or unpredictable, so we want to read
* ahead to buffer and cache as much data as possible.
*
* The job of the media cache is to resolve this impedance mismatch.
* The media cache reads data from Necko channels into file-backed storage,
* and offers a random-access file-like API to the stream data
@@ -56,25 +56,25 @@ class ReentrantMonitorAutoEnter;
* can distribute data to any thread
* -- The cache exposes APIs so clients can detect what data is
* currently held
*
* Note that although HTTP is the most important transport and we only
* support transport-level seeking via HTTP byte-ranges, the media cache
* works with any kind of Necko channels and provides random access to
* cached data even for, e.g., FTP streams.
*
* The media cache is not persistent. It does not currently allow
* data from one load to be used by other loads, either within the same
* browser session or across browser sessions. The media cache file
* is marked "delete on close" so it will automatically disappear in the
* event of a browser crash or shutdown.
*
* The media cache is block-based. Streams are divided into blocks of a
* fixed size (currently 4K) and we cache blocks. A single cache contains
* blocks for all streams.
*
* The cache size is controlled by the media.cache_size preference
* (which is in KB). The default size is 500MB.
*
* The replacement policy predicts a "time of next use" for each block
* in the cache. When we need to free a block, the block with the latest
* "time of next use" will be evicted. Blocks are divided into
@@ -108,19 +108,19 @@ class ReentrantMonitorAutoEnter;
* next use. READAHEAD_BLOCKS have one linked list per stream, since their
* time of next use depends on stream parameters, but the other lists
* are global.
*
* A block containing a current decoder read point can contain data
* both behind and ahead of the read point. It will be classified as a
* PLAYED_BLOCK but we will give it special treatment so it is never
* evicted --- it actually contains the highest-priority readahead data
* as well as played data.
*
* "Time of next use" estimates are also used for flow control. When
* reading ahead we can predict the time of next use for the data that
* will be read. If the predicted time of next use is later than the
* prediction for all currently cached blocks, and the cache is full, then
* we should suspend reading from the Necko channel.
*
* Unfortunately suspending the Necko channel can't immediately stop the
* flow of data from the server. First our desire to suspend has to be
* transmitted to the server (in practice, Necko stops reading from the
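A standalone sketch of the suspend rule described above, treating the predicted times of next use as plain numbers; the function name and signature are invented for illustration and are not MediaCache's actual API:

#include <algorithm>
#include <vector>

// Illustrative only: decide whether to suspend a read-ahead channel.
// aCachedNextUse holds the predicted "time of next use" (seconds from now)
// for every block currently in the cache.
bool ShouldSuspendReadahead(const std::vector<double>& aCachedNextUse,
                            double aPredictedNextUseOfIncomingData,
                            bool aCacheIsFull) {
  if (!aCacheIsFull || aCachedNextUse.empty()) {
    return false; // room left, keep reading ahead
  }
  double latestCached =
      *std::max_element(aCachedNextUse.begin(), aCachedNextUse.end());
  // The incoming data would be the least valuable thing in the cache: it
  // would only evict blocks we expect to need sooner, so suspend instead.
  return aPredictedNextUseOfIncomingData > latestCached;
}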
@@ -132,7 +132,7 @@ class ReentrantMonitorAutoEnter;
* moving overflowing blocks back into the body of the cache, replacing
* less valuable blocks as they become available. We try to avoid simply
* discarding overflowing readahead data.
*
* All changes to the actual contents of the cache happen on the main
* thread, since that's where Necko's notifications happen.
*
@@ -142,7 +142,7 @@ class ReentrantMonitorAutoEnter;
* the loading of data from the beginning of the file.) The Necko channel
* is managed through ChannelMediaResource; MediaCache does not
* depend on Necko directly.
*
* Every time something changes that might affect whether we want to
* read from a Necko channel, or whether we want to seek on the Necko
* channel --- such as data arriving or data being consumed by the
@@ -152,20 +152,20 @@ class ReentrantMonitorAutoEnter;
* offset we should seek to, if any. It is also responsible for trimming
* back the cache size to its desired limit by moving overflowing blocks
* into the main part of the cache.
*
* Streams can be opened in non-seekable mode. In non-seekable mode,
* the cache will only call ChannelMediaResource::CacheClientSeek with
* a 0 offset. The cache tries hard not to discard readahead data
* for non-seekable streams, since that could trigger a potentially
* disastrous re-read of the entire stream. It's up to cache clients
* to try to avoid requesting seeks on such streams.
*
* MediaCache has a single internal monitor for all synchronization.
* This is treated as the lowest level monitor in the media code. So,
* we must not acquire any MediaDecoder locks or MediaResource locks
* while holding the MediaCache lock. But it's OK to hold those locks
* and then get the MediaCache lock.
*
* MediaCache associates a principal with each stream. CacheClientSeek
* can trigger new HTTP requests; due to redirects to other domains,
* each HTTP load can return data with a different principal. This
@@ -178,7 +178,7 @@ class MediaCache;
/**
* If the cache fails to initialize then Init will fail, so nonstatic
* methods of this class can assume gMediaCache is non-null.
*
* This class can be directly embedded as a value.
*/
class MediaCacheStream {
@@ -357,7 +357,7 @@ private:
* constant time. We declare this here so that a stream can contain a
* BlockList of its read-ahead blocks. Blocks are referred to by index
* into the MediaCache::mIndex array.
*
* Blocks can belong to more than one list at the same time, because
* the next/prev pointers are not stored in the block.
*/
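A simplified standalone model of the BlockList contract described above: the prev/next links live in the list rather than in the block, so the same block index can sit on several lists at once, and add/remove stay constant time. The real class in MediaCache.cpp is organized differently; this is only a sketch:

#include <cassert>
#include <cstdint>
#include <unordered_map>

// Illustrative only: a doubly-linked list of block indices that keeps the
// prev/next links in the list itself (not in the blocks), so the same block
// index can belong to several lists at once.
class BlockListSketch {
  struct Links { int32_t mPrev; int32_t mNext; };
  std::unordered_map<int32_t, Links> mLinks; // per-entry links, O(1) access
  int32_t mHead = -1;

public:
  // Add a block index at the front of the list in constant time.
  void AddFront(int32_t aBlock) {
    assert(!mLinks.count(aBlock));
    mLinks[aBlock] = { -1, mHead };
    if (mHead != -1) {
      mLinks[mHead].mPrev = aBlock;
    }
    mHead = aBlock;
  }

  // Remove a block index from the list in (expected) constant time.
  void Remove(int32_t aBlock) {
    auto it = mLinks.find(aBlock);
    assert(it != mLinks.end());
    Links l = it->second;
    if (l.mPrev != -1) mLinks[l.mPrev].mNext = l.mNext; else mHead = l.mNext;
    if (l.mNext != -1) mLinks[l.mNext].mPrev = l.mPrev;
    mLinks.erase(it);
  }

  bool Contains(int32_t aBlock) const { return mLinks.count(aBlock) != 0; }
};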

View File

@@ -242,7 +242,7 @@ bool AndroidMediaReader::DecodeVideoFrame(bool &aKeyframeSkip,
-1,
picture);
}
if (!v) {
return false;
}

View File

@@ -1,34 +1,34 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
include protocol PGMP;
include GMPTypes;
using GMPErr from "gmp-errors.h";
namespace mozilla {
namespace gmp {
async protocol PGMPStorage
{
manager PGMP;
child:
OpenComplete(nsCString aRecordName, GMPErr aStatus);
ReadComplete(nsCString aRecordName, GMPErr aStatus, uint8_t[] aBytes);
WriteComplete(nsCString aRecordName, GMPErr aStatus);
Shutdown();
parent:
Open(nsCString aRecordName);
Read(nsCString aRecordName);
Write(nsCString aRecordName, uint8_t[] aBytes);
Close(nsCString aRecordName);
__delete__();
};
} // namespace gmp
} // namespace mozilla
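For illustration, here is a small in-memory stand-in for the parent side of this protocol, answering each request with the matching completion message as declared above. The error enum, container types, and callback shapes are assumptions of the sketch, not generated IPDL code:

#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <vector>

// Illustrative only: an in-memory stand-in for the parent side of PGMPStorage.
// Every child request is answered with the matching *Complete message.
enum class Err { Ok, NotFound }; // stand-in for GMPErr

class StorageParentModel {
  std::map<std::string, std::vector<uint8_t>> mRecords; // record name -> bytes

public:
  void Open(const std::string& aRecordName,
            std::function<void(Err)> aOpenComplete) {
    mRecords.emplace(aRecordName, std::vector<uint8_t>());
    aOpenComplete(Err::Ok); // would be delivered as OpenComplete
  }

  void Read(const std::string& aRecordName,
            std::function<void(Err, const std::vector<uint8_t>&)> aReadComplete) {
    auto it = mRecords.find(aRecordName);
    if (it == mRecords.end()) {
      aReadComplete(Err::NotFound, std::vector<uint8_t>()); // ReadComplete, error
      return;
    }
    aReadComplete(Err::Ok, it->second); // ReadComplete with the record's bytes
  }

  void Write(const std::string& aRecordName, const std::vector<uint8_t>& aBytes,
             std::function<void(Err)> aWriteComplete) {
    mRecords[aRecordName] = aBytes;
    aWriteComplete(Err::Ok); // would be delivered as WriteComplete
  }

  void Close(const std::string& /*aRecordName*/) {
    // Closing a record has no completion message in the protocol above.
  }
};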

View File

@@ -297,7 +297,7 @@ TheoraState::DecodeHeader(ogg_packet* aPacket)
&mComment,
&mSetup,
aPacket);
// We must determine when we've read the last header packet.
// th_decode_headerin() does not tell us when it's read the last header, so
// we must keep track of the headers externally.
@@ -350,7 +350,7 @@ int64_t TheoraState::Time(th_info* aInfo, int64_t aGranulepos)
}
// Implementation of th_granule_frame inlined here to operate
// on the th_info structure instead of the theora_state.
int shift = aInfo->keyframe_granule_shift;
ogg_int64_t iframe = aGranulepos >> shift;
ogg_int64_t pframe = aGranulepos - (iframe << shift);
int64_t frameno = iframe + pframe - TH_VERSION_CHECK(aInfo, 3, 2, 1);
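A worked example of the granule decomposition above, assuming keyframe_granule_shift is 6 (the real shift comes from the stream's th_info):

#include <cstdint>
#include <cstdio>

int main() {
  // Assume keyframe_granule_shift == 6; the real value comes from th_info.
  const int shift = 6;
  const int64_t granulepos = 453;                          // example granulepos
  const int64_t iframe = granulepos >> shift;              // keyframe number: 7
  const int64_t pframe = granulepos - (iframe << shift);   // frames since it: 5
  // Theora 3.2.1+ granulepos store frame numbers starting at 1, so the
  // 0-based frame index is iframe + pframe - 1 = 11.
  printf("iframe=%lld pframe=%lld frame=%lld\n",
         (long long)iframe, (long long)pframe, (long long)(iframe + pframe - 1));
  return 0;
}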
@@ -380,7 +380,7 @@ TheoraState::MaxKeyframeOffset()
// Therefore the maximum possible time by which any frame could be offset
// from a keyframe is the duration of ((1 << granule_shift) - 1) frames.
int64_t frameDuration;
// Max number of frames keyframe could possibly be offset.
int64_t keyframeDiff = (1 << mInfo.keyframe_granule_shift) - 1;
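A worked example of that bound, with an assumed shift of 6 and an assumed 30 fps frame rate:

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed values: a granule shift of 6 and a 30 fps stream.
  const int keyframeGranuleShift = 6;
  const int64_t frameDurationUs = 1000000 / 30;                  // ~33333 us per frame
  const int64_t keyframeDiff = (1 << keyframeGranuleShift) - 1;  // 63 frames
  // Worst-case distance of any frame from its keyframe, in microseconds.
  printf("max keyframe offset: %lld us\n",
         (long long)(frameDurationUs * keyframeDiff));           // ~2.1 seconds
  return 0;
}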
@@ -492,7 +492,7 @@ void TheoraState::ReconstructTheoraGranulepos()
// Theora 3.2.1+ granulepos store frame number [1..N], so granulepos
// should be > 0.
// Theora 3.2.0 granulepos store the frame index [0..(N-1)], so
// granulepos should be >= 0.
NS_ASSERTION(granulepos >= version_3_2_1,
"Invalid granulepos for Theora version");
@@ -724,7 +724,7 @@ nsresult VorbisState::ReconstructVorbisGranulepos()
if (packet->e_o_s && packet->granulepos >= mGranulepos) {
samples = packet->granulepos - mGranulepos;
}
mGranulepos = packet->granulepos;
RecordVorbisPacketSamples(packet, samples);
return NS_OK;
@@ -1088,7 +1088,7 @@ SkeletonState::SkeletonState(ogg_page* aBosPage) :
{
MOZ_COUNT_CTOR(SkeletonState);
}
SkeletonState::~SkeletonState()
{
MOZ_COUNT_DTOR(SkeletonState);
@@ -1223,11 +1223,11 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket)
{
return (mActive = false);
}
int64_t sizeofIndex = aPacket->bytes - INDEX_KEYPOINT_OFFSET;
int64_t maxNumKeyPoints = sizeofIndex / MIN_KEY_POINT_SIZE;
if (aPacket->bytes < minPacketSize.value() ||
numKeyPoints > maxNumKeyPoints ||
numKeyPoints < 0)
{
// Packet size is less than the theoretical minimum size, or the packet is
@@ -1244,7 +1244,7 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket)
}
nsAutoPtr<nsKeyFrameIndex> keyPoints(new nsKeyFrameIndex(startTime, endTime));
p = aPacket->packet + INDEX_KEYPOINT_OFFSET;
const unsigned char* limit = aPacket->packet + aPacket->bytes;
int64_t numKeyPointsRead = 0;
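A worked example of the size check above, with invented values standing in for INDEX_KEYPOINT_OFFSET and MIN_KEY_POINT_SIZE (the real constants are defined elsewhere in the Ogg code, not in this hunk):

#include <cstdint>
#include <cstdio>

int main() {
  // Invented constants; the real INDEX_KEYPOINT_OFFSET and MIN_KEY_POINT_SIZE
  // live in the Ogg decoder sources.
  const int64_t kKeypointOffset  = 64;   // assumed bytes of header before keypoints
  const int64_t kMinKeyPointSize = 2;    // assumed minimum encoded keypoint size
  const int64_t packetBytes      = 1000; // example index packet size
  const int64_t declaredKeyPoints = 600; // keypoint count read from the packet

  const int64_t sizeofIndex     = packetBytes - kKeypointOffset;  // 936 bytes
  const int64_t maxNumKeyPoints = sizeofIndex / kMinKeyPointSize; // at most 468 fit
  const bool reject = declaredKeyPoints > maxNumKeyPoints || declaredKeyPoints < 0;
  printf("reject index packet: %d\n", reject ? 1 : 0);            // 1: 600 > 468
  return 0;
}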

View File

@@ -65,7 +65,7 @@ nsresult RawReader::ReadMetadata(MediaInfo* aInfo,
return NS_ERROR_FAILURE; // Invalid data
// Determine and verify frame display size.
float pixelAspectRatio = static_cast<float>(mMetadata.aspectNumerator) /
mMetadata.aspectDenominator;
nsIntSize display(mMetadata.frameWidth, mMetadata.frameHeight);
ScaleDisplayByAspectRatio(display, pixelAspectRatio);
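As a worked example of the aspect-ratio handling above, a 720x576 frame with a 16:15 pixel aspect ratio gives a 768x576 display size. The sketch below assumes the usual convention of widening the display when the ratio is above 1; the exact behaviour of ScaleDisplayByAspectRatio is not shown in this hunk:

#include <cstdio>

int main() {
  // Example raw frame: 720x576 with a 16:15 pixel aspect ratio (PAL DV).
  const float aspectNumerator   = 16.0f;
  const float aspectDenominator = 15.0f;
  const float pixelAspectRatio  = aspectNumerator / aspectDenominator;

  int displayWidth  = 720;
  int displayHeight = 576;
  // Assumed convention: a ratio above 1 stretches the display width,
  // a ratio below 1 is applied by growing the display height instead.
  if (pixelAspectRatio > 1.0f) {
    displayWidth = int(displayWidth * pixelAspectRatio + 0.5f);
  } else {
    displayHeight = int(displayHeight / pixelAspectRatio + 0.5f);
  }
  printf("display size: %dx%d\n", displayWidth, displayHeight); // 768x576
  return 0;
}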
@@ -126,7 +126,7 @@ RawReader::IsMediaSeekable()
return false;
}
// Helper method that either reads until it gets aLength bytes
// or returns false
bool RawReader::ReadFromResource(MediaResource *aResource, uint8_t* aBuf,
uint32_t aLength)

View File

@@ -265,7 +265,7 @@ nsresult WaveReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime,
return NS_ERROR_FAILURE;
}
double d = BytesToTime(GetDataLength());
NS_ASSERTION(d < INT64_MAX / USECS_PER_S, "Duration overflow");
int64_t duration = static_cast<int64_t>(d * USECS_PER_S);
double seekTime = std::min(aTarget, duration) / static_cast<double>(USECS_PER_S);
int64_t position = RoundDownToFrame(static_cast<int64_t>(TimeToBytes(seekTime)));
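A worked example of the seek arithmetic above for an assumed 44.1 kHz, 16-bit stereo stream (the real rates come from the parsed WAVE header):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed example format: 44100 Hz, 16-bit stereo PCM.
  const int64_t kBytesPerFrame  = 2 /*channels*/ * 2 /*bytes per sample*/;
  const int64_t kBytesPerSecond = 44100 * kBytesPerFrame;   // 176400
  const int64_t kUsecsPerSecond = 1000000;

  const int64_t dataLengthBytes = 10 * kBytesPerSecond;     // 10 seconds of audio
  const int64_t durationUsecs   = dataLengthBytes / kBytesPerSecond * kUsecsPerSecond;

  const int64_t target = 2500000;                           // seek target: 2.5 s
  const double seekSeconds =
      std::min(target, durationUsecs) / double(kUsecsPerSecond);
  int64_t position = int64_t(seekSeconds * kBytesPerSecond);
  position -= position % kBytesPerFrame;   // round down to a whole PCM frame
  printf("seek to byte offset %lld\n", (long long)position); // 441000
  return 0;
}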
@@ -291,7 +291,7 @@ nsresult WaveReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
NS_ASSERTION(endOffset >= mWavePCMOffset, "Integer underflow in GetBuffered");
// We need to round the buffered ranges' times to microseconds so that they
// have the same precision as the currentTime and duration attribute on
// the media element.
aBuffered->Add(RoundToUsecs(BytesToTime(startOffset - mWavePCMOffset)),
RoundToUsecs(BytesToTime(endOffset - mWavePCMOffset)));

View File

@@ -71,20 +71,20 @@ class WebMPacketQueue : private nsDeque {
WebMPacketQueue()
: nsDeque(new PacketQueueDeallocator())
{}
~WebMPacketQueue() {
Reset();
}
inline int32_t GetSize() {
return nsDeque::GetSize();
}
inline void Push(NesteggPacketHolder* aItem) {
NS_ASSERTION(aItem, "NULL pushed to WebMPacketQueue");
nsDeque::Push(aItem);
}
inline void PushFront(NesteggPacketHolder* aItem) {
NS_ASSERTION(aItem, "NULL pushed to WebMPacketQueue");
nsDeque::PushFront(aItem);
@@ -93,7 +93,7 @@ class WebMPacketQueue : private nsDeque {
inline NesteggPacketHolder* PopFront() {
return static_cast<NesteggPacketHolder*>(nsDeque::PopFront());
}
void Reset() {
while (GetSize() > 0) {
delete PopFront();
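A standalone model of the queue's contract, with std::deque and std::unique_ptr standing in for nsDeque and NesteggPacketHolder; illustrative only:

#include <cstdint>
#include <cstdio>
#include <deque>
#include <memory>

// Illustrative only: models the WebMPacketQueue contract (FIFO with the
// ability to push a packet back onto the front, and Reset() freeing whatever
// is still queued). PacketSketch stands in for NesteggPacketHolder.
struct PacketSketch { int mTrack; };

class PacketQueueSketch {
  std::deque<std::unique_ptr<PacketSketch>> mQueue;

public:
  int32_t GetSize() const { return int32_t(mQueue.size()); }
  void Push(std::unique_ptr<PacketSketch> aItem) { mQueue.push_back(std::move(aItem)); }
  void PushFront(std::unique_ptr<PacketSketch> aItem) { mQueue.push_front(std::move(aItem)); }
  std::unique_ptr<PacketSketch> PopFront() {
    auto item = std::move(mQueue.front());
    mQueue.pop_front();
    return item;
  }
  void Reset() { mQueue.clear(); } // unique_ptr frees any remaining packets
};

int main() {
  PacketQueueSketch q;
  q.Push(std::unique_ptr<PacketSketch>(new PacketSketch{1}));
  q.Push(std::unique_ptr<PacketSketch>(new PacketSketch{2}));
  auto first = q.PopFront();     // read a packet...
  q.PushFront(std::move(first)); // ...and push it back for a later pass
  printf("queued packets: %d\n", q.GetSize()); // 2
  return 0;
}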
@@ -116,7 +116,7 @@ public:
// If the Theora granulepos has not been captured, it may read several packets
// until one with a granulepos has been captured, to ensure that all packets
// read have valid time info.
virtual bool DecodeVideoFrame(bool &aKeyframeSkip,
int64_t aTimeThreshold);