Mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)
Bug 686805 part 4 - Make the linker load libraries with on-demand decompression when they are seekable compressed streams. r=tglek,r=sewardj
parent 5af592a8a5
commit d8fa15fe94
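For orientation before the diff: the mechanism this patch builds maps library segments with PROT_NONE, lets the first access to each chunk fault, decompresses that chunk, and then mprotect()s it to its requested protection (see MappableSeekableZStream::mmap and ensure below). The following is a minimal, stand-alone sketch of that fault-and-fill pattern only; it is not code from the patch, and the names CHUNK, fill_chunk and on_segv, the sizes, and the use of memset in place of real decompression are all invented for illustration. Error handling and strict async-signal-safety are glossed over.

// Illustrative sketch only: PROT_NONE mapping, SIGSEGV handler fills the
// faulted chunk, then flips its protection, mimicking on-demand decompression.
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

static unsigned char *base;
static size_t total;
static const size_t CHUNK = 4096;  /* hypothetical stand-in for the chunk size */

/* Hypothetical stand-in for SeekableZStream::DecompressChunk. */
static void fill_chunk(unsigned char *dst, size_t chunk)
{
  memset(dst, 'A' + chunk % 26, CHUNK);
}

static void on_segv(int, siginfo_t *info, void *)
{
  uintptr_t addr = reinterpret_cast<uintptr_t>(info->si_addr);
  uintptr_t start = reinterpret_cast<uintptr_t>(base);
  if (addr < start || addr >= start + total)
    _exit(1);                        /* fault we don't own: give up */
  size_t chunk = (addr - start) / CHUNK;
  unsigned char *page = base + chunk * CHUNK;
  /* Make the chunk writable, fill it, then drop to the final protection. */
  mprotect(page, CHUNK, PROT_READ | PROT_WRITE);
  fill_chunk(page, chunk);
  mprotect(page, CHUNK, PROT_READ);
}

int main()
{
  total = 4 * CHUNK;
  /* Map everything PROT_NONE so the first access to each chunk faults. */
  base = static_cast<unsigned char *>(
      mmap(NULL, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (base == MAP_FAILED)
    return 1;

  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_flags = SA_SIGINFO;
  sa.sa_sigaction = on_segv;
  sigaction(SIGSEGV, &sa, NULL);

  /* Each read faults once; only chunks 0 and 3 ever get "decompressed". */
  printf("%c %c\n", base[0], base[3 * CHUNK]);
  return 0;
}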
@@ -326,6 +326,7 @@ private:
      void (*func)(void);
    } f;
    f.ptr = ptr;
    debug("%s: Calling function @%p", GetPath(), ptr);
    f.func();
  }
@@ -226,17 +226,19 @@ ElfLoader::Load(const char *path, int flags, LibHandle *parent)
    zip = zips.GetZip(zip_path);
    Zip::Stream s;
    if (zip && zip->GetStream(subpath, &s)) {
      if (s.GetType() == Zip::Stream::DEFLATE) {
        /* When the MOZ_LINKER_EXTRACT environment variable is set to "1",
         * compressed libraries are going to be (temporarily) extracted as
         * files, in the directory pointed by the MOZ_LINKER_CACHE
         * environment variable. */
        const char *extract = getenv("MOZ_LINKER_EXTRACT");
        if (extract && !strncmp(extract, "1", 2 /* Including '\0' */))
          mappable = MappableExtractFile::Create(name, &s);
        /* The above may fail in some cases. */
        if (!mappable)
      /* When the MOZ_LINKER_EXTRACT environment variable is set to "1",
       * compressed libraries are going to be (temporarily) extracted as
       * files, in the directory pointed by the MOZ_LINKER_CACHE
       * environment variable. */
      const char *extract = getenv("MOZ_LINKER_EXTRACT");
      if (extract && !strncmp(extract, "1", 2 /* Including '\0' */))
        mappable = MappableExtractFile::Create(name, &s);
      if (!mappable) {
        if (s.GetType() == Zip::Stream::DEFLATE) {
          mappable = MappableDeflate::Create(name, zip, &s);
        } else if (s.GetType() == Zip::Stream::STORE) {
          mappable = MappableSeekableZStream::Create(name, zip, &s);
        }
      }
    }
  }
@@ -34,6 +34,7 @@ CPPSRCS += \
  ElfLoader.cpp \
  CustomElf.cpp \
  Mappable.cpp \
  SeekableZStream.cpp \
  $(NULL)
endif
@@ -13,6 +13,7 @@
#include <linux/ashmem.h>
#endif
#include "ElfLoader.h"
#include "SeekableZStream.h"
#include "Logging.h"

#ifndef PAGE_SIZE
@@ -80,35 +81,61 @@ MappableExtractFile::Create(const char *name, Zip::Stream *stream)
    return NULL;
  }
  AutoUnlinkFile file = path.forget();
  if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
    log("Couldn't ftruncate %s to decompress library", file.get());
    return NULL;
  }
  /* Map the temporary file for use as inflate buffer */
  MappedPtr buffer(::mmap(NULL, stream->GetUncompressedSize(), PROT_WRITE,
                          MAP_SHARED, fd, 0), stream->GetUncompressedSize());
  if (buffer == MAP_FAILED) {
    log("Couldn't map %s to decompress library", file.get());
    return NULL;
  }
  z_stream zStream = stream->GetZStream(buffer);
  if (stream->GetType() == Zip::Stream::DEFLATE) {
    if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
      log("Couldn't ftruncate %s to decompress library", file.get());
      return NULL;
    }
    /* Map the temporary file for use as inflate buffer */
    MappedPtr buffer(::mmap(NULL, stream->GetUncompressedSize(), PROT_WRITE,
                            MAP_SHARED, fd, 0), stream->GetUncompressedSize());
    if (buffer == MAP_FAILED) {
      log("Couldn't map %s to decompress library", file.get());
      return NULL;
    }

  /* Decompress */
  if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
    log("inflateInit failed: %s", zStream.msg);
    return NULL;
  }
  if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
    log("inflate failed: %s", zStream.msg);
    return NULL;
  }
  if (inflateEnd(&zStream) != Z_OK) {
    log("inflateEnd failed: %s", zStream.msg);
    return NULL;
  }
  if (zStream.total_out != stream->GetUncompressedSize()) {
    log("File not fully uncompressed! %ld / %d", zStream.total_out,
        static_cast<unsigned int>(stream->GetUncompressedSize()));
    z_stream zStream = stream->GetZStream(buffer);

    /* Decompress */
    if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
      log("inflateInit failed: %s", zStream.msg);
      return NULL;
    }
    if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
      log("inflate failed: %s", zStream.msg);
      return NULL;
    }
    if (inflateEnd(&zStream) != Z_OK) {
      log("inflateEnd failed: %s", zStream.msg);
      return NULL;
    }
    if (zStream.total_out != stream->GetUncompressedSize()) {
      log("File not fully uncompressed! %ld / %d", zStream.total_out,
          static_cast<unsigned int>(stream->GetUncompressedSize()));
      return NULL;
    }
  } else if (stream->GetType() == Zip::Stream::STORE) {
    SeekableZStream zStream;
    if (!zStream.Init(stream->GetBuffer())) {
      log("Couldn't initialize SeekableZStream for %s", name);
      return NULL;
    }
    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
      log("Couldn't ftruncate %s to decompress library", file.get());
      return NULL;
    }
    MappedPtr buffer(::mmap(NULL, zStream.GetUncompressedSize(), PROT_WRITE,
                            MAP_SHARED, fd, 0), zStream.GetUncompressedSize());
    if (buffer == MAP_FAILED) {
      log("Couldn't map %s to decompress library", file.get());
      return NULL;
    }

    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
      log("%s: failed to decompress", name);
      return NULL;
    }
  } else {
    return NULL;
  }
@@ -296,3 +323,178 @@ MappableDeflate::finalize()
  /* Remove reference to Zip archive */
  zip = NULL;
}

MappableSeekableZStream *
MappableSeekableZStream::Create(const char *name, Zip *zip,
                                Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
  AutoDeletePtr<MappableSeekableZStream> mappable =
    new MappableSeekableZStream(zip);

  if (pthread_mutex_init(&mappable->mutex, NULL))
    return NULL;

  if (!mappable->zStream.Init(stream->GetBuffer()))
    return NULL;

  mappable->buffer = _MappableBuffer::Create(name,
                       mappable->zStream.GetUncompressedSize());
  if (!mappable->buffer)
    return NULL;

  mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
  memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());

  return mappable.forget();
}

MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
: zip(zip) { }

MappableSeekableZStream::~MappableSeekableZStream()
{
  pthread_mutex_destroy(&mutex);
}

void *
MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                              int flags, off_t offset)
{
  /* Map with PROT_NONE so that accessing the mapping would segfault, and
   * bring us to ensure() */
  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
  if (res == MAP_FAILED)
    return MAP_FAILED;

  /* Store the mapping, ordered by offset and length */
  std::vector<LazyMap>::reverse_iterator it;
  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
    if ((it->offset < offset) ||
        ((it->offset == offset) && (it->length < length)))
      break;
  }
  LazyMap map = { res, length, prot, offset };
  lazyMaps.insert(it.base(), map);
  return res;
}

void
MappableSeekableZStream::munmap(void *addr, size_t length)
{
  std::vector<LazyMap>::iterator it;
  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
    if ((it->addr == addr) && (it->length == length)) {
      lazyMaps.erase(it);
      ::munmap(addr, length);
      return;
    }
  MOZ_NOT_REACHED("munmap called with unknown mapping");
}

void
MappableSeekableZStream::finalize() { }

class AutoLock {
public:
  AutoLock(pthread_mutex_t *mutex): mutex(mutex)
  {
    if (pthread_mutex_lock(mutex))
      MOZ_NOT_REACHED("pthread_mutex_lock failed");
  }
  ~AutoLock()
  {
    if (pthread_mutex_unlock(mutex))
      MOZ_NOT_REACHED("pthread_mutex_unlock failed");
  }
private:
  pthread_mutex_t *mutex;
};

bool
MappableSeekableZStream::ensure(const void *addr)
{
  debug("ensure @%p", addr);
  void *addrPage = reinterpret_cast<void *>
                   (reinterpret_cast<uintptr_t>(addr) & PAGE_MASK);
  /* Find the mapping corresponding to the given page */
  std::vector<LazyMap>::iterator map;
  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
    if (map->Contains(addrPage))
      break;
  }
  if (map == lazyMaps.end())
    return false;

  /* Find corresponding chunk */
  off_t mapOffset = map->offsetOf(addrPage);
  size_t chunk = mapOffset / zStream.GetChunkSize();

  /* In the typical case, we just need to decompress the chunk entirely. But
   * when the current mapping ends in the middle of the chunk, we want to
   * stop there. However, if another mapping needs the last part of the
   * chunk, we still need to continue. As mappings are ordered by offset
   * and length, we don't need to scan the entire list of mappings.
   * It is safe to run through lazyMaps here because the linker is never
   * going to call mmap (which adds lazyMaps) while this function is
   * called. */
  size_t length = zStream.GetChunkSize(chunk);
  size_t chunkStart = chunk * zStream.GetChunkSize();
  size_t chunkEnd = chunkStart + length;
  std::vector<LazyMap>::iterator it;
  for (it = map; it < lazyMaps.end(); ++it) {
    if (chunkEnd <= it->endOffset())
      break;
  }
  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
    /* The mapping "it" points at now is past the interesting one */
    --it;
    length = it->endOffset() - chunkStart;
  }

  AutoLock lock(&mutex);

  /* The very first page is mapped and accessed separately from the rest, and
   * as such, only the first page of the first chunk is decompressed this way.
   * When we fault in the remaining pages of that chunk, we want to decompress
   * the complete chunk again. Short of doing that, we would end up with
   * no data between PAGE_SIZE and chunkSize, which would effectively corrupt
   * symbol resolution in the underlying library. */
  if (chunkAvail[chunk] < (length + PAGE_SIZE - 1) / PAGE_SIZE) {
    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
      return false;

#if defined(ANDROID) && defined(__arm__)
    if (map->prot & PROT_EXEC) {
      /* We just extracted data that may be executed in the future.
       * We thus need to ensure Instruction and Data cache coherency. */
      debug("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
    }
#endif
    chunkAvail[chunk] = (length + PAGE_SIZE - 1) / PAGE_SIZE;
  }

  /* Flip the chunk mapping protection to the recorded flags. We could
   * also flip the protection for other mappings of the same chunk,
   * but it's easier to skip that and let further segfaults call
   * ensure again. */
  const void *chunkAddr = reinterpret_cast<const void *>
                          (reinterpret_cast<uintptr_t>(addrPage)
                           - mapOffset % zStream.GetChunkSize());
  const void *chunkEndAddr = reinterpret_cast<const void *>
                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);

  const void *start = std::max(map->addr, chunkAddr);
  const void *end = std::min(map->end(), chunkEndAddr);
  length = reinterpret_cast<uintptr_t>(end)
           - reinterpret_cast<uintptr_t>(start);

  debug("mprotect @%p, 0x%x, 0x%x", start, length, map->prot);
  if (mprotect(const_cast<void *>(start), length, map->prot) == 0)
    return true;

  log("mprotect failed");
  return false;
}
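To make the chunk arithmetic in ensure() concrete, here is a hypothetical trace; the numbers are invented, not from the patch, and assume GetChunkSize() is 0x4000 (16 KiB) and PAGE_SIZE is 4096. Suppose a fault hits a page whose map->offsetOf(addrPage) is 0x9100:

  chunk      = 0x9100 / 0x4000          = 2
  chunkStart = 2 * 0x4000               = 0x8000
  length     = GetChunkSize(2)          = 0x4000, so chunkEnd = 0xC000

If the last mapping covering that region ends at offset 0xA000 (less than chunkEnd), the loop over lazyMaps runs off the end, "it" is stepped back, and length becomes 0xA000 - 0x8000 = 0x2000. Only the two pages the mappings actually need are inflated, and chunkAvail[2] is set to (0x2000 + 4095) / 4096 = 2 pages.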
@@ -6,7 +6,9 @@
#define Mappable_h

#include <sys/types.h>
#include <pthread.h>
#include "Zip.h"
#include "SeekableZStream.h"
#include "mozilla/RefPtr.h"
#include "zlib.h"
@@ -155,4 +157,84 @@ private:
  z_stream zStream;
};

/**
 * Mappable implementation for seekable zStreams.
 * Inflates the mapped bits in a temporary buffer, on demand.
 */
class MappableSeekableZStream: public Mappable
{
public:
  ~MappableSeekableZStream();

  /**
   * Create a MappableSeekableZStream instance for the given Zip stream. The
   * name argument is used for an appropriately named temporary file, and the
   * Zip instance is given for the MappableSeekableZStream to keep a reference
   * to it.
   */
  static MappableSeekableZStream *Create(const char *name, Zip *zip,
                                         Zip::Stream *stream);

  /* Inherited from Mappable */
  virtual void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
  virtual void munmap(void *addr, size_t length);
  virtual void finalize();
  virtual bool ensure(const void *addr);

private:
  MappableSeekableZStream(Zip *zip);

  /* Zip reference */
  mozilla::RefPtr<Zip> zip;

  /* Decompression buffer */
  AutoDeletePtr<_MappableBuffer> buffer;

  /* Seekable ZStream */
  SeekableZStream zStream;

  /* Keep track of mappings performed with MappableSeekableZStream::mmap so
   * that they can be realized by MappableSeekableZStream::ensure.
   * Values stored in the struct are those passed to mmap */
  struct LazyMap
  {
    const void *addr;
    size_t length;
    int prot;
    off_t offset;

    /* Returns addr + length, as a pointer */
    const void *end() const {
      return reinterpret_cast<const void *>
             (reinterpret_cast<const unsigned char *>(addr) + length);
    }

    /* Returns offset + length */
    const off_t endOffset() const {
      return offset + length;
    }

    /* Returns the offset corresponding to the given address */
    const off_t offsetOf(const void *ptr) const {
      return reinterpret_cast<uintptr_t>(ptr)
             - reinterpret_cast<uintptr_t>(addr) + offset;
    }

    /* Returns whether the given address is in the LazyMap range */
    const bool Contains(const void *ptr) const {
      return (ptr >= addr) && (ptr < end());
    }
  };

  /* List of all mappings */
  std::vector<LazyMap> lazyMaps;

  /* Array keeping track of which chunks have already been decompressed.
   * Each value is the number of pages decompressed for the given chunk. */
  AutoDeleteArray<unsigned char> chunkAvail;

  /* Mutex protecting decompression */
  pthread_mutex_t mutex;
};

#endif /* Mappable_h */
mozglue/linker/SeekableZStream.cpp (new file)
@@ -0,0 +1,100 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <algorithm>
#include "SeekableZStream.h"
#include "Logging.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#ifndef PAGE_MASK
#define PAGE_MASK (~ (PAGE_SIZE - 1))
#endif

bool
SeekableZStream::Init(const void *buf)
{
  const SeekableZStreamHeader *header = SeekableZStreamHeader::validate(buf);
  if (!header) {
    log("Not a seekable zstream");
    return false;
  }

  buffer = reinterpret_cast<const unsigned char *>(buf);
  totalSize = header->totalSize;
  chunkSize = header->chunkSize;
  lastChunkSize = header->lastChunkSize;
  offsetTable.Init(&header[1], header->nChunks);

  /* Sanity check */
  if ((chunkSize == 0) ||
      (chunkSize % PAGE_SIZE) ||
      (chunkSize > 8 * PAGE_SIZE) ||
      (offsetTable.numElements() < 1) ||
      (lastChunkSize == 0) ||
      (lastChunkSize > chunkSize)) {
    log("Malformed or broken seekable zstream");
    return false;
  }

  return true;
}

bool
SeekableZStream::Decompress(void *where, size_t chunk, size_t length)
{
  while (length) {
    size_t len = std::min(length, static_cast<size_t>(chunkSize));
    if (!DecompressChunk(where, chunk, len))
      return false;
    where = reinterpret_cast<unsigned char *>(where) + len;
    length -= len;
    chunk++;
  }
  return true;
}

bool
SeekableZStream::DecompressChunk(void *where, size_t chunk, size_t length)
{
  if (chunk >= offsetTable.numElements()) {
    log("DecompressChunk: chunk #%ld out of range [0-%ld)",
        chunk, offsetTable.numElements());
    return false;
  }

  bool isLastChunk = (chunk == offsetTable.numElements() - 1);

  size_t chunkLen = isLastChunk ? lastChunkSize : chunkSize;

  if (length == 0 || length > chunkLen)
    length = chunkLen;

  debug("DecompressChunk #%ld @%p (%ld/%ld)", chunk, where, length, chunkLen);
  z_stream zStream;
  memset(&zStream, 0, sizeof(zStream));
  zStream.avail_in = (isLastChunk ? totalSize : uint32_t(offsetTable[chunk + 1]))
                     - uint32_t(offsetTable[chunk]);
  zStream.next_in = const_cast<Bytef *>(buffer + uint32_t(offsetTable[chunk]));
  zStream.avail_out = length;
  zStream.next_out = reinterpret_cast<Bytef *>(where);

  /* Decompress chunk */
  if (inflateInit(&zStream) != Z_OK) {
    log("inflateInit failed: %s", zStream.msg);
    return false;
  }
  if (inflate(&zStream, (length == chunkLen) ? Z_FINISH : Z_SYNC_FLUSH)
      != ((length == chunkLen) ? Z_STREAM_END : Z_OK)) {
    log("inflate failed: %s", zStream.msg);
    return false;
  }
  if (inflateEnd(&zStream) != Z_OK) {
    log("inflateEnd failed: %s", zStream.msg);
    return false;
  }
  return true;
}
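For a concrete feel of the byte ranges DecompressChunk() works with, consider a hypothetical stream (all numbers invented): three chunks, chunkSize = 16384, lastChunkSize = 5000, totalSize = 21000, and offsetTable = {20, 9000, 17000}. Then:

  chunk 1 (not last): avail_in = offsetTable[2] - offsetTable[1] = 17000 - 9000 = 8000 compressed bytes,
                      next_in  = buffer + 9000, inflated with Z_FINISH into chunkLen = 16384 bytes (expects Z_STREAM_END).
  chunk 2 (last):     avail_in = totalSize - offsetTable[2] = 21000 - 17000 = 4000 compressed bytes,
                      chunkLen = lastChunkSize = 5000.

If a caller asks for only length = 4096 of chunk 1, the flush mode becomes Z_SYNC_FLUSH and Z_OK is expected instead of Z_STREAM_END.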
@@ -45,4 +45,58 @@ struct SeekableZStreamHeader: public Zip::SignedEntity<SeekableZStreamHeader>
MOZ_STATIC_ASSERT(sizeof(SeekableZStreamHeader) == 5 * 4,
                  "SeekableZStreamHeader should be 5 32-bits words");

/**
 * Helper class used to decompress Seekable ZStreams.
 */
class SeekableZStream {
public:
  /* Initialize from the given buffer. Returns whether initialization
   * succeeded (true) or failed (false). */
  bool Init(const void *buf);

  /* Decompresses starting from the given chunk. The decompressed data is
   * stored at the given location. The given length, in bytes, indicates
   * how much data to decompress. If length is 0, then exactly one chunk
   * is decompressed.
   * Returns whether decompression succeeded (true) or failed (false). */
  bool Decompress(void *where, size_t chunk, size_t length = 0);

  /* Decompresses the given chunk at the given address. If a length is given,
   * only decompresses that amount of data instead of the entire chunk.
   * Returns whether decompression succeeded (true) or failed (false). */
  bool DecompressChunk(void *where, size_t chunk, size_t length = 0);

  /* Returns the uncompressed size of the complete zstream */
  const size_t GetUncompressedSize() const
  {
    return (offsetTable.numElements() - 1) * chunkSize + lastChunkSize;
  }

  /* Returns the chunk size of the given chunk */
  const size_t GetChunkSize(size_t chunk = 0) const {
    return (chunk == offsetTable.numElements() - 1) ? lastChunkSize : chunkSize;
  }

  /* Returns the number of chunks */
  const size_t GetChunksNum() const {
    return offsetTable.numElements();
  }

private:
  /* Raw seekable zstream buffer */
  const unsigned char *buffer;

  /* Total size of the stream, including the 4 magic bytes. */
  uint32_t totalSize;

  /* Chunk size */
  uint32_t chunkSize;

  /* Size of last chunk (> 0, <= Chunk size) */
  uint32_t lastChunkSize;

  /* Offsets table */
  Array<le_uint32> offsetTable;
};

#endif /* SeekableZStream_h */
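Using the same invented header values as above (3 chunks, chunkSize = 16384, lastChunkSize = 5000), the inline accessors evaluate to:

  GetUncompressedSize() = (3 - 1) * 16384 + 5000 = 37768
  GetChunkSize(0) = GetChunkSize(1) = 16384, GetChunkSize(2) = 5000
  GetChunksNum() = 3

Note that 16384 also satisfies the Init() sanity check: it is a multiple of PAGE_SIZE and no larger than 8 * PAGE_SIZE for 4 KiB pages.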