Bug 882608 - Fix various issues in faulty.lib with incomplete pages and MOZ_LINKER_ONDEMAND=0. r=nfroyd

This commit is contained in:
Mike Hommey 2013-07-23 07:26:07 +09:00
parent c383ac6899
commit b145205964
3 changed files with 23 additions and 26 deletions

View File

@@ -381,9 +381,10 @@ CustomElf::LoadSegment(const Phdr *pt_load) const
/* Mmap at page boundary */
Addr align = PageSize();
Addr align_offset;
void *mapped, *where;
do {
Addr align_offset = pt_load->p_vaddr - AlignedPtr(pt_load->p_vaddr, align);
align_offset = pt_load->p_vaddr - AlignedPtr(pt_load->p_vaddr, align);
where = GetPtr(pt_load->p_vaddr - align_offset);
DEBUG_LOG("%s: Loading segment @%p %c%c%c", GetPath(), where,
prot & PROT_READ ? 'r' : '-',
@@ -420,22 +421,28 @@ CustomElf::LoadSegment(const Phdr *pt_load) const
const char *ondemand = getenv("MOZ_LINKER_ONDEMAND");
if (!ElfLoader::Singleton.hasRegisteredHandler() ||
(ondemand && !strncmp(ondemand, "0", 2 /* Including '\0' */))) {
for (Addr off = 0; off < pt_load->p_filesz; off += PageSize()) {
for (Addr off = 0; off < pt_load->p_filesz + align_offset;
off += PageSize()) {
mappable->ensure(reinterpret_cast<char *>(mapped) + off);
}
}
/* When p_memsz is greater than p_filesz, we need to have nulled out memory
* after p_filesz and before p_memsz.
* Mappable::mmap already guarantees that after p_filesz and up to the end
* of the page p_filesz is in, memory is nulled out.
* Above the end of that page, and up to p_memsz, we already have nulled out
* memory because we mapped anonymous memory on the whole library virtual
* Above the end of the last page, and up to p_memsz, we already have nulled
* out memory because we mapped anonymous memory on the whole library virtual
* address space. We just need to adjust this anonymous memory protection
* flags. */
if (pt_load->p_memsz > pt_load->p_filesz) {
Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
Addr next_page = PageAlignedEndPtr(file_end);
if (next_page > file_end) {
/* The library is not registered at this point, so we can't rely on
* on-demand decompression to handle missing pages here. */
void *ptr = GetPtr(file_end);
mappable->ensure(ptr);
memset(ptr, 0, next_page - file_end);
}
if (mem_end > next_page) {
if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
LOG("%s: Failed to mprotect", GetPath());

View File

@@ -35,18 +35,8 @@ MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
MOZ_ASSERT(!(flags & MAP_SHARED));
flags |= MAP_PRIVATE;
MemoryRange mapped = MemoryRange::mmap(const_cast<void *>(addr), length,
prot, flags, fd, offset);
if (mapped == MAP_FAILED)
return mapped;
/* Fill the remainder of the last page with zeroes when the requested
* protection has write bits. */
if ((mapped != MAP_FAILED) && (prot & PROT_WRITE) &&
(PageAlignedSize(length) > length)) {
memset(mapped + length, 0, PageAlignedSize(length) - length);
}
return mapped;
return MemoryRange::mmap(const_cast<void *>(addr), length, prot, flags,
fd, offset);
}
void
@@ -453,9 +443,10 @@ MappableSeekableZStream::ensure(const void *addr)
/* In the typical case, we just need to decompress the chunk entirely. But
* when the current mapping ends in the middle of the chunk, we want to
* stop there. However, if another mapping needs the last part of the
* chunk, we still need to continue. As mappings are ordered by offset
* and length, we don't need to scan the entire list of mappings.
* stop at the end of the corresponding page.
* However, if another mapping needs the last part of the chunk, we still
* need to continue. As mappings are ordered by offset and length, we don't
* need to scan the entire list of mappings.
* It is safe to run through lazyMaps here because the linker is never
* going to call mmap (which adds lazyMaps) while this function is
* called. */
@@ -473,6 +464,8 @@ MappableSeekableZStream::ensure(const void *addr)
length = it->endOffset() - chunkStart;
}
length = PageAlignedSize(length);
AutoLock lock(&mutex);
/* The very first page is mapped and accessed separately of the rest, and

View File

@@ -16,11 +16,8 @@
/**
* Abstract class to handle mmap()ing from various kind of entities, such as
* plain files or Zip entries. The virtual members are meant to act as the
* equivalent system functions, with a few differences:
* - mapped memory is always MAP_PRIVATE, even though a given implementation
* may use something different internally.
* - memory after length and up to the end of the corresponding page is nulled
* out.
* equivalent system functions, except mapped memory is always MAP_PRIVATE,
* even though a given implementation may use something different internally.
*/
class Mappable: public mozilla::RefCounted<Mappable>
{