From ed50f18c4de7ebff81bff4d0a69fe535d7b1d78b Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 10:42:01 +0200 Subject: [PATCH 1/9] macro: add READ_NOW() macro for force reading of memory, making a copy When accessing journal files we generally are fine when values change beneath our feet, while we are looking at them, as long as they change from something valid to zero. This is required since we nowadays forcibly unallocate journal files on vacuuming, to ensure they are actually released. However, we need to make sure that the validity checks we enforce are done on suitable copies of the fields in the file. Thus provide a macro that forces a copy, and disallows the compiler from merging our copy with the actual memory where it is from. --- src/basic/macro.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/basic/macro.h b/src/basic/macro.h index 79530132e3..6ee2286833 100644 --- a/src/basic/macro.h +++ b/src/basic/macro.h @@ -585,4 +585,17 @@ static inline int __coverity_check_and_return__(int condition) { DEFINE_PUBLIC_TRIVIAL_REF_FUNC(type, name); \ DEFINE_PUBLIC_TRIVIAL_UNREF_FUNC(type, name, free_func); +/* A macro to force copying of a variable from memory. This is useful whenever we want to read something from + * memory and want to make sure the compiler won't optimize away the destination variable for us. It's not + * supposed to be a full CPU memory barrier, i.e. CPU is still allowed to reorder the reads, but it is not + * allowed to remove our local copies of the variables. We want this to work for unaligned memory, hence + * memcpy() is great for our purposes. 
*/ +#define READ_NOW(x) \ + ({ \ + typeof(x) _copy; \ + memcpy(&_copy, &(x), sizeof(_copy)); \ + asm volatile ("" : : : "memory"); \ + _copy; \ + }) + #include "log.h" From 20ee282bb7386fcc6980027b1956c07fc89fb8ad Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 10:45:31 +0200 Subject: [PATCH 2/9] journal-file: avoid risky subtraction when validity checking object The value might change beneath what we do, and hence let's avoid any chance of underflow. --- src/journal/journal-file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index bd53635860..b823a3850e 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -760,7 +760,7 @@ static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) le64toh(o->data.n_entries), offset); - if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0) + if (le64toh(o->object.size) <= offsetof(DataObject, payload)) return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Bad object size (<= %zu): %" PRIu64 ": %" PRIu64, offsetof(DataObject, payload), @@ -782,7 +782,7 @@ static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) break; case OBJECT_FIELD: - if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0) + if (le64toh(o->object.size) <= offsetof(FieldObject, payload)) return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64, offsetof(FieldObject, payload), From e6fea3063b0b91bd9444362e101af0638075b5fb Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 10:46:10 +0200 Subject: [PATCH 3/9] journal: use a bitfield where appropriate --- src/journal/journal-internal.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/journal/journal-internal.h b/src/journal/journal-internal.h index 1454df602b..e1fb01a6e3 100644 --- a/src/journal/journal-internal.h +++ 
b/src/journal/journal-internal.h @@ -41,10 +41,10 @@ struct Match { struct Location { LocationType type; - bool seqnum_set; - bool realtime_set; - bool monotonic_set; - bool xor_hash_set; + bool seqnum_set:1; + bool realtime_set:1; + bool monotonic_set:1; + bool xor_hash_set:1; uint64_t seqnum; sd_id128_t seqnum_id; From bba6e4aeecc4d223c959f5c89a0f2a68b889bf58 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 10:47:10 +0200 Subject: [PATCH 4/9] journal: use structured initialization for Location structure --- src/journal/sd-journal.c | 74 +++++++++++++++++++++++----------------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c index a739fa8aaf..720ec36145 100644 --- a/src/journal/sd-journal.c +++ b/src/journal/sd-journal.c @@ -115,28 +115,25 @@ static void detach_location(sd_journal *j) { journal_file_reset_location(f); } -static void reset_location(sd_journal *j) { - assert(j); - - detach_location(j); - zero(j->current_location); -} - static void init_location(Location *l, LocationType type, JournalFile *f, Object *o) { assert(l); assert(IN_SET(type, LOCATION_DISCRETE, LOCATION_SEEK)); assert(f); assert(o->object.type == OBJECT_ENTRY); - l->type = type; - l->seqnum = le64toh(o->entry.seqnum); - l->seqnum_id = f->header->seqnum_id; - l->realtime = le64toh(o->entry.realtime); - l->monotonic = le64toh(o->entry.monotonic); - l->boot_id = o->entry.boot_id; - l->xor_hash = le64toh(o->entry.xor_hash); - - l->seqnum_set = l->realtime_set = l->monotonic_set = l->xor_hash_set = true; + *l = (Location) { + .type = type, + .seqnum = le64toh(o->entry.seqnum), + .seqnum_id = f->header->seqnum_id, + .realtime = le64toh(o->entry.realtime), + .monotonic = le64toh(o->entry.monotonic), + .boot_id = o->entry.boot_id, + .xor_hash = le64toh(o->entry.xor_hash), + .seqnum_set = true, + .realtime_set = true, + .monotonic_set = true, + .xor_hash_set = true, + }; } static void set_location(sd_journal 
*j, JournalFile *f, Object *o) { @@ -1014,9 +1011,10 @@ _public_ int sd_journal_seek_cursor(sd_journal *j, const char *cursor) { !realtime_set) return -EINVAL; - reset_location(j); - - j->current_location.type = LOCATION_SEEK; + detach_location(j); + j->current_location = (Location) { + .type = LOCATION_SEEK, + }; if (realtime_set) { j->current_location.realtime = (uint64_t) realtime; @@ -1129,11 +1127,14 @@ _public_ int sd_journal_seek_monotonic_usec(sd_journal *j, sd_id128_t boot_id, u assert_return(j, -EINVAL); assert_return(!journal_pid_changed(j), -ECHILD); - reset_location(j); - j->current_location.type = LOCATION_SEEK; - j->current_location.boot_id = boot_id; - j->current_location.monotonic = usec; - j->current_location.monotonic_set = true; + detach_location(j); + + j->current_location = (Location) { + .type = LOCATION_SEEK, + .boot_id = boot_id, + .monotonic = usec, + .monotonic_set = true, + }; return 0; } @@ -1142,10 +1143,13 @@ _public_ int sd_journal_seek_realtime_usec(sd_journal *j, uint64_t usec) { assert_return(j, -EINVAL); assert_return(!journal_pid_changed(j), -ECHILD); - reset_location(j); - j->current_location.type = LOCATION_SEEK; - j->current_location.realtime = usec; - j->current_location.realtime_set = true; + detach_location(j); + + j->current_location = (Location) { + .type = LOCATION_SEEK, + .realtime = usec, + .realtime_set = true, + }; return 0; } @@ -1154,8 +1158,11 @@ _public_ int sd_journal_seek_head(sd_journal *j) { assert_return(j, -EINVAL); assert_return(!journal_pid_changed(j), -ECHILD); - reset_location(j); - j->current_location.type = LOCATION_HEAD; + detach_location(j); + + j->current_location = (Location) { + .type = LOCATION_HEAD, + }; return 0; } @@ -1164,8 +1171,11 @@ _public_ int sd_journal_seek_tail(sd_journal *j) { assert_return(j, -EINVAL); assert_return(!journal_pid_changed(j), -ECHILD); - reset_location(j); - j->current_location.type = LOCATION_TAIL; + detach_location(j); + + j->current_location = (Location) { + 
.type = LOCATION_TAIL, + }; return 0; } From 13933c6b6f7d01285d7ccf9db894273dd3528e8d Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 11:53:22 +0200 Subject: [PATCH 5/9] memory-util: add missing () in macro evaluation --- src/basic/memory-util.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/basic/memory-util.h b/src/basic/memory-util.h index b7e2e67e84..a6a2ccdbc2 100644 --- a/src/basic/memory-util.h +++ b/src/basic/memory-util.h @@ -12,7 +12,7 @@ size_t page_size(void) _pure_; #define PAGE_ALIGN(l) ALIGN_TO((l), page_size()) -#define PAGE_ALIGN_DOWN(l) (l & ~(page_size() - 1)) +#define PAGE_ALIGN_DOWN(l) ((l) & ~(page_size() - 1)) /* Normal memcpy requires src to be nonnull. We do nothing if n is 0. */ static inline void memcpy_safe(void *dst, const void *src, size_t n) { From 0600ff0e66f844ade3fe4e44c54dc05e67c28026 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 11:53:51 +0200 Subject: [PATCH 6/9] journal: don't assert on mmap'ed object type Mappings can be replaced by all zeroes under our feet if vacuuming decides to unallocate some file. Hence let's not check for this kind of stuff in an assert. (Typically, we should generate runtime errors in this case, in particular EBADMSG, which the callers generally look for. But in this case this is just an extra precaution check anyway, so let's just remove it.) 
--- src/journal/sd-journal.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c index 720ec36145..ca80eb2f1c 100644 --- a/src/journal/sd-journal.c +++ b/src/journal/sd-journal.c @@ -119,7 +119,6 @@ static void init_location(Location *l, LocationType type, JournalFile *f, Object assert(l); assert(IN_SET(type, LOCATION_DISCRETE, LOCATION_SEEK)); assert(f); - assert(o->object.type == OBJECT_ENTRY); *l = (Location) { .type = type, From 711398986efc2e31d7506a8bcb53e6ee13c6d9e4 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 11:56:57 +0200 Subject: [PATCH 7/9] journal: several minor coding style fixes/clean-ups --- src/journal/journal-file.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index b823a3850e..92c939abb5 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -702,7 +702,15 @@ static unsigned type_to_context(ObjectType type) { return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? 
type : 0; } -static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret, size_t *ret_size) { +static int journal_file_move_to( + JournalFile *f, + ObjectType type, + bool keep_always, + uint64_t offset, + uint64_t size, + void **ret, + size_t *ret_size) { + int r; assert(f); @@ -1011,10 +1019,10 @@ int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, O return r; o = (Object*) t; - - zero(o->object); - o->object.type = type; - o->object.size = htole64(size); + o->object = (ObjectHeader) { + .type = type, + .size = htole64(size), + }; f->header->tail_object_offset = htole64(p); f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1); @@ -3038,7 +3046,7 @@ void journal_file_dump(JournalFile *f) { if (p == le64toh(f->header->tail_object_offset)) p = 0; else - p = p + ALIGN64(le64toh(o->object.size)); + p += ALIGN64(le64toh(o->object.size)); } return; From 893e0f8fb670caff2f5717683902be08bd449f39 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 12:05:15 +0200 Subject: [PATCH 8/9] journal: make sure to explicitly copy out values of mmap before doing arithmetics on them Our journal code is generally supposed to be written in a fashion that the underlying file can be deallocated any time, i.e. our mmap of it suddenly becomes all zeroes. The idea is that we catch that when parsing everything. For that to work safely we need to make sure that when doing arithmetics or comparisons on values read from the map we don't run into TTOCTTOU issues when determining validity. Hence we need to copy out the values before use and operate on the copies. This requires some special care since the C compiler could suppress our copies as optimization. Hence use the new READ_NOW() macro to force a copy by using memcpy(), and use it whenever we start doing an arithmetic operation on it, or validity checking of multiple steps. 
Fixes: #14943 --- src/journal/journal-file.c | 162 +++++++++++++++++++++++++------------ src/journal/sd-journal.c | 5 +- 2 files changed, 116 insertions(+), 51 deletions(-) diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index 92c939abb5..f481efb6d7 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -533,7 +533,7 @@ static int journal_file_verify_header(JournalFile *f) { if (f->header->state >= _STATE_MAX) return -EBADMSG; - header_size = le64toh(f->header->header_size); + header_size = le64toh(READ_NOW(f->header->header_size)); /* The first addition was n_data, so check that we are at least this large */ if (header_size < HEADER_SIZE_MIN) @@ -542,7 +542,7 @@ static int journal_file_verify_header(JournalFile *f) { if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays)) return -EBADMSG; - arena_size = le64toh(f->header->arena_size); + arena_size = le64toh(READ_NOW(f->header->arena_size)); if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size) return -ENODATA; @@ -625,26 +625,29 @@ int journal_file_fstat(JournalFile *f) { } static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) { - uint64_t old_size, new_size; + uint64_t old_size, new_size, old_header_size, old_arena_size; int r; assert(f); assert(f->header); - /* We assume that this file is not sparse, and we know that - * for sure, since we always call posix_fallocate() - * ourselves */ + /* We assume that this file is not sparse, and we know that for sure, since we always call + * posix_fallocate() ourselves */ + + if (size > PAGE_ALIGN_DOWN(UINT64_MAX) - offset) + return -EINVAL; if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) return -EIO; - old_size = - le64toh(f->header->header_size) + - le64toh(f->header->arena_size); + old_header_size = le64toh(READ_NOW(f->header->header_size)); + old_arena_size = le64toh(READ_NOW(f->header->arena_size)); + if 
(old_arena_size > PAGE_ALIGN_DOWN(UINT64_MAX) - old_header_size) + return -EBADMSG; - new_size = PAGE_ALIGN(offset + size); - if (new_size < le64toh(f->header->header_size)) - new_size = le64toh(f->header->header_size); + old_size = old_header_size + old_arena_size; + + new_size = MAX(PAGE_ALIGN(offset + size), old_header_size); if (new_size <= old_size) { @@ -690,7 +693,7 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) if (r != 0) return -r; - f->header->arena_size = htole64(new_size - le64toh(f->header->header_size)); + f->header->arena_size = htole64(new_size - old_header_size); return journal_file_fstat(f); } @@ -719,6 +722,9 @@ static int journal_file_move_to( if (size <= 0) return -EINVAL; + if (size > UINT64_MAX - offset) + return -EBADMSG; + /* Avoid SIGBUS on invalid accesses */ if (offset + size > (uint64_t) f->last_stat.st_size) { /* Hmm, out of range? Let's refresh the fstat() data @@ -806,18 +812,22 @@ static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) offset); break; - case OBJECT_ENTRY: - if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) + case OBJECT_ENTRY: { + uint64_t sz; + + sz = le64toh(READ_NOW(o->object.size)); + if (sz < offsetof(EntryObject, items) || + (sz - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64, offsetof(EntryObject, items), - le64toh(o->object.size), + sz, offset); - if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) + if ((sz - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Invalid number items in entry: %" PRIu64 ": %" PRIu64, - (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem), + (sz - offsetof(EntryObject, items)) / sizeof(EntryItem), offset); if (le64toh(o->entry.seqnum) <= 0) @@ -839,25 +849,35 @@ 
static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) offset); break; + } case OBJECT_DATA_HASH_TABLE: - case OBJECT_FIELD_HASH_TABLE: - if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 || - (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) + case OBJECT_FIELD_HASH_TABLE: { + uint64_t sz; + + sz = le64toh(READ_NOW(o->object.size)); + if (sz < offsetof(HashTableObject, items) || + (sz - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 || + (sz - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Invalid %s hash table size: %" PRIu64 ": %" PRIu64, o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field", - le64toh(o->object.size), + sz, offset); break; + } - case OBJECT_ENTRY_ARRAY: - if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 || - (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) + case OBJECT_ENTRY_ARRAY: { + uint64_t sz; + + sz = le64toh(READ_NOW(o->object.size)); + if (sz < offsetof(EntryArrayObject, items) || + (sz - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 || + (sz - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Invalid object entry array size: %" PRIu64 ": %" PRIu64, - le64toh(o->object.size), + sz, offset); if (!VALID64(le64toh(o->entry_array.next_entry_array_offset))) @@ -867,6 +887,7 @@ static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) offset); break; + } case OBJECT_TAG: if (le64toh(o->object.size) != sizeof(TagObject)) @@ -913,7 +934,7 @@ int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset return r; o = (Object*) t; - s = le64toh(o->object.size); + s = le64toh(READ_NOW(o->object.size)); if (s == 0) return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), @@ -1003,11 +1024,21 @@ 
int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, O if (p == 0) p = le64toh(f->header->header_size); else { + uint64_t sz; + r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail); if (r < 0) return r; - p += ALIGN64(le64toh(tail->object.size)); + sz = le64toh(READ_NOW(tail->object.size)); + if (sz > UINT64_MAX - sizeof(uint64_t) + 1) + return -EBADMSG; + + sz = ALIGN64(sz); + if (p > UINT64_MAX - sz) + return -EBADMSG; + + p += sz; } r = journal_file_allocate(f, p, size); @@ -1164,7 +1195,7 @@ static int journal_file_link_field( if (o->object.type != OBJECT_FIELD) return -EINVAL; - m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem); + m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem); if (m <= 0) return -EBADMSG; @@ -1209,7 +1240,7 @@ static int journal_file_link_data( if (o->object.type != OBJECT_DATA) return -EINVAL; - m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem); if (m <= 0) return -EBADMSG; @@ -1265,7 +1296,7 @@ int journal_file_find_field_object_with_hash( osize = offsetof(Object, field.payload) + size; - m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem); + m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem); if (m <= 0) return -EBADMSG; @@ -1337,7 +1368,7 @@ int journal_file_find_data_object_with_hash( osize = offsetof(Object, data.payload) + size; - m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem); + m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem); if (m <= 0) return -EBADMSG; @@ -1359,7 +1390,7 @@ int journal_file_find_data_object_with_hash( uint64_t l; size_t rsize = 0; - l = le64toh(o->object.size); + l = le64toh(READ_NOW(o->object.size)); if (l <= offsetof(Object, data.payload)) return -EBADMSG; @@ -1584,30 +1615,47 @@ static int journal_file_append_data( } uint64_t journal_file_entry_n_items(Object *o) { + 
uint64_t sz; assert(o); if (o->object.type != OBJECT_ENTRY) return 0; - return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem); + sz = le64toh(READ_NOW(o->object.size)); + if (sz < offsetof(Object, entry.items)) + return 0; + + return (sz - offsetof(Object, entry.items)) / sizeof(EntryItem); } uint64_t journal_file_entry_array_n_items(Object *o) { + uint64_t sz; + assert(o); if (o->object.type != OBJECT_ENTRY_ARRAY) return 0; - return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t); + sz = le64toh(READ_NOW(o->object.size)); + if (sz < offsetof(Object, entry_array.items)) + return 0; + + return (sz - offsetof(Object, entry_array.items)) / sizeof(uint64_t); } uint64_t journal_file_hash_table_n_items(Object *o) { + uint64_t sz; + assert(o); if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE)) return 0; - return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem); + sz = le64toh(READ_NOW(o->object.size)); + if (sz < offsetof(Object, hash_table.items)) + return 0; + + return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem); } static int link_entry_into_array(JournalFile *f, @@ -1625,7 +1673,7 @@ static int link_entry_into_array(JournalFile *f, assert(p > 0); a = le64toh(*first); - i = hidx = le64toh(*idx); + i = hidx = le64toh(READ_NOW(*idx)); while (a > 0) { r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o); @@ -1690,6 +1738,7 @@ static int link_entry_into_array_plus_one(JournalFile *f, le64_t *idx, uint64_t p) { + uint64_t hidx; int r; assert(f); @@ -1698,18 +1747,21 @@ static int link_entry_into_array_plus_one(JournalFile *f, assert(idx); assert(p > 0); - if (*idx == 0) + hidx = le64toh(READ_NOW(*idx)); + if (hidx == UINT64_MAX) + return -EBADMSG; + if (hidx == 0) *extra = htole64(p); else { le64_t i; - i = htole64(le64toh(*idx) - 1); + i = htole64(hidx - 1); r = link_entry_into_array(f, first, &i, p); if (r < 0) return r; } 
- *idx = htole64(le64toh(*idx) + 1); + *idx = htole64(hidx + 1); return 0; } @@ -2443,6 +2495,7 @@ _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle } static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) { + uint64_t sq; Object *o; int r; @@ -2453,9 +2506,10 @@ static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) { if (r < 0) return r; - if (le64toh(o->entry.seqnum) == needle) + sq = le64toh(READ_NOW(o->entry.seqnum)); + if (sq == needle) return TEST_FOUND; - else if (le64toh(o->entry.seqnum) < needle) + else if (sq < needle) return TEST_LEFT; else return TEST_RIGHT; @@ -2481,6 +2535,7 @@ int journal_file_move_to_entry_by_seqnum( static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) { Object *o; + uint64_t rt; int r; assert(f); @@ -2490,9 +2545,10 @@ static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) { if (r < 0) return r; - if (le64toh(o->entry.realtime) == needle) + rt = le64toh(READ_NOW(o->entry.realtime)); + if (rt == needle) return TEST_FOUND; - else if (le64toh(o->entry.realtime) < needle) + else if (rt < needle) return TEST_LEFT; else return TEST_RIGHT; @@ -2518,6 +2574,7 @@ int journal_file_move_to_entry_by_realtime( static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) { Object *o; + uint64_t m; int r; assert(f); @@ -2527,9 +2584,10 @@ static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) { if (r < 0) return r; - if (le64toh(o->entry.monotonic) == needle) + m = le64toh(READ_NOW(o->entry.monotonic)); + if (m == needle) return TEST_FOUND; - else if (le64toh(o->entry.monotonic) < needle) + else if (m < needle) return TEST_LEFT; else return TEST_RIGHT; @@ -2687,7 +2745,7 @@ int journal_file_next_entry( assert(f); assert(f->header); - n = le64toh(f->header->n_entries); + n = le64toh(READ_NOW(f->header->n_entries)); if (n <= 0) return 0; @@ -2760,7 +2818,7 @@ int 
journal_file_next_entry_for_data( if (r < 0) return r; - n = le64toh(d->data.n_entries); + n = le64toh(READ_NOW(d->data.n_entries)); if (n <= 0) return n; @@ -2989,7 +3047,7 @@ void journal_file_dump(JournalFile *f) { journal_file_print_header(f); - p = le64toh(f->header->header_size); + p = le64toh(READ_NOW(f->header->header_size)); while (p != 0) { r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o); if (r < 0) @@ -3667,7 +3725,11 @@ int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint6 if (le_hash != o->data.hash) return -EBADMSG; - l = le64toh(o->object.size) - offsetof(Object, data.payload); + l = le64toh(READ_NOW(o->object.size)); + if (l < offsetof(Object, data.payload)) + return -EBADMSG; + + l -= offsetof(Object, data.payload); t = (size_t) l; /* We hit the limit on 32bit machines */ diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c index ca80eb2f1c..dc3e61e489 100644 --- a/src/journal/sd-journal.c +++ b/src/journal/sd-journal.c @@ -2365,7 +2365,10 @@ static int return_data(sd_journal *j, JournalFile *f, Object *o, const void **da uint64_t l; int compression; - l = le64toh(o->object.size) - offsetof(Object, data.payload); + l = le64toh(READ_NOW(o->object.size)); + if (l < offsetof(Object, data.payload)) + return -EBADMSG; + l -= offsetof(Object, data.payload); t = (size_t) l; /* We can't read objects larger than 4G on a 32bit machine */ From bfbd5be02ac9b02d02cc8c35027272ac3e7b9ac7 Mon Sep 17 00:00:00 2001 From: Lennart Poettering Date: Thu, 23 Apr 2020 12:11:15 +0200 Subject: [PATCH 9/9] journal: no need to check offset twice, journal_file_move_to_object() does it again --- src/journal/journal-file.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c index f481efb6d7..b1e092224f 100644 --- a/src/journal/journal-file.c +++ b/src/journal/journal-file.c @@ -1768,14 +1768,12 @@ static int link_entry_into_array_plus_one(JournalFile *f, 
static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) { uint64_t p; int r; + assert(f); assert(o); assert(offset > 0); p = le64toh(o->entry.items[i].object_offset); - if (p == 0) - return -EINVAL; - r = journal_file_move_to_object(f, OBJECT_DATA, p, &o); if (r < 0) return r;