This is a mirror of https://github.com/valkey-io/valkey.git, last synced 2026-05-06 05:26:42 -04:00.
Fix invalid memory access in RESTORE with malformed zipmap (CVE-2026-25243) (#3621)
Changes applied (3 files): 1. src/zipmap.c: Add sanity checks in zipmapValidateIntegrity() to reject entries where the decoded length < ZIPMAP_BIGLEN (254) but the encoding uses more than 1 byte. This prevents a pointer arithmetic mismatch between validation and zipmapNext() that leads to heap buffer over-reads. 2. src/rdb.c (hash zipmap conversion): Reorder the hashtableAdd()/lpSafeToAdd() checks so lpSafeToAdd() is evaluated before hashtableAdd() takes ownership of the field SDS. Add missing lpFree(lp) in the error path to fix a memory leak when the conversion fails. 3. src/rdb.c (stream consumer PEL): Remove erroneous streamFreeNACK(nack) in the "Duplicated consumer PEL entry" error path. The nack is a shared reference from the global PEL (obtained via raxFind), so freeing it here causes a double-free when the stream object is later destroyed. Test: added a regression test in tests/unit/dump.tcl that crafts a RESTORE payload with a 2-entry zipmap where the first field uses an overlong 5-byte length encoding for value 3. Post-patch, this is cleanly rejected by zipmapValidateIntegrity(). Pre-patch, the misaligned zipmapNext() reads garbage (confirmed via server log: "Hash zipmap with dup elements, or big length (0)") which also produces an error, so the test serves as a defense-in-depth regression anchor rather than a strict pass/fail differentiator. The actual heap over-read is detectable with AddressSanitizer builds. Signed-off-by: ikolomi <ikolomin@amazon.com> Co-authored-by: ikolomi <ikolomin@amazon.com>
This commit is contained in: (branch/tag list omitted from this extraction)
@@ -2359,11 +2359,12 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
|
||||
|
||||
/* search for duplicate records */
|
||||
sds field = sdstrynewlen(fstr, flen);
|
||||
if (!field || !hashtableAdd(dupSearchHashtable, field) ||
|
||||
!lpSafeToAdd(lp, (size_t)flen + vlen)) {
|
||||
if (!field || !lpSafeToAdd(lp, (size_t)flen + vlen) ||
|
||||
!hashtableAdd(dupSearchHashtable, field)) {
|
||||
rdbReportCorruptRDB("Hash zipmap with dup elements, or big length (%u)", flen);
|
||||
hashtableRelease(dupSearchHashtable);
|
||||
sdsfree(field);
|
||||
lpFree(lp);
|
||||
zfree(encoded);
|
||||
o->ptr = NULL;
|
||||
decrRefCount(o);
|
||||
@@ -2811,7 +2812,6 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
|
||||
" loading a stream consumer "
|
||||
"group");
|
||||
decrRefCount(o);
|
||||
streamFreeNACK(nack);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -196,6 +196,10 @@ int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep) {
|
||||
|
||||
/* read the field name length */
|
||||
l = zipmapDecodeLength(p);
|
||||
/* Sanity check: length < 254 must be encoded in 1 byte, not 5 bytes */
|
||||
if (l < ZIPMAP_BIGLEN && s != 1)
|
||||
return 0;
|
||||
|
||||
p += s; /* skip the encoded field size */
|
||||
p += l; /* skip the field */
|
||||
|
||||
@@ -209,6 +213,9 @@ int zipmapValidateIntegrity(unsigned char *zm, size_t size, int deep) {
|
||||
|
||||
/* read the value length */
|
||||
l = zipmapDecodeLength(p);
|
||||
/* Sanity check: length < 254 must be encoded in 1 byte, not 5 bytes */
|
||||
if (l < ZIPMAP_BIGLEN && s != 1)
|
||||
return 0;
|
||||
p += s; /* skip the encoded value size*/
|
||||
e = *p++; /* skip the encoded free space (always encoded in one byte) */
|
||||
p += l + e; /* skip the value and free space */
|
||||
|
||||
@@ -426,4 +426,41 @@ start_server {tags {"dump"}} {
|
||||
assert_match {*WRONGPASS*} $err
|
||||
}
|
||||
} {} {external:skip}
|
||||
|
||||
test {RESTORE rejects zipmap with overlong field length encoding (CVE-2026-25243)} {
|
||||
# Craft a RESTORE payload containing a hash-zipmap (RDB type 9) where
|
||||
# the field-name length is encoded using the 5-byte format (0xfe prefix)
|
||||
# even though the actual length (3) fits in a single byte.
|
||||
#
|
||||
# The bug: zipmapValidateIntegrity() walks the zipmap using the actual
|
||||
# encoded size (5 bytes for 0xfe prefix), but zipmapNext() recalculates
|
||||
# the encoding size via zipmapEncodeLength(NULL, len) which returns 1
|
||||
# for lengths < 254. This 4-byte mismatch causes zipmapNext() to read
|
||||
# at wrong offsets during the hash conversion loop after validation,
|
||||
# leading to invalid memory access (heap buffer over-read).
|
||||
#
|
||||
# Zipmap layout (2 entries, 24 bytes):
|
||||
# 02 - zmlen (2 entries)
|
||||
# fe 03000000 - field length = 3, overlong 5-byte encoding
|
||||
# 616263 - "abc"
|
||||
# 03 - value length = 3
|
||||
# 00 - free = 0
|
||||
# 646566 - "def"
|
||||
# 03 - field length = 3 (normal, padding entry)
|
||||
# 676869 - "ghi"
|
||||
# 03 - value length = 3
|
||||
# 00 - free = 0
|
||||
# 6a6b6c - "jkl"
|
||||
# ff - ZIPMAP_END
|
||||
#
|
||||
# Post-patch: zipmapValidateIntegrity() rejects (l < 254 && s != 1).
|
||||
#
|
||||
# RESTORE payload: <type=09><rdb-string-len=18><zipmap><rdb-ver=5000><crc=0>
|
||||
|
||||
r debug set-skip-checksum-validation 1
|
||||
set payload "\x09\x18\x02\xfe\x03\x00\x00\x00\x61\x62\x63\x03\x00\x64\x65\x66\x03\x67\x68\x69\x03\x00\x6a\x6b\x6c\xff\x50\x00\x00\x00\x00\x00\x00\x00\x00\x00"
|
||||
catch {r restore zipmap_test 0 $payload} err
|
||||
r debug set-skip-checksum-validation 0
|
||||
assert_match {*Bad data format*} $err
|
||||
} {} {needs:debug}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user