Allow some tags only on top level to fix --tags filtering (#3171)

The problem: when running `./runtest --large-memory --tags large-memory`,
the `--tags large-memory` filter looked for tests carrying the
`large-memory` tag. It matched the nested tag inside the test context,
but the enclosing `start_server` block did not have `large-memory`, so
the filtering logic behaved inconsistently. That is why we did not
notice that we had introduced this failure:

```
[err]: CVE-2025-32023: Sparse HLL XZERO overflow triggers crash in tests/unit/hyperloglog.tcl
Expected an error matching '*INVALIDOBJ*' but got '1' (context: type eval line 24 cmd {assert_error {*INVALIDOBJ*} {r pfadd hll_overflow foo}} proc ::test)
```

Tags `large-memory`, `needs:other-server`, `compatible-redis`, and
`network` now restricted to top-level `start_server`. Prevents `--tags`
filtering issues when these tags appear in nested contexts. Added
validation that errors on nested usage. Refactored the tests that failed
the new validation. Also fixed all `lsearch` calls to use the `-exact`
flag, to prevent bugs like "tls" matching "tls:skip".

---------

Signed-off-by: Daniil Kashapov <daniil.kashapov.ykt@gmail.com>
Co-authored-by: Viktor Söderqvist <viktor.soderqvist@est.tech>
This commit is contained in:
Daniil Kashapov
2026-02-09 16:01:30 +05:00
committed by GitHub
parent cb3195e0b8
commit 185d2ad7d9
12 changed files with 93 additions and 46 deletions
+4
View File
@@ -84,6 +84,8 @@ Tags can be applied in different context levels:
* `tags` context that bundles several tests together
* A single test context.
Some tags are restricted to top-level use only. These tags are `large-memory`, `needs:other-server`, `compatible-redis` and `network`.
The following compatibility and capability tags are currently used:
| Tag | Indicates |
@@ -103,6 +105,8 @@ The following compatibility and capability tags are currently used:
| `needs:reset` | Uses `RESET` to reset client connections. |
| `needs:save` | Uses `SAVE` or `BGSAVE` to create an RDB file. |
| `needs:other-server` | Requires `--other-server-path`. |
| `compatible-redis` | Tests that run against Redis (compatibility tests). |
| `network` | Tests that require network operations. |
| `singledb` | Test runs as if `--singledb` was given. |
| `valgrind:skip` | Not compatible with `--valgrind`. |
+43 -15
View File
@@ -1,6 +1,8 @@
set ::global_overrides {}
set ::tags {}
set ::valgrind_errors {}
# Tags that are only allowed at the top level (not in nested blocks)
set ::toplevel_only_tags {large-memory needs:other-server compatible-redis network}
proc start_server_error {executable config_file error} {
set err {}
@@ -189,6 +191,22 @@ proc server_is_up {host port retrynum} {
return 0
}
# Verify that $tags may be applied at the current nesting level.
# An empty ::tags means we are opening a top-level block, where any
# tag is permitted; a non-empty ::tags means we are nested.
proc check_subtags {tags} {
    # Top level: nothing to validate.
    if {$::tags ne {}} {
        foreach t $tags {
            # A top-level-only tag used in a nested block is an error,
            # unless an enclosing block already applied the same tag
            # (in which case it is merely inherited, not introduced).
            set restricted [expr {[lsearch -exact $::toplevel_only_tags $t] >= 0}]
            set inherited  [expr {[lsearch -exact $::tags $t] >= 0}]
            if {$restricted && !$inherited} {
                error "Test design error: Tag is only allowed on toplevel: $t"
            }
        }
    }
}
# Check if current ::tags match requested tags. If ::allowtags are used,
# there must be some intersection. If ::denytags are used, no intersection
# is allowed. Returns 1 if tags are acceptable or 0 otherwise, in which
@@ -200,7 +218,7 @@ proc tags_acceptable {tags err_return} {
if {[llength $::allowtags] > 0} {
set matched 0
foreach tag $::allowtags {
if {[lsearch $tags $tag] >= 0} {
if {[lsearch -exact $tags $tag] >= 0} {
incr matched
}
}
@@ -211,69 +229,69 @@ proc tags_acceptable {tags err_return} {
}
foreach tag $::denytags {
if {[lsearch $tags $tag] >= 0} {
if {[lsearch -exact $tags $tag] >= 0} {
set err "Tag: $tag denied"
return 0
}
}
# some units mess with the client output buffer so we can't really use the req-res logging mechanism.
if {$::log_req_res && [lsearch $tags "logreqres:skip"] >= 0} {
if {$::log_req_res && [lsearch -exact $tags "logreqres:skip"] >= 0} {
set err "Not supported when running in log-req-res mode"
return 0
}
if {$::other_server_path eq {} && [lsearch $tags "needs:other-server"] >= 0} {
if {$::other_server_path eq {} && [lsearch -exact $tags "needs:other-server"] >= 0} {
set err "Other server path not provided"
return 0
}
if {$::external && [lsearch $tags "external:skip"] >= 0} {
if {$::external && [lsearch -exact $tags "external:skip"] >= 0} {
set err "Not supported on external server"
return 0
}
if {$::debug_defrag && [lsearch $tags "debug_defrag:skip"] >= 0} {
if {$::debug_defrag && [lsearch -exact $tags "debug_defrag:skip"] >= 0} {
set err "Not supported on server compiled with DEBUG_FORCE_DEFRAG option"
return 0
}
if {$::singledb && [lsearch $tags "singledb:skip"] >= 0} {
if {$::singledb && [lsearch -exact $tags "singledb:skip"] >= 0} {
set err "Not supported on singledb"
return 0
}
if {$::cluster_mode && [lsearch $tags "cluster:skip"] >= 0} {
if {$::cluster_mode && [lsearch -exact $tags "cluster:skip"] >= 0} {
set err "Not supported in cluster mode"
return 0
}
if {$::tls && [lsearch $tags "tls:skip"] >= 0} {
if {$::tls && [lsearch -exact $tags "tls:skip"] >= 0} {
set err "Not supported in tls mode"
return 0
}
if {!$::large_memory && [lsearch $tags "large-memory"] >= 0} {
if {!$::large_memory && [lsearch -exact $tags "large-memory"] >= 0} {
set err "large memory flag not provided"
return 0
}
if {$::io_threads && [lsearch $tags "io-threads:skip"] >= 0} {
if {$::io_threads && [lsearch -exact $tags "io-threads:skip"] >= 0} {
set err "Not supported in io-threads mode"
return 0
}
if {$::tcl_version < 8.6 && [lsearch $tags "ipv6"] >= 0} {
if {$::tcl_version < 8.6 && [lsearch -exact $tags "ipv6"] >= 0} {
set err "TCL version is too low and does not support this"
return 0
}
if {[lsearch $tags "ipv6"] >= 0 && ![is_ipv6_available]} {
if {[lsearch -exact $tags "ipv6"] >= 0 && ![is_ipv6_available]} {
set err "IPv6 not available on this system"
return 0
}
if {[lsearch $tags "mptcp"] >= 0 && ![is_mptcp_available]} {
if {[lsearch -exact $tags "mptcp"] >= 0 && ![is_mptcp_available]} {
set err "MPTCP not available on this system"
return 0
}
@@ -291,6 +309,7 @@ proc tags {tags code} {
# If we 'tags' contain multiple tags, quoted and separated by spaces,
# we want to get rid of the quotes in order to have a proper list
set tags [string map { \" "" } $tags]
check_subtags $tags
set ::tags [concat $::tags $tags]
if {![tags_acceptable $::tags err]} {
incr ::num_aborted
@@ -298,8 +317,13 @@ proc tags {tags code} {
set ::tags [lrange $::tags 0 end-[llength $tags]]
return
}
uplevel 1 $code
# Catch to ensure cleanup always happens
set err_code [catch {uplevel 1 $code} result]
set ::tags [lrange $::tags 0 end-[llength $tags]]
if {$err_code} {
# Re-raise, let handler up the stack take care of this.
error $result $::errorInfo
}
}
# Write the configuration in the dictionary 'config' in the specified
@@ -499,6 +523,7 @@ proc start_server {options {code undefined}} {
# If we 'tags' contain multiple tags, quoted and separated by spaces,
# we want to get rid of the quotes in order to have a proper list
set tags [string map { \" "" } $value]
check_subtags $tags
set ::tags [concat $::tags $tags]
}
"keep_persistence" {
@@ -787,6 +812,9 @@ proc start_server {options {code undefined}} {
incr ::num_failed
send_data_packet $::test_server_fd err [join $details "\n"]
} else {
# Restore state before re-raising
set ::singledb $old_singledb
set ::tags [lrange $::tags 0 end-[llength $tags]]
# Re-raise, let handler up the stack take care of this.
error $error $backtrace
}
+1
View File
@@ -215,6 +215,7 @@ proc test {name code {okpattern undefined} {tags {}}} {
}
set old_singledb $::singledb
check_subtags $tags
set tags [concat $::tags $tags]
if {![tags_acceptable $tags err]} {
incr ::num_aborted
+13 -2
View File
@@ -139,6 +139,8 @@ set ::numclients 16
proc execute_test_file __testname {
set path "tests/$__testname.tcl"
set ::curfile $path
# Reset tags at the start of each test file
set ::tags {}
source $path
send_data_packet $::test_server_fd done "$__testname"
}
@@ -150,6 +152,8 @@ proc execute_test_file __testname {
# finished.
proc execute_test_code {__testname filename code} {
set ::curfile $filename
# Reset tags at the start of each test code execution
set ::tags {}
eval $code
send_data_packet $::test_server_fd done "$__testname"
}
@@ -683,8 +687,9 @@ proc print_help_screen {} {
" line). This option can be repeated."
"--skiptest <test> Test name or regexp pattern (if <test> starts with '/') to"
" skip. This option can be repeated."
"--tags <tags> Run only tests having specified tags or not having '-'"
" prefixed tags."
"--tags <tags> Run only tests having specified tags (allow list) or, for '-'"
" prefixed tags, skip tests with the tag (deny list). Only"
" top-level-only tags are possible in the allow list."
"--dont-clean Don't delete valkey log files after the run."
"--dont-pre-clean Don't delete existing valkey log files before the run."
"--no-latency Skip latency measurements and validation by some tests."
@@ -727,6 +732,12 @@ for {set j 0} {$j < [llength $argv]} {incr j} {
if {[string index $tag 0] eq "-"} {
lappend ::denytags [string range $tag 1 end]
} else {
# Validate that allowtags only use top-level tags
if {[lsearch -exact $::toplevel_only_tags $tag] < 0} {
puts "Error: --tags allowlist can only use top-level-only tags: large-memory, needs:other-server, compatible-redis, network"
puts "Invalid tag: $tag"
exit 1
}
lappend ::allowtags $tag
}
}
+2 -2
View File
@@ -656,7 +656,7 @@ start_server {tags {"bitops large-memory"}} {
}
r config set proto-max-bulk-len $oldval
r del mykey
} {1} {large-memory}
} {1}
test "SETBIT values larger than UINT32_MAX and lzf_compress/lzf_decompress correctly" {
set bytes [expr (1 << 32) + 1]
@@ -670,6 +670,6 @@ start_server {tags {"bitops large-memory"}} {
assert_equal 1 [r getbit mykey $bitpos]
r config set proto-max-bulk-len $oldval
r del mykey
} {1} {large-memory needs:debug}
} {1} {needs:debug}
}
} ;#run_solo
+4 -2
View File
@@ -1,4 +1,4 @@
start_server {tags {"hll"}} {
start_server {tags {"hll large-memory"}} {
test {CVE-2025-32023: Sparse HLL XZERO overflow triggers crash} {
# Build a valid HLL header for sparse encoding
set hll [binary format cccc 72 89 76 76] ; # "HYLL"
@@ -23,8 +23,10 @@ start_server {tags {"hll"}} {
# Test pfadd and pfmerge, these used to crash but now error
assert_error {*INVALIDOBJ*} {r pfmerge fail_target hll_overflow hll_merge_source}
assert_error {*INVALIDOBJ*} {r pfadd hll_overflow foo}
} {} {large-memory}
}
}
start_server {tags {"hll"}} {
test {HyperLogLog self test passes} {
catch {r pfselftest} e
set e
+1 -1
View File
@@ -178,7 +178,7 @@ run_solo {stream-large-memory} {
# restore defaults
r config set proto-max-bulk-len $original_proto
r config set client-query-buffer-limit $original_query
} {OK} {large-memory}
} {OK}
assert_equal {OK} [r module unload stream]
}
+2 -2
View File
@@ -8,7 +8,7 @@ set test_script_get "#!lua
redis.call('get','x')
return 1"
start_server {tags {"modules usercall"}} {
start_server {tags {"modules usercall network"}} {
r module load $testmodule
# baseline test that module isn't doing anything weird
@@ -134,7 +134,7 @@ start_server {tags {"modules usercall"}} {
assert_match {*cmd=usercall.call_with_user_flag*} [dict get $entry client-info]
}
start_server {tags {"wait aof network external:skip"}} {
start_server {tags {"wait aof external:skip"}} {
set slave [srv 0 client]
set slave_host [srv 0 host]
set slave_port [srv 0 port]
+7 -6
View File
@@ -368,12 +368,6 @@ start_server {tags {"scripting"}} {
set e
} {*against a key*}
test {EVAL - JSON string encoding a string larger than 2GB} {
run_script {
local s = string.rep("a", 1024 * 1024 * 1024)
return #cjson.encode(s..s..s)
} 0
} {3221225474} {large-memory} ;# length includes two double quotes at both ends
test {EVAL - JSON numeric decoding} {
# We must return the table as a string because otherwise
@@ -1235,6 +1229,13 @@ start_server {tags {"scripting"}} {
# start a new server to test the large-memory tests
start_server {tags {"scripting external:skip large-memory"}} {
test {EVAL - JSON string encoding a string larger than 2GB} {
run_script {
local s = string.rep("a", 1024 * 1024 * 1024)
return #cjson.encode(s..s..s)
} 0
} {3221225474} ;# length includes two double quotes at both ends
test {EVAL - Test long escape sequences for strings} {
r eval {
-- Generate 1gb '==...==' separator
+9 -9
View File
@@ -308,7 +308,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
assert_equal [read_big_bulk {r rpop lst}] $str_length
assert {[r llen lst] == 1}
assert_equal [read_big_bulk {r rpop lst}] $str_length
} {} {large-memory}
}
test {Test LINDEX and LINSERT on plain nodes over 4GB} {
r flushdb
@@ -323,7 +323,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
r write "*5\r\n\$7\r\nLINSERT\r\n\$3\r\nlst\r\n\$6\r\nBEFORE\r\n\$3\r\n\"9\"\r\n"
write_big_bulk 10;
assert_equal [read_big_bulk {r rpop lst}] $str_length
} {} {large-memory}
}
test {Test LTRIM on plain nodes over 4GB} {
r flushdb
@@ -335,7 +335,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
assert_equal [r llen lst] 2
assert_equal [r rpop lst] 9
assert_equal [read_big_bulk {r rpop lst}] $str_length
} {} {large-memory}
}
test {Test LREM on plain nodes over 4GB} {
r flushdb
@@ -346,7 +346,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
r LREM lst -2 "one"
assert_equal [read_big_bulk {r rpop lst}] $str_length
r llen lst
} {1} {large-memory}
} {1}
test {Test LSET on plain nodes over 4GB} {
r flushdb
@@ -358,7 +358,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
assert_equal [r rpop lst] "cc"
assert_equal [r rpop lst] "bb"
assert_equal [read_big_bulk {r rpop lst}] $str_length
} {} {large-memory}
}
test {Test LSET on plain nodes with large elements under packed_threshold over 4GB} {
r flushdb
@@ -368,7 +368,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
write_big_bulk 1000000000
}
r ping
} {PONG} {large-memory}
} {PONG}
test {Test LSET splits a quicklist node, and then merge} {
# Test when a quicklist node can't be inserted and is split, the split
@@ -389,7 +389,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
}
assert_equal "g" [r lindex lst 1]
r ping
} {PONG} {large-memory}
} {PONG}
test {Test LSET splits a LZF compressed quicklist node, and then merge} {
# Test when a LZF compressed quicklist node can't be inserted and is split,
@@ -414,7 +414,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
assert_equal "h" [r lindex lst 0]
r config set list-compress-depth 0
r ping
} {PONG} {large-memory}
} {PONG}
test {Test LMOVE on plain nodes over 4GB} {
r flushdb
@@ -432,7 +432,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
assert_equal [r lpop lst2{t}] "cc"
assert_equal [r lpop lst{t}] "dd"
assert_equal [read_big_bulk {r rpop lst{t}}] $str_length
} {} {large-memory}
}
# restore defaults
r config set proto-max-bulk-len 536870912
+1 -1
View File
@@ -1200,7 +1200,7 @@ if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
r write "*3\r\n\$4\r\nSREM\r\n\$5\r\nmyset\r\n"
assert_equal 1 [write_big_bulk $str_length "bbb"]
assert_equal [read_big_bulk {r spop myset} yes "aaa"] $str_length
} {} {large-memory}
}
# restore defaults
r config set proto-max-bulk-len 536870912
+6 -6
View File
@@ -12,7 +12,7 @@ start_server [list overrides [list save ""] tags {"large-memory"}] {
} err
assert_match {*too large*} $err
r xlen S1
} {0} {large-memory}
} {0}
}
# One XADD with one huge (exactly nearly) 4GB field
@@ -29,7 +29,7 @@ start_server [list overrides [list save ""] tags {"large-memory"}] {
} err
assert_match {*too large*} $err
r xlen S1
} {0} {large-memory}
} {0}
}
# Gradually add big stream fields using repeated XADD calls
@@ -41,7 +41,7 @@ start_server [list overrides [list save ""] tags {"large-memory"}] {
}
r ping
r xlen stream
} {10} {large-memory}
} {10}
}
# Add over 4GB to a single stream listpack (one XADD command)
@@ -57,7 +57,7 @@ start_server [list overrides [list save ""] tags {"large-memory"}] {
catch {r read} err
assert_match {*too large*} $err
r xlen S
} {0} {large-memory}
} {0}
}
# Gradually add big hash fields using repeated HSET calls
@@ -70,7 +70,7 @@ start_server [list overrides [list save ""] tags {"large-memory"}] {
r hset h $j $::str500
}
r object encoding h
} {hashtable} {large-memory}
} {hashtable}
}
# Add over 4GB to a single hash field (one HSET command)
@@ -84,7 +84,7 @@ start_server [list overrides [list save ""] tags {"large-memory"}] {
r write "\$1\r\nA\r\n"
write_big_bulk 5000000000 ;#5gb
r object encoding H1
} {hashtable} {large-memory}
} {hashtable}
}
} ;# run_solo