From d7f71f51972886b2b831199a5168478a094dc5bc Mon Sep 17 00:00:00 2001 From: Dwight Hubbard Date: Mon, 30 Nov 2015 15:11:22 -0800 Subject: [PATCH 1/5] Update the redis-server to 3.0.5 --- redis.submodule/00-RELEASENOTES | 1042 ++++++++------- redis.submodule/BUGS | 2 +- redis.submodule/COPYING | 2 +- redis.submodule/README | 19 + redis.submodule/deps/Makefile | 2 +- redis.submodule/deps/hiredis/async.c | 1 + redis.submodule/deps/hiredis/test.c | 2 +- redis.submodule/deps/linenoise/.gitignore | 4 +- .../deps/linenoise/README.markdown | 23 +- redis.submodule/deps/linenoise/example.c | 45 +- redis.submodule/deps/linenoise/linenoise.c | 79 +- redis.submodule/deps/linenoise/linenoise.h | 6 +- redis.submodule/deps/lua/src/Makefile | 5 +- redis.submodule/deps/lua/src/ldo.c | 2 +- redis.submodule/deps/lua/src/lua_cjson.c | 730 ++++++----- redis.submodule/deps/lua/src/lua_cmsgpack.c | 497 ++++++-- redis.submodule/deps/lua/src/strbuf.c | 6 +- redis.submodule/deps/lua/src/strbuf.h | 16 +- redis.submodule/redis.conf | 236 +++- redis.submodule/sentinel.conf | 2 +- redis.submodule/src/Makefile | 5 +- redis.submodule/src/Makefile.dep | 164 +-- redis.submodule/src/adlist.c | 2 +- redis.submodule/src/anet.c | 118 +- redis.submodule/src/anet.h | 4 + redis.submodule/src/aof.c | 229 +++- redis.submodule/src/bitops.c | 38 +- redis.submodule/src/config.c | 132 +- redis.submodule/src/config.h | 12 +- redis.submodule/src/db.c | 259 +++- redis.submodule/src/debug.c | 20 +- redis.submodule/src/dict.c | 197 ++- redis.submodule/src/dict.h | 1 + redis.submodule/src/endianconv.h | 10 + redis.submodule/src/fmacros.h | 1 + redis.submodule/src/hyperloglog.c | 2 +- redis.submodule/src/latency.c | 59 +- redis.submodule/src/latency.h | 5 + redis.submodule/src/memtest.c | 3 + redis.submodule/src/migrate.c | 247 ---- redis.submodule/src/networking.c | 207 ++- redis.submodule/src/object.c | 268 ++-- redis.submodule/src/pubsub.c | 11 +- redis.submodule/src/rdb.c | 405 +++++- redis.submodule/src/rdb.h | 1 + redis.submodule/src/redis-benchmark.c | 75 +- redis.submodule/src/redis-cli.c | 412 +++++- redis.submodule/src/redis.c | 655 +++++++--- redis.submodule/src/redis.h | 240 +++- redis.submodule/src/replication.c | 1126 +++++++++++++---- redis.submodule/src/rio.c | 177 ++- redis.submodule/src/rio.h | 16 + redis.submodule/src/scripting.c | 61 +- redis.submodule/src/sds.c | 23 +- redis.submodule/src/sentinel.c | 76 +- redis.submodule/src/sha1.c | 16 +- redis.submodule/src/sha1.h | 8 +- redis.submodule/src/slowlog.c | 2 +- redis.submodule/src/solarisfixes.h | 4 + redis.submodule/src/sort.c | 56 +- redis.submodule/src/sparkline.c | 3 +- redis.submodule/src/syncio.c | 1 + redis.submodule/src/t_hash.c | 7 +- redis.submodule/src/t_list.c | 52 +- redis.submodule/src/t_set.c | 11 +- redis.submodule/src/t_string.c | 22 +- redis.submodule/src/t_zset.c | 110 +- redis.submodule/src/util.c | 73 +- redis.submodule/src/version.h | 2 +- redis.submodule/src/ziplist.c | 9 +- redis.submodule/src/zipmap.c | 5 +- redis.submodule/src/zmalloc.c | 24 +- redis.submodule/src/zmalloc.h | 1 + redis.submodule/tests/instances.tcl | 71 +- redis.submodule/tests/integration/aof.tcl | 2 +- redis.submodule/tests/integration/rdb.tcl | 26 +- .../tests/integration/replication-psync.tcl | 63 +- .../tests/integration/replication.tcl | 248 ++-- redis.submodule/tests/sentinel/run.tcl | 3 +- redis.submodule/tests/support/redis.tcl | 6 +- redis.submodule/tests/support/server.tcl | 33 +- redis.submodule/tests/support/test.tcl | 2 +- redis.submodule/tests/support/util.tcl | 78 
+- redis.submodule/tests/test_helper.tcl | 80 +- redis.submodule/tests/unit/auth.tcl | 2 +- redis.submodule/tests/unit/basic.tcl | 57 +- redis.submodule/tests/unit/bitops.tcl | 2 +- redis.submodule/tests/unit/dump.tcl | 84 +- redis.submodule/tests/unit/introspection.tcl | 2 +- redis.submodule/tests/unit/maxmemory.tcl | 6 +- redis.submodule/tests/unit/memefficiency.tcl | 7 +- redis.submodule/tests/unit/scan.tcl | 10 + redis.submodule/tests/unit/scripting.tcl | 88 ++ redis.submodule/tests/unit/sort.tcl | 28 +- redis.submodule/tests/unit/type/hash.tcl | 10 +- redis.submodule/tests/unit/type/set.tcl | 1 + redis.submodule/tests/unit/type/zset.tcl | 80 ++ redis.submodule/utils/redis_init_script.tpl | 5 +- redis.submodule/utils/whatisdoing.sh | 8 +- 99 files changed, 6837 insertions(+), 2553 deletions(-) delete mode 100644 redis.submodule/src/migrate.c diff --git a/redis.submodule/00-RELEASENOTES b/redis.submodule/00-RELEASENOTES index f06ff80..67582f9 100644 --- a/redis.submodule/00-RELEASENOTES +++ b/redis.submodule/00-RELEASENOTES @@ -1,10 +1,6 @@ -Redis 2.8 release notes +Redis 3.0 release notes ======================= -** IMPORTANT ** Check the 'Migrating from 2.6 to 2.8' section at the end of - this file for information about what changed between 2.6 and - 2.8 and how this may affect your application. - -------------------------------------------------------------------------------- Upgrade urgency levels: @@ -14,59 +10,354 @@ HIGH: There is a critical bug that may affect a subset of users. Upgrade! CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. -------------------------------------------------------------------------------- ---[ Redis 2.8.17 ] Release date: 19 Sep 2014 +--[ Redis 3.0.5 ] Release date: 15 Oct 2015 + +Upgrade urgency: MODERATE, the most important thing is a fix in the replication + code that may make the slave hanging forever if the master + remains with an open socket even if it is no longer able to + reply. + +* [FIX] MOVE now moves the TTL as well. A bug lasting forever... finally + fixed thanks to Andy Grunwald that reported it. + (reported by Andy Grunwald, fixed by Salvatore Sanfilippo) +* [FIX] Fix a false positive in HSTRLEN test. +* [FIX] Fix a bug in redis-cli --pipe mode that was not able to read back + replies from the server incrementally. Now a mass import will use + a lot less memory, and you can use --pipe to do incremental streaming. + (reported by Twitter user @fsaintjacques, fixed by Salvatore + Sanfilippo) +* [FIX] Slave detection of master timeout. (fixed by Kevin McGehee, refactoring + and regression test by Salvatore Sanfilippo) -# UPGRADE URGENCY: HIGH for Redis Sentinel. - LOW for Redis Server (unmodified compared to 2.8.16). +* [NEW] Cluster: redis-trib fix can fix an additional case for opens lots. + (Salvatore Sanfilippo) +* [NEW] Cluster: redis-trib import support for --copy and --replace options + (David Thomson) + +--[ Redis 3.0.4 ] Release date: 8 Sep 2015 + +Upgrade urgency: HIGH for Redis and Sentinel. However note that in order to + fix certain replication bugs, the replication internals were + modified in a very heavy way. So while this release is + conceptually saner, it may contain regressions. For this + reason, before the release, QA activities were performed by + me (antirez) and Redis Labs and no evident bug was found. + +* [FIX] A number of bugs related to replication PSYNC and the (yet experimental) + diskless replication feature were fixed. The bugs could lead to + inconsistency between masters and slaves. 
(Salvatore Sanfilippo, Oran + Agra fixed the issue found by Yuval Inbar) +* [FIX] A replication bug in the context of PSYNC partial resynchonization was + found and fixed. This bug happens even when diskless replication is off + in the case different slaves connect at different times while the master + is creating an RDB file, and later a partial resynchronization is + attempted by a slave that connected not as the first one. (Salvatore + Sanfilippo, Oran Agra) +* [FIX] Chained replication and PSYNC interactions leading to potential stale + chained slaves data set, see issue #2694. (Salvatore Sanfilippo fixed + an issue reported by "GeorgeBJ" user at Github) +* [FIX] redis-cli --scan iteration fixed when returned cursor overflows + 32 bit signed integer. (Ofir Luzon, Yuval Inbar) +* [FIX] Sentinel: fixed a bug during the master switch process, where for a + failed conditional check, the new configuration is rewritten, during + a small window of time, in a corrupted way where the master is + also reported to be one of the slaves. This bug is rare to trigger + but apparently it happens in the wild, and the effect is to see + a replication loop where the master will try to replicate with itself. + A detailed explanation of the bug and its effects can be found in + the commit message here: https://github.com/antirez/redis/commit/c20218eb5770b2cafb12bc7092313b8358fedc0a. + The bug was found by Jan-Erik Rediger using a static analyzer and + fixed by Salvatore Sanfilippo. +* [FIX] Sentinel lack of arity checks for certain commands. + (Rogerio Goncalves, Salvatore Sanfilippo) + +* [NEW] Replication internals rewritten in order to be more resistant to bugs. + The replication handshake in the slave side was rewritten as a non + blocking state machine. (Salvatore Sanfilippo, Oran Agra) +* [NEW] New "replication capabilities" feature introduced in order to signal + from the master to the slave what are the features supported, so that + the master can choose the kind of replication to start (diskless or + not) when master and slave are of different versions. (Oran Agra, + Salvatore Sanfilippo) +* [NEW] Log clients details when SLAVEOF command is received. (Salvatore + Sanfilippo with inputs from Nick Craver and Marc Gravell). -* [FIX] Resolved a memory leak in the hiredis library causing a memory leak - in Redis Sentinel when a monitored instance or another Sentinel is - unavailable. Every reconnection attempt will leak a small amount of - memory, but in the long run the process can reach a considerable size. +--[ Redis 3.0.3 ] Release date: 17 Jul 2015 ---[ Redis 2.8.16 ] Release date: 16 Sep 2014 +Upgrade urgency: LOW for Redis and Sentinel. -# UPGRADE URGENCY: HIGH for Redis if you are using 2.8.15 + AOF. - LOW for Sentinel. +* [FIX] Fix blocking operations timeout precision when HZ is at its default + value (not increased) and there are thousands of clients connected + at the same time. This bug affected Sidekiq users that experienced + a very long delay for BLPOP and similar commands to return for + timeout. Check commit b029ff1 for more info. (Salvatore Sanfilippo) +* [FIX] MIGRATE "creating socket: Invalid argument" error fix. Check + issues #2609 and #2612 for more info. (Salvatore Sanfilippo) +* [FIX] Be able to connect to the master even when the slave is bound to + just the loopback interface and has no valid public address in the + network the master is reacahble. (Salvatore Sanfilippo) +* [FIX] ZADD with options encoding promotion fixed. 
(linfangrong) +* [FIX] Reset aof_delayed_fsync on CONFIG RESETSTATS. (Tom Kiemes) +* [FIX] PFCOUNT key parsing in cluster fixed. (MOON_CLJ) +* [FIX] Fix Solaris compilation of Redis 3.0. (Jan-Erik Rediger) -* [FIX] The ability to load truncated AOF files introduced with Redis 2.8.15 - contains a bug fixed in this release: after loading the file was not - truncated to the last valid command, so the new commands are appended - after a non well formed command. This means that: +* [NEW] Variadic EXISTS command. Now the command accepts multiple arguments + and returns the total count of existing keys. - 1) The first AOF rewrite triggered by the server will automatically - fix the problem. - 2) However, if the server is restarted before the rewrite, Redis may - not be able to load the file and you need to manually fix it. +--[ Redis 3.0.2 ] Release date: 4 Jun 2015 - In order to fix a corrupted file you should start the redis-check-aof - utility WITHOUT the --fix option, just to check the offset where the - corruption is found. Around the offset reported by the check utility - you'll find, inside your AOF file, a command which is not complete - according to the Redis protocol. Just remove this incomplete command - leafing the file unaltered before and after the offending command, - and restart the server. +Upgrade urgency: HIGH for Redis because of a security issue. + LOW for Sentinel. - IMPORTANT #1: Redis 2.8.15 is the only stable version of Redis with - this bug so probably no actual real-world problem happened since the - problem is automatically fixed at the first automatic AOF rewrite. +* [FIX] Critical security issue fix by Ben Murphy: http://t.co/LpGTyZmfS7 +* [FIX] SMOVE reply fixed when src and dst keys are the same. (Glenn Nethercutt) +* [FIX] Lua cmsgpack lib updated to support str8 type. (Sebastian Waisbrot) - IMPORTANT #2: Before upgrading to Redis 2.8.16, if you are using Redis - 2.8.15 with AOF enabled, make sure to trigger a manual AOF rewrite - using the BGREWRITEAOF command. +* [NEW] ZADD support for options: NX, XX, CH. See new doc at redis.io. + (Salvatore Sanfilippo) +* [NEW] Senitnel: CKQUORUM and FLUSHCONFIG commands back ported. + (Salvatore Sanfilippo and Bill Anderson) -* [FIX] SAVE is no longer propagated to AOF / slaves. +--[ Redis 3.0.1 ] Release date: 5 May 2015 ---[ Redis 2.8.15 ] Release date: 12 Sep 2014 +Upgrade urgency: LOW for Redis and Cluster, MODERATE for Sentinel. -# UPGRADE URGENCY: LOW for Redis, HIGH for Sentinel. +* [FIX] Sentinel memory leak due to hiredis fixed. (Salvatore Sanfilippo) +* [FIX] Sentinel memory leak on duplicated instance. (Charsyam) +* [FIX] Redis crash on Lua reaching output buffer limits. (Yossi Gottlieb) +* [FIX] Sentinel flushes config on +slave events. (Bill Anderson) -* [FIX] Sentinel critical bug fixed: the absolute majority was computed in a - wrong way because of a programming error. Now the implementation does - what the specification says and the majority to authorize a failover - (that should not be confused with the ODOWN quorum) is the majority of - *all* the Sentinels ever seen for a given master, regardless of their - current state. +--[ Redis 3.0.0 ] Release date: 1 Apr 2015 + +>> What's new in Redis 3.0 compared to Redis 2.8? + +* Redis Cluster: a distributed implementation of a subset of Redis. +* New "embedded string" object encoding resulting in less cache + misses. Big speed gain under certain work loads. 
+* AOF child -> parent final data transmission to minimize latency due + to "last write" during AOF rewrites. +* Much improved LRU approximation algorithm for keys eviction. +* WAIT command to block waiting for a write to be transmitted to + the specified number of slaves. +* MIGRATE connection caching. Much faster keys migraitons. +* MIGRATE new options COPY and REPLACE. +* CLIENT PAUSE command: stop processing client requests for a + specified amount of time. +* BITCOUNT performance improvements. +* CONFIG SET accepts memory values in different units (for example + you can use "CONFIG SET maxmemory 1gb"). +* Redis log format slightly changed reporting in each line the role of the + instance (master/slave) or if it's a saving child log. +* INCR performance improvements. + +>> Refactoring changes (no new features nor bug fixes) + +* Blocking operations full refactoring (blocked.c) +* Client output buffer memory tracking refactored. + +Changes between RC6 and 3.0.0 stable: + +>> General changes + +* Fixes to diskless replication. (Oran Agra) +* Test for BLPOP replication on role change. (Salvatore Sanfilippo) +* prepareClientToWrite() error handling improvements. (Salvatore Sanfilippo) +* Remove dict.c no longer used function. (Salvatore Sanfilippo) + +>> Cluster changes + +None + +>> Sentinel changes + +None + +--[ Redis 3.0.0 RC6 (version 2.9.106) ] Release date: 24 mar 2015 + +Upgrade urgency: HIGH because of bugs related to Redis Custer and replication. + +This is the 6th release candidate of Redis 3.0.0. This release fixes important +issues discovered during stress testing, and implements safest behavior +for blocking operations during clients reshardings, and a new much needed +functionality of Redis Cluster manual failovers. + +In order to fix certain bugs quite a bit of refactoring was needed which +is usually non advisabble in a Release Candidate, but needed in order to +end with a clean fix. + +>> General changes + +* [FIX] Redis (non clustered & clustered) replication bug involving blocking + operations: see issue #2473. (Salvatore Sanfilippo) + +>> Cluster changes + +* [FIX] clientsArePaused() fix crashing the old master during manual failover. + (Salvatore Sanfilippo) +* [FIX] Lua scripts replication in Redis Cluster was totally broken. + (Salvatore Sanfilippo) +* [FIX] Redirect clients blocked into list operations when the hash slot + they are blocked into is migrated to another instance or the cluster + state turns into "fail". (Salvatore Sanfilippo) + +* [NEW] TAKEOVER option for CLUSTER FAILOVER implemented. It is now possible + to fix a cluster manually in the minority side of the partition, for + example in order to allow for multi DC setups & recovery. + (Salvatore Sanfilippo) + +>> Sentinel changes + +No changes in Sentinel. + +--[ Redis 3.0.0 RC5 (version 2.9.105) ] Release date: 20 mar 2015 + +Upgrade urgency: Moderate for Redis Cluster users, low otherwise. + +This is the 5th release candidate of Redis 3.0.0, released in order to fix +a moderate bug in Redis Cluster. This RC does not shift in the future the +Redis 3.0.0 final release which is scheduled in a few days (we are in the +process of finishing the documentation for Redis Cluster). + +>> General changes + +* [FIX] Fix LATENCY command crash. (Salvatore Sanfilippo, thx to Ingmar) +* [FIX] Config: missing activerehashing option support in CONFIG SET added. + (Salvatore Sanfilippo, thx to Bill Anderson) +* [FIX] Fix for backtrace generation issue. 
(Mariano Pérez Rodríguez, Matt Stancliff, Salvatore Sanfilippo) + +* [NEW] Redis-cli --latency-dist backported from unstable. + (Salvatore Sanfilippo) + +>> Cluster changes + +* [FIX] Avoid redundant SELECT in MIGRATE. (Tommy Wang, Salvatore Sanfilippo) +* [FIX] More robust slave check in CLUSTER REPLICATE. (Salvatore Sanfilippo) +* [FIX] Fixed possible Redis Cluster node crash due to wrong separation of + concerns between getNodeByQuery() and Cluster global state update + fnuction. (Salvatore Sanfilippo, thx to Ingmar) + +* [NEW] Add command CLUSTER MYID to easily featch instance ID. (Michel Martens) + +>> Sentinel changes + +* [NEW] Support for CLIENT command added. It was missing in the command table. + (Leandro López) + +--[ Redis 3.0.0 RC4 (version 2.9.104) ] Release date: 13 feb 2015 + +Upgrade urgency: High for Redis if you use LRU eviction, low otherwise. + +This is the 4th release candidate of Redis 3.0.0, it fixes problems with +LRU eviction that are not present in older release (2.8.x is not affected) +and adds new tools to inspect latency and load-test LRU. + +>> General changes + +* [FIX] redis-cli CSV output NIL spurious newline removed. (Matt Collier) +* [FIX] Memory efficiency test in unit test is now much faster: it affacted + the total "make test" execution time in a bad way. (Salvatore + Sanfilippo) +* [FIX] Fixes and improvements to dict.c and LRU eviction. Redis 3.0.0 new + LRU eviction had bugs creating high latency spikes when LRU was + happening during the keys dictionary rehashing. This bug is not + present into 2.8, was 3.0 specific. As a side effect of this issue + dict.c is now improved, and LRU algorithm is more precise (better + approximates true LRU). This was a joint effort, see issue + #2306 for details. (Oran Agra, Sun He, Salvatore Sanfilippo). + Thanks to Charsyam for spotting an integer overflow. + +* [NEW] New latency tool: redis-cli --latency-dist is able to show an + xterm-256 based spectrum of latencies over time. (Salvatore Sanfilippo) +* [NEW] redis-cli --lru-test implemented (cache workload simulator). (Salvatore + Sanfilippo) +* [NEW] redis-cli --stat now shows LOAD when Redis is loading data. +* [NEW] Support "1G" etc. units in CONFIG SET. (Chris Lamb, Salvatore + Sanfilippo) + +>> Cluster changes + +* None. + +>> Sentinel changes + +* None. + +--[ Redis 3.0.0 RC3 (version 2.9.103) ] Release date: 30 jan 2015 + +Upgrade urgency: High for Redis Cluster users, low otherwise. + +This is the third release candidate for Redis 3.0.0, the new RC fixes +several critical issues with Redis Cluster. + +>> General changes + +* [FIX] AOF bug unlikely to happen in practice and mostly harmless: child + process segfaults when parent is not reachable via pipe. (Sun He) +* [FIX] Scripting engine now reports an error when misused with Lua debug + hooks, instead of crashing. (Salvatore Sanfilippo) + +>> Cluster changes + +* [FIX] Several issues with Redis Cluster internal nodes objects handling. + (Matt Stancliff & Salvatore Sanfilippo) +* [FIX] Improvements in the Cluster test. + (Matt Stancliff & Salvatore Sanfilippo). +* [FIX] Cluster memory leaks / double frees (Matt Stancliff). +* [FIX] /dev/urandom surrogate for generation of unique IDs in a more + cheap way. (Salvatore Sanfilippo) +* [FIX] Fixes and improvements to PING / PONG packets gossip sections + in order to improve (and fix) failure detection and speedup + cluster info propagation. (Salvatore Sanfilippo) + +* [NEW] CLUSTER count-failure-reports command added. 
(Salvatore Sanfilippo) + +>> Sentinel changes + +No changes for Sentinel. + +--[ Redis 3.0.0 RC2 (version 2.9.102) ] Release date: 13 jan 2015 + +Upgrade urgency: LOW. + +This is the second release candidate of Redis 3.0.0. The major changes +are back porting of things implemented into the unstable branch while +this was still possible (with the new development model adopted only +bug fixes will be merged in the future). + +RC2 also fixes a few Redis Cluster non critical bugs. + +>> General changes + +* [FIX] A number of minor bug fixes. + +* [NEW] Diskless replication backportede. +* [NEW] Lua bitops and updated cmsgpack backported. +* [NEW] Transparent Huge Pages warnings and reporting backported. + +>> Cluster changes + +* [FIX] Fix PUBLISH cluster bus message count field. +* [FIX] It is no longer possible to write outside node hash slots using Lua. +* [FIX] Valgrind warnings (no actual bugs). +* [FIX] Less strict in acceptiong myself->ip if it's not populated. + +* [NEW] Better testing of Lua scripts. + +>> Sentinel changes + +No changes to Sentinel. + +--[ Redis 3.0.0 RC1 (version 2.9.101) ] Release date: 9 oct 2014 + +This is the first release candidate of Redis 3.0.0. + +>> General changes + +* [FIX] An very large number of small fixes, old and new, merged in the + context of a the issue #1906. Please see the issue page here + for exact credits: https://github.com/antirez/redis/pull/1906 + of each commit. (Matt Stancliff and many others). +* [FIX] SAVE is no longer propagated to AOF / slaves. * [FIX] GETRANGE test no longer fails for 32 bit builds (Matt Stancliff). * [FIX] Limit SCAN latency when the hash table is in an odd state (very few populted buckets because rehashing is in progress). (Xiaost and @@ -76,475 +367,352 @@ CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. redis-check-aof utility run. The default now is to load truncated (but apparently not corrupted) AOFs, you can change this in redis.conf. (Salvatore Sanfilippo). +* [NEW] DEBUG POPULATE two args form implemented. It is now possible to + call it with DEBUG POPULATE . Default prefix + is "key:" as usually. +* [NEW] INCR: Modify incremented object in-place when possible. This results + in speed improvements + possibly better memory locality. + +>> Cluster changes + +* [FIX] Cluster: claim ping_sent time even if we can't connect. +* [FIX] redis-trib should not abort easily on connection issues. +* [FIX] Cluster test: less console-spammy resharding test. +* [FIX] Fix logic to detect we are among a minority. +* [FIX] Process gossip section only for known nodes. + +* [NEW] Redis Cluster is stable and tested enough, there is a clear MVP, + so it was promoted from beta to stable. +* [NEW] New unit 09, Pub/Sub across the cluster. +* [NEW] New unit 08, update messages. +* [NEW] New cluster option to work with partial slots coverage. +* [NEW] More chatty cluster slaves when failover is stalled. They log reason + with rate limiting, only when reason changes or a given time + has elapsed. + +>> Sentinel changes + +* [FIX] Sentinel critical bug fixed: the absolute majority was computed in a + wrong way because of a programming error. Now the implementation does + what the specification says and the majority to authorize a failover + (that should not be confused with the ODOWN quorum) is the majority of + *all* the Sentinels ever seen for a given master, regardless of their + current state. 
+* [FIX] Resolved a memory leak in the hiredis library causing a memory leak + in Redis Sentinel when a monitored instance or another Sentinel is + unavailable. Every reconnection attempt will leak a small amount of + memory, but in the long run the process can reach a considerable size. + * [NEW] Sentinel: ability to announce itself with an arbitrary IP/port to work in the context of natted networks. However this is probably still not enough since there is no equivalent mechanism for slaves listed in the master INFO output. (Dara Kong and Salvatore Sanfilippo) ---[ Redis 2.8.14 ] Release date: 1 Sep 2014 +--[ Redis 3.0.0 Beta 8 (version 2.9.57) ] Release date: 29 jul 2014 -# UPGRADE URGENCY: HIGH for Lua scripting users, the server could crash because - of a bug introduced in Redis 2.8.10, otherwise LOW. - LOW for Redis Sentinel. +This is the 8th beta of Redis 3.0.0. -* [FIX] Don't prevent use of shared integers if maxmemory policy is non-LRU. - (Salvatore Sanfilippo) -* [FIX] Fail SYNC if background save child aborted due to a signal. - (Yossi Gottlieb) -* [FIX] Different small redis-cli fixes. (Dov Murik, Charsyam, cubicdaiya, - Kashif Rasul, Jan-Erik Rediger, Matt Stancliff) -* [FIX] AIX compilation fixes. (Siah Lyimo) -* [FIX] A number of other smaller issues. -* [FIX] Improved SIGINT handling (Matt Stancliff, Salvatore Sanfilippo) -* [FIX] Use unsigned types in SDS header to raise limit to 4GB. - (Matt Stancliff, Salvatore Sanfilippo) -* [FIX] Handle signed/unsigned comparisons with more care around the code. +>> General changes + +* [FIX] Solaris compilation issues. (Matt Stancliff, Salvatore Sanfilippo) +* [FIX] Allow shared integer objects if maxmemory policy is not LRU based. (Salvatore Sanfilippo) -* [FIX] Colorized test output fixed to don't change the background color. - (Mariano Pérez Rodríguez) -* [FIX] More Sentinel IPv6 fixes. (Eiichi Sato) -* [FIX] Deny CLIENT command in scripts. (Matt Stancliff) -* [FIX] Allow datasets with more than 2 billion of keys, initial work. -* [FIX] Fix a Lua scripting crash by storing the length of the static - argv when first allocated. (Paddy Byers) +* [FIX] PFSELFTEST: less false positives. (Salvatore Sanfilippo) +* [FIX] Fail SYNC if background save child aborted due to a signal. (Yossi + Gottlieb) +* [NEW] Latency framework backported from unstable branch. (Salvatore + Sanfilippo) +* [NEW] AOF rewrite improved for latency. (Salvatore Sanfilippo) * [NEW] Pub/Sub PING. (Salvatore Sanfilippo) * [NEW] Much faster ZUNIONSTORE. (Kyle Hubert, Salvatore Sanfilippo) * [NEW] Faster ll2string() implementation. (Salvatore Sanfilippo) -* [NEW] **WARNING, minor API change**: PUBSUB NUMSUB: return type modified - to integer. (Matt Stancliff) -* [NEW] redis-benchmark support for AUTH. (CharSyam) - ---[ Redis 2.8.13 ] Release date: 14 Jul 2014 - -# UPGRADE URGENCY: LOW for Redis and Sentinel, this is a features enhancement - release mostly. Since this release introduces the latency - monitoring feature, Redis 2.8 users experiencing latency - issues are strongly encouraged to upgrade. - -* [FIX] CLIENT KILL minor backward compatibility fixes. (Salvatore Sanfilippo) -* [FIX] Enable HAVE_ATOMIC for PowerPC. (Matt Stancliff) -* [FIX] More robust PSYNC and AOF rewrites tests. (Salvatore Sanfilippo) -* [FIX] Solaris build fixed. 
(Matt Stancliff, Salvatore Sanfilippo) - -* [NEW] The new latency monitoring feature, as documented at - http://redis.io/topics/latency-monitor (Salvatore Sanfilippo) -* [NEW] The COMMAND command, exposing the Redis command table - as an API. (Matt Stancliff) -* [NEW] Update used memory with C11 __atomic. (Matt Stancliff) - ---[ Redis 2.8.12 ] Release date: 23 Jun 2014 - -# UPGRADE URGENCY: HIGH for Redis, CRITICAL for Sentinel. - -# WARNING: This release contains a non backward compatible semantical change - to Lua script that should affect an almost zero number of users. - -* [FIX / BREAKS BACKWARD COMPATIBILITY] Using SELECT inside Lua scripts no - longer makes the selected DB to be set in the calling client. - So Lua can still use SELECT, but the client calling the script will - remain set to the original DB. Thix fixes an issue with Redis - replication of Lua scripts that called SELECT without reverting the - selected DB to the original one. (Salvatore Sanfilippo) -* [FIX] Sentinel failover was instalbe if the master was detected as available - during the failover (especially during manual failovers) because - of an implementation error (lack of checking of - SRI_PROMOTED flag). (Salvatore Sanfilippo) -* [FIX] Cancel SHUTDOWN if initial AOF is being written. (Matt Stancliff) -* [FIX] Sentinel: bind source address for outcoming connections. (Matt - Stancliff). -* [FIX] Less timing sensitive Sentinel tests. (Salvatore Sanfilippo). - -* [NEW] redis-cli --intrinsic-latency stopped with SIGINT still reports - stats (Matt Stancliff) -* [NEW] Sentinels broadcast an HELLO message ASAP after a failover in order to - reach a consistent state faster (before it relied for periodic HELLO - messages). (Salvatore Sanfilippo). -* [NEW] Jemalloc updated to 3.6.0. (Salvatore Sanfilippo) -* [NEW] CLIENT LIST speedup. (Salvatore Sanfilippo) -* [NEW] CLIENT LIST new unique incremental ID to every client. (Salvatore - Sanfilippo) -* [NEW] ROLE command added. (Salvatore Sanfilippo) -* [NEW] CLIENT KILL new form to kill by client type and ID (see doc at - redis.io for more info). (Salvatore Sanfilippo) -* [NEW] Sentinel now disconnects clients when instances are reconfigured - (see http://redis.io/topics/sentinel-clients). (Salvatore Sanfilippo) -* [NEW] Hiredis update to latest version. (Matt Stancliff) - ---[ Redis 2.8.11 ] Release date: 11 Jun 2014 - -# UPGRADE URGENCY: HIGH if you use Lua scripting, LOW otherwise. - -* [FIX] A previous fix for Lua -> Redis numerical precision enhancement - introduced a new problem. In Redis 2.8.10 commands called from Lua - passing a string that "looks like" a very large number, may actually - use as argument the string converted as a float. This bug is now - fixed. -* [FIX] Now commands other than *PUSH* adding elements to a list will be able - to awake clients blocked in a blocking POP operation. -* [FIX] Cygwin compilation fixes. - ---[ Redis 2.8.10 ] Release date: 5 Jun 2014 - -# UPGRADE URGENCY: HIGH if you use min-slaves-to-write option. - -* [FIX] IMPORTANT! A min-slaves-to-write option active in a slave totally - prevented the slave from acception the master stream of commands. - This release includes testes for min-slaves-to-write, and a fix - for this issue. -* [FIX] Sometimes DEL returned 1 for already expired keys. Fixed. -* [FIX] Fix test false positive because new osx 'leaks' output. -* [FIX] PFCOUNT HLL cache invalidation fixed: no wrong value was reported - but the cache was not used at all, leading to lower performances. 
-* [FIX] Accept(2) multiple clients per readable-event invocation, and better - processing of I/O while loading or busy running a timedout script. - Basically now the LOADING / BUSY errors are reported at a decent - speed. -* [FIX] A softwaer watchdog crash fixed. -* [FIX] Fixed a Lua -> Redis numerical precision loss. - -* [NEW] Lua scripting engine speed improved. -* [NEW] Sentinel generates one new event for humans to understand better - what is happening during a failover: +config-update-from. - Also the time at which a failover will be re-attempted is logged. - ---[ Redis 2.8.9 ] Release date: 22 Apr 2014 - -# UPGRADE URGENCY: LOW, only new features introduced, no bugs fixed. - -* [NEW] The HyperLogLog data structure. You can read more about it - in this blog post. http://antirez.com/news/75 -* [NEW] The Sorted Set data type has now support for lexicographic range - queries, check the new commands ZRANGEBYLEX, ZLEXCOUNT and - ZREMRANGEBYLEX, which are documented at http://redis.io. - ---[ Redis 2.8.8 ] Release date: 25 Mar 2014 - -# UPGRADE URGENCY: HIGH for Redis, LOW for Sentinel. There is a potentially - critical bug fix causing data loss in Redis but it requires - a combination of disk full and the use of the - SHUTDOWN command. - -* [FIX] Fixed data loss when SHUTDOWN was used with a disk full condition. -* [FIX] Fixed a memory leak in the SORT syntax error processing. -* [FIX] When Sentinel down-after-milliseconds parameter is modified at runtime - now it gets propagated to all the slaves and sentinel instances - of the master. -* [FIX] `install_server.sh` script finally fixed. -* [FIX] Different fixes to maxclients handling. -* [NEW] Sentinels are now able to send update messages in a peer-to-peer - fashion even if no Redis instances are available. Now the Sentinel - liveness property that the most updated configuration in a given - partition is propagated to all the Sentinels is extended to partitions - without reachable instances. -* [NEW] Sentinel safety properties are now ensured in a crash-recovery system - model since some state is persisted on disk before replying to other - nodes, and reloaded at startup. -* [NEW] Sentinel now uses CLIENT SETNAME so that it is easy to identify - Sentinels using CLIENT LIST among other clients. -* [NEW] Sentinel failure detection and reconnection code improved. -* [NEW] Use all 24 bits (instead of 22) for the Redis objects LRU field. - Note that the new LRU algorithm using eviction pools was not backported - from unstable for safery / code maturity concerns. -* [NEW] Majory speedup for the INFO command (it is now 6 times faster). -* [NEW] More Sentinel unit tests. -* [NEW] New command DEBUG ERROR returns the specified error. Example: - DEBUG ERROR "LOADING database". This is handy to write Redis client - libraries unit tests. -* [NEW] redis-cli now supports multi-line editing via updated linenoise lib. +>> Cluster changes -Thanks to Matt Stancliff and Jan-Erik Rediger for the work done in the context -of this release. +* [FIX] CLUSTER RESET: Flush slave dataset on reset. (Salvatore Sanfilippo) +* [FIX] Replica migration: don't migrate to masters that never had slaves + in the past, but only to masters that remained orphaned after + failure events. (Salvatore Sanfilippo) ---[ Redis 2.8.7 ] Release date: 5 Mar 2014 - -# UPGRADE URGENCY: LOW for Redis, LOW for Sentinel. However this release adds - new features so users may want to upgrade in order to - exploit the new functionalities. 
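The 2.8.9 notes above introduce the HyperLogLog type and the sorted-set lexicographic range commands ZRANGEBYLEX, ZLEXCOUNT and ZREMRANGEBYLEX. A minimal redis-cli sketch of how the lexicographic commands compose (key and member names are illustrative, not taken from this patch; all members must share the same score for the lexicographic ordering to be meaningful):

    redis-cli ZADD myzset 0 a 0 b 0 c 0 d
    redis-cli ZRANGEBYLEX myzset - "[c"        # a, b, c   ("[" = inclusive bound, "-" = lowest possible)
    redis-cli ZLEXCOUNT myzset "[b" +          # 3         (b, c, d; "+" = highest possible)
    redis-cli ZREMRANGEBYLEX myzset "[a" "(c"  # removes a and b ("(" = exclusive bound)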
- -* [FIX] Sometimes the absolute config file path was obtained in a wrong way. - This happened when there was a "dir" directive inside the config file - and at the same time the configuration file was given as a relative - path to redis-server or redis-sentinel executables. -* [FIX] redis-cli: Automatically enter --slave mode when SYNC or PSYNC are - called during an interactive session. -* [FIX] Sentinel "IDONTKNOW" error removed as it does not made sense with the - new Sentinel design. This error was actually a fix for a design error - in the first implementation of Sentinel. -* [FIX] Sentinel: added a missing exit() call to abort after config file - checks at startup. This error was introduced with an improvement in - a previous 2.8 release. -* [FIX] BITCOUNT: fixed unaligned access causing issues in sparc and other - archs not capable of dealing with unaligned accesses. This also makes - the code faster in archs where unaligned accesses are allowed. -* [FIX] Sentinel: better nodes fail over start time desynchronization to avoid - split-brain during the voting process needed to get authorization to - fail over. This means the system is less likely to need to retry - and will fail over faster. No changes in behavior / correctness. -* [FIX] Force INFO used_memory_peak to match peak memory. This generated some - confusion among users even if it was not an actual bug. +* [NEW] redis-trib: allow to reshard in non-interactive way. (Salvatore + Sanfilippo) +* [NEW] Cluster test: unit 04, check consistency during resharding. (Salvatore + Sanfilippo) +* [NEW] Cluster test: unit 05, slave selection. (Salvatore Sanfilippo) +* [NEW] Cluster test: unit 06, slaves with stale data can't failover. (Salvatore + Sanfilippo) +* [NEW] Cluster test: unit 07, replicas migration. (Salvatore Sanfilippo) -* [NEW] Sentinel unit tests and framework. More tests needed and units must - be improved in order to have less false positives, but it is a start - and features a debugging console that is useful to fix tests or to - inspect bugs causing tests failures. -* [NEW] New Sentinel events: +/-monitor and +set used to monitor when an - instance to monitor is added or removed, or when a configuration - is modified via SENTINEL SET. -* [NEW] Redis-cli updated to use SCAN instead of random sampling via - RANDOMKEY in order to implement --bigkeys feature. Moreover the - implementation now supports pipelining and reports more information - at the end of the scan. Much faster, much better. A special thank - you to Michael Grunder for this improvement. -* [NEW] redis-cli now supports a new --intrinsic-latency mode that is able - to meter the latency of a system due to kernel / hypervisor. - How to use it is explained at http://redis.io/topics/latency. -* [NEW] New command BITPOS: find first bit set or clear in a bitmap. -* [NEW] CONFIG REWRITE calls are now logged. +>> Sentinel changes ---[ Redis 2.8.6 ] Release date: 13 Feb 2014 +* No Sentinel changes in this release. -# UPGRADE URGENCY: HIGH for Redis, LOW for Sentinel. Redis users using Lua - scripts with EVALSHA and attached slaves and/or AOF - persistence should consider upgrading ASAP. +--[ Redis 3.0.0 Beta 7 (version 2.9.56) ] Release date: 30 jun 2014 -* [FIX] Fixed an critical EVALSHA script cache bug: scripts executed may not - propagate to AOF / Slaves correctly under certain conditions. - See issue #1549 at Github for more information. -* [FIX] Fixed multiple bugs resulting into closing the link with master or slave - during replication without good reasons. 
This will result in useless - resynchronizations, or infinite loops where the replication link can't - be established. -* [FIX] Don't count the time needed to populate the buffers of clients waiting - in MONITOR mode when populating the Slow Log entries. +This is the 7th beta of Redis 3.0.0. -* [NEW] AOF write errors (like no space on device) no longer abort Redis if the - fsync policy is none or every second. The database enters a read-only - mode where every write is refused with an error. Normal operations are - restored as soon as Redis is able to append again data to the AOF file. -* [NEW] Sentinel now accepts SHUTDOWN command. +>> General changes ---[ Redis 2.8.5 ] Release date: 4 Feb 2014 +* [FIX] Scripting fixes backported from unstable, see Redis 2.8.12 changelog + for more info. (Salvatore Sanfilippo) +* [FIX] Cancel SHUTDOWN if initial AOF is being written. (Matt Stancliff) -# UPGRADE URGENCY: HIGH for Redis, LOW for Sentinel. Redis users using Lua - scripts with expires, and Redis users relying on the - ability of Redis to block writes on RDB saving errors - should plan to upgrade ASAP. +* [NEW] New command: COMMAND, for commands introspection (Matt Stancliff & + Salvatore Sanfilippo) +* [NEW] hiredis: Update to latest version. (Matt Stancliff) +* [NEW] Jemalloc updated to 3.6.0. (Salvatore Sanfilippo) -* [FIX] Fixed a replication bug caused by Lua scripts + expired keys: keys could - expire in the middle of scripts causing non-deterministic behavior. -* [FIX] MISCONFIG error if condition fixed, the server was no longer able - to stop writes on RDB misconfiguration after this error was introduced. -* [FIX] REDIS_AOF_REWRITE_MIN_SIZE is now 64mb like example redis.conf default. -* [FIX] Perform fflush() before fsync() in rio.c (bug without actual effects). -* [FIX] Don't log MONITOR clients as disconnecting slaves. -* [FIX] SENTINEL MASTER arity check fixed. Crashed the Sentinel instance when - the command was given without arguments. +>> Cluster changes -* [NEW] Allow CONFIG and SHUTDOWN while in stale-slave state. -* [NEW] Support for configurable TCP listen(2) backlog size. -* [NEW] redis-cli supports SCAN via the --scan and --pattern options. -* [NEW] SENTINEL SET master quorum via runtime API implemented. +* [FIX] Cluster: clear NOADDR flag when updating node address. + (Salvatore Sanfilippo) ---[ Redis 2.8.4 ] Release date: 13 Jan 2014 +* [NEW] New CLUSTER SLOTS command to simplify Cluster clients operations. + (Matt Stancliff) +* [NEW] More Cluster tests. (Salvatore Sanfilippo) +* [NEW] Log when failover authorization are granted / denied. + (Salvatore Sanfilippo) -# UPGRADE URGENCY: MODERATE for Redis and Sentinel. +>> Sentinel changes -* [FIX] Makefile compatibility with non common make variants improved. -* [FIX] SDIFF crash in very unlikely to trigger state fixed. -* [FIX] Config rewriting fixed: don't wipe options unknown to the rewrite - process. -* [FIX] Set TCP port to 0 works again to disable TCP networking. -* [FIX] Fixed replication with old Redis instances as masters by not - sending REPLCONF ACK to them. -* [FIX] Fix keyspace notifications rewrite and CONFIG GET output. -* [FIX] Fix RESTORE TTL handling in 32 bit systems (32 bit overflow). +* [FIX] A few Sentinel bugs fixed and improvements, see Redis 2.8.12 + changelog for more info. (Salvatore Sanfilippo & Matt Stancliff) +* [NEW] New Sentinel-Client handshake protocol, ROLE command, CLIENT KILL, + all backported to 3.0 branch. (Salvatore Sanfilippo) -* [NEW] Sentinel now has a run time configuration API. 
-* [NEW] Log when we lost connection with master or slave. -* [NEW] When instance is turned from slave to master now inherits the - old master replication offset when possible. This improves the - Sentinel failover procedure. +--[ Redis 3.0.0 Beta 6 (version 2.9.55) ] Release date: 9 jun 2014 ---[ Redis 2.8.3 ] Release date: 11 Dec 2013 +This is the 6th beta of Redis 3.0.0. -# UPGRADE URGENCY: MODERATE for Redis, HIGH for Sentinel. +>> General changes -* [FIX] Sentinel instance role sampling fixed, the system is now more - reliable during failover and when reconfiguring instances with - non matching configuration. -* [FIX] Inline requests are now handled even when terminated with just LF. -* [FIX] Replication timeout handling greatly improved, now the slave is able - to ping the master while removing the old data from memory, and while - loading the new RDB file. This avoid false timeouts sensed by - masters. -* [FIX] Fixed a replication bug involving 32 bit instances and big datasets - hard to compress that resulted into more than 2GB of RDB file sent. -* [FIX] Return error for inline requests with unbalanced quotes. -* [FIX] Publish the slave replication offset even when disconnected from the - master if there is still a cached master instance. +* [FIX] Fix software watchdog signal handler crash due to re-entering. +* [FIX] Better Lua number -> string conversion for Lua scripts. +* [FIX] Serious replication bug when min-slaves-* feature is used in slaves + configuration fixed. +* [FIX] Blocking pop on lists now works when the list is created by commands + other than *PUSH* (for example SORT STORE). ---[ Redis 2.8.2 ] Release date: 2 Dec 2013 +>> Cluster changes -# UPGRADE URGENCY: MODERATE for both Redis and Sentinel. +* [FIX] CRITICAL: For a bug in the implementation of CLUSTER SET-CONFIG-EPOCH + introduced with beta-3 (especially beta-4 where the command + is actually used by redis-trib), a configEpoch for a node could + jump backward, breaking the eventual consistency property of the + slots -> nodes mapping in the cluster. -* [FIX] Sentinel better desynchronization to avoid split-brain elections - where no Sentinel managed to get elected. -* [FIX] Stop accepting writes on "MISCONF" error only if master, not slave. -* [FIX] Reply to PING with an error on "MISCONF" errors. +>> Sentinel changes ---[ Redis 2.8.1 ] Release date: 25 Nov 2013 +* No changes for Sentinel in this release. -# UPGRADE URGENCY: LOW for Redis, CRITICAL for Senitnel. You don't need to - upgrade your Redis instances but it is highly recommended - to upgrade and restart all the Sentinel processes. +--[ Redis 3.0.0 Beta 5 (version 2.9.54) ] Release date: 26 may 2014 -* [FIX] Fixed a bug in "new Sentinel" config propagation. -* [FIX] Fixed a false positive in Redis tests. +This is the 5th beta of Redis 3.0.0. It does not include any real +worthwhile change (just three days passed since the previous beta), but +fixes two stupid bugs preventing cluster tests to pass. ---[ Redis 2.8.0 ] Release date: 22 Nov 2013 +--[ Redis 3.0.0 Beta 4 (version 2.9.53) ] Release date: 23 may 2014 -# UPGRADE URGENCY: LOW, unless you want to upgrade to new Sentinel code. +This is the fourth beta of Redis 3.0.0. -* [FIX] Fixed an error in rdbWriteRaw() that should have no practical impact. -* [NEW] Log the new master when SLAVEOF command is used. -* [NEW] Sentinel code synchronized with the unstable branch, the new Sentinel - is a reimplementation that uses more reliable algorithms. 
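The 2.8.5 notes above mention that redis-cli supports SCAN through the --scan and --pattern options. A sketch of typical usage (the key patterns are illustrative, not taken from this patch):

    # Cursor-based key iteration instead of the blocking KEYS command
    redis-cli --scan --pattern 'user:*'
    # Feed the matched keys to another command, e.g. inspect their TTLs
    redis-cli --scan --pattern 'session:*' | while read k; do redis-cli TTL "$k"; done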
+>> General changes ---[ Redis 2.8 Release Candidate 6 (2.7.106) ] Release date: 6 Nov 2013 +* [NEW] Scripting engine performances improvements. +* [NEW] Log format slightly changed to report current node role. -This is the 6th release candidate of Redis 2.8 (official version is 2.7.106). +* [FIX] Correct the HyperLogLog stale cache flag to prevent unnecessary + computation. -# UPGRADE URGENCY: LOW, only new features back ported, no fixes. +>> Cluster changes -* [NEW] SCAN, SSCAN, HSCAN, ZSCAN commands. +* [NEW] redis-trib: ability to import data from standalone Redis instances. +* [NEW] redis-trib: "fix" subcommand much better at fixing errors. +* [NEW] CLUSTER FAILOVER FORCE implemented. +* [NEW] CLUSTER RESET implemented, it is now possible to completely reset + nodes and create a new cluster without restarting anything. +* [NEW] Slave validity factor (max estimated data age to still failover) + is now under the control of the user, and can be configured via + redis.conf or CONFIG SET. Option name cluster-slave-validity-factor. +* [NEW] Cluster test: failure detection and failover initial tests. +* [NEW] CLUSTER MEET: better error messages when address is invalid. +* [NEW] Bulk-accept new Cluster nodes in the Cluster bus instead of + performing just a single accept() per event fired. ---[ Redis 2.8 Release Candidate 5 (2.7.105) ] Release date: 9 Oct 2013 +* [FIX] Bypass data_age check for manual failovers. +* [FIX] Fixed data_age computation / check integer overflow. +* [FIX] Various fixes to Tcl client.tcl Redis Cluster client used in tests. +* [FIX] Better handling of stolen slots. +* [FIX] Don't accept cluster bus connections during startup. -This is the 5th release candidate of Redis 2.8 (official version is 2.7.105). -Important bugs fixed inside. +>> Sentinel changes -# UPGRADE URGENCY: HIGH because of many non critical replication bugs fixed. +* [NEW] Generate +config-update-from event when a new config is received. +* [NEW] Log when a failover will be re-attempted again. -* [FIX] redis-cli: don't crash with --bigkeys when the key no longer exist. -* [FIX] Allow AUTH / PING when disconnected from slave and serve-stale-data is no. -* [FIX] PSYNC: safer handling of PSYNC requests with offsets in the future. -* [FIX] Replication: Fix master timeout detection. -* [FIX] Replication: Correctly install the write handler after successful PSYNC. - ---[ Redis 2.8 Release Candidate 4 (2.7.104) ] Release date: 30 Aug 2013 +* [FIX] Sentinel: Add "dir /tmp" directive in example sentinel.conf. -This is the fourth release candidate of Redis 2.8 (official version is 2.7.104). -Important bugs fixed inside. +--[ Redis 3.0.0 Beta 3 (version 2.9.52) ] Release date: 5 may 2014 -# UPGRADE URGENCY: HIGH because of the EVAL memory leak. +This is the third beta of Redis 3.0.0. -* [FIX] Fixed a serious EVAL memory leak in the Lua stack. -* [FIX] Fixed server startup when no IPv6 address exists in any interface. -* [FIX] Send MISCONFIG error when BGSAVE fails because can't fork. -* [FIX] Memory efficiency with large (> a few kbytes) values improved considerably. -* [NEW] DEBUG SDSLEN for sds memory debugging. +>> General changes ---[ Redis 2.8 Release Candidate 3 (2.7.103) ] Release date: 19 Aug 2013 +* [NEW] New data structure: the HyperLogLog (see 2.8 release notes). +* [NEW] Lexicograhical range queries in sorted sets (see 2.8 release notes). +* [NEW] LRU algorithm precision greatly improved. -This is the third release candidate of Redis 2.8 (official version is 2.7.103). -Important bugs fixed inside. 
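The Beta 4 cluster notes above state that the slave validity factor can be configured via redis.conf or CONFIG SET under the option name cluster-slave-validity-factor. A sketch of both forms, assuming a cluster-enabled node (the value 10 is only an example):

    # In redis.conf for a cluster node:
    #   cluster-slave-validity-factor 10
    # Or at runtime, as the note above indicates:
    redis-cli CONFIG SET cluster-slave-validity-factor 10
    redis-cli CONFIG GET cluster-slave-validity-factor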
+* [FIX] Redis is now much more responsive to reply with LOADING / BUSY errors. -# UPGRADE URGENCY: HIGH +>> Cluster changes -* [FIX] Improved expired keys collection algorithm. Even under heavy load keys - to be expired can't accumulate because of lack of CPU time. -* [FIX] Replication speed regression fixed (issue #1238). -* [FIX] Fixed an hard to trigger PSYNC bug. -* [FIX] Fixed Issue #1240, ZUNIONSTORE could lead to wrong result. -* [NEW] Add per-db average TTL information in INFO output. -* [NEW] redis-benchmark improvements. -* [NEW] dict.c API wrong usage detection. +* [NEW] Cluster testing framework and initial tests. +* [NEW] Cluster epochs collision resolution (make Redis Cluster more resilient + to admin and programming errors). +* [NEW] Persist / fsync some global state to ensure correct crash-recovery + semantics. +* [NEW] New command introduced: CLUSTER SET-CONFIG-EPOCH, still not used + by redis-trib. Will be used to speedup the assignment of unique + epochs to different nodes at cluster creation time. For now this is + handled as a side effect of the cluster epochs collision resolution. ---[ Redis 2.8 Release Candidate 2 (2.7.102) ] Release date: 30 Jul 2013 +* [FIX] Different fixes to redis-trib cluster creation. +* [FIX] Fix an error in the CLUSTER NODES output for nodes slots. -This is the second release candidate of Redis 2.8 (official version is 2.7.102). -Important bugs fixed inside. +>> Sentinel changes -# UPGRADE URGENCY: HIGH +* [NEW] Sentinels are now able to send update messages in a peer-to-peer + fashion even if no Redis instances are available. Now the Sentinel + liveness property that the most updated configuration in a given + partition is propagated to all the Sentinels is extended to partitions + without reachable instances. +* [NEW] Sentinel safety properties are now ensured in a crash-recovery system + model since some state is persisted on disk before replying to other + nodes, and reloaded at startup. +* [NEW] Sentinel now uses CLIENT SETNAME so that it is easy to identify + Sentinels using CLIENT LIST among other clients. +* [NEW] Sentinel failure detection and reconnection code improved. -* [FIX] Fixed a critical replication bug, see issue #1221. -* [NEW] The new inline protocol now accepts quoted strings like, for example - you can now type in a telnet session: set 'foo bar' "hello world\n". +--[ Redis 3.0.0 Beta 2 (version 2.9.51) ] Release date: 11 mar 2014 ---[ Redis 2.8 Release Candidate 1 (2.7.101) ] Release date: 18 Jul 2013 +This is the second beta of Redis 3.0.0. -This is the first release candidate of Redis 2.8 (official version is 2.7.101). +>> General changes -The following is a list of improvements in Redis 2.8, compared to Redis 2.6. +* [FIX] Sometimes the absolute config file path was obtained in a wrong way. + This happened when there was a "dir" directive inside the config file + and at the same time the configuration file was given as a relative + path to redis-server or redis-sentinel executables. +* [FIX] redis-cli: Automatically enter --slave mode when SYNC or PSYNC are + called during an interactive session. +* [FIX] BITCOUNT: fixed unaligned access causing issues in sparc and other + archs not capable of dealing with unaligned accesses. This also makes + the code faster in archs where unaligned accesses are allowed. +* [FIX] Force INFO used_memory_peak to match peak memory. This generated some + confusion among users even if it was not an actual bug. 
+* [FIX] Fixed an critical EVALSHA script cache bug: scripts executed may not + propagate to AOF / Slaves correctly under certain conditions. + See issue #1549 at Github for more information. +* [FIX] Fixed multiple bugs resulting into closing the link with master or slave + during replication without good reasons. This will result in useless + resynchronizations, or infinite loops where the replication link can't + be established. +* [FIX] Don't count the time needed to populate the buffers of clients waiting + in MONITOR mode when populating the Slow Log entries. -* [NEW] Slaves are now able to partially resynchronize with the master, so most - of the times a full resynchronization with the RDB creation in the master - side is not needed when the master-slave link is disconnected for a short - amount of time. -* [NEW] Experimental IPv6 support. -* [NEW] Slaves explicitly ping masters now, a master is able to detect a timed out - slave independently. -* [NEW] Masters can stop accepting writes if not enough slaves with a given - maximum latency are connected. -* [NEW] Keyspace changes notifications via Pub/Sub. -* [NEW] CONFIG SET maxclients is now available. -* [NEW] Ability to bind multiple IP addresses. -* [NEW] Set process names so that you can recognize, in the "ps" command output, - the listening port of an instance, or if it is a saving child. -* [NEW] Automatic memory check on crash. -* [NEW] CONFIG REWRITE is able to materialize the changes in the configuration - operated using CONFIG SET into the redis.conf file. -* [NEW] More NetBSD friendly code base. -* [NEW] PUBSUB command for Pub/Sub introspection capabilities. -* [NEW] EVALSHA can now be replicated as such, without requiring to be expanded - to a full EVAL for the replication link. -* [NEW] Better Lua scripts error reporting. -* [NEW] SDIFF performance improved. -* [FIX] A number of bugfixes. +* [NEW] Redis-cli updated to use SCAN instead of random sampling via + RANDOMKEY in order to implement --bigkeys feature. Moreover the + implementation now supports pipelining and reports more information + at the end of the scan. Much faster, much better. A special thank + you to Michael Grunder for this improvement. +* [NEW] redis-cli now supports a new --intrinsic-latency mode that is able + to meter the latency of a system due to kernel / hypervisor. + How to use it is explained at http://redis.io/topics/latency. +* [NEW] New command BITPOS: find first bit set or clear in a bitmap. +* [NEW] CONFIG REWRITE calls are now logged. +* [NEW] AOF write errors (like no space on device) no longer abort Redis if the + fsync policy is none or every second. The database enters a read-only + mode where every write is refused with an error. Normal operations are + restored as soon as Redis is able to append again data to the AOF file. +* [NEW] Sentinel now accepts SHUTDOWN command. -Migrating from 2.6 to 2.8 -========================= -Redis 2.6 is mostly a strict subset of 2.8. However there are a few things -that you should be aware of: +>> Cluster changes + +* [FIX] Bind the first interface listed in the "bind" configuration directive + if any, in order to perform outgoing connections. This fixes Cluster + usage when an address is bound but there are multiple interfaces that + may be used to connect with other nodes. +* [FIX] When an "Importing" slot is closed via CLUSTER SETSLOT NODE ... + increment the configEpoch in the special case it is zero. +* [FIX] Current transaction is invalidated on redirection errors. 
+* [FIX] Abort if port does not allow for a valid cluster bus port that is + always at fixed +10000 offset. +* [FIX] Keys extraction algorithm fixed for ZUNIONSTORE/ZINTERSTORE and SORT. +* [FIX] Better failover timeout and retry times: failover should now work + reliabily when node-timeout is very small (a few milliseconds). +* [FIX] Don't allow SORT GET/BY options in Cluster mode. +* [FIX] Clear importing/migrating state when turning from master to slave role. +* [FIX] Set slot error if we receive an update for a busy slot. +* [FIX] Update node configEpoch on UPDATE messages. + +* [NEW] Support multi-key operations as long as keys resolve to the same + hash slot, and the slot is not migrating, or it is migrating but all + the mentioned keys are available. +* [NEW] New DEBUG command CMDKEYS available to debug / test keys identification + in Redis commands. +* [NEW] redis-trib: create subcommand is now able to assign spare slaves. +* [NEW] redis-trib: new subcommand 'call'. Exec command in all nodes. + + +>> Sentinel changes -The following commands changed behavior: +* [FIX] Sentinel "IDONTKNOW" error removed as it does not made sense with the + new Sentinel design. This error was actually a fix for a design error + in the first implementation of Sentinel. +* [FIX] Sentinel: added a missing exit() call to abort after config file + checks at startup. This error was introduced with an improvement in + a previous 2.8 release. +* [FIX] Sentinel: better nodes fail over start time desynchronization to avoid + split-brain during the voting process needed to get authorization to + fail over. This means the system is less likely to need to retry + and will fail over faster. No changes in behavior / correctness. - * The TTL and PTTL commands now return -2 if the key does not exist and - -1 if it exists but has no associated expire. Redis 2.6 and previous - versions used to return -1 for both the conditions. - * SORT with ALPHA now sorts according to local collation locale if no STORE - option is used. - * ZADD/ZINCRBY are now able to accept a bigger range of values as valid - scores, that is, all the values you may end having as a result of - calling ZINCRBY multiple times. - * Many errors are now prefixed by a more specific error code instead of - the generic -ERR, for example -WRONGTYPE, -NOAUTH, ... - * PUBLISH called inside Lua scripts is now correctly propagated to slaves. +* [NEW] Sentinel unit tests and framework. More tests needed and units must + be improved in order to have less false positives, but it is a start + and features a debugging console that is useful to fix tests or to + inspect bugs causing tests failures. +* [NEW] New Sentinel events: +/-monitor and +set used to monitor when an + instance to monitor is added or removed, or when a configuration + is modified via SENTINEL SET. -The following redis.conf and CONFIG GET / SET parameters changed: +--[ Redis 3.0.0 Beta 1 (version 2.9.50) ] Release date: 11 Feb 2014 - * logfile now uses the empty string in order to log to standard output, - so 'logfile stdout' is now invalid, use 'logfile ""' instead. +This is the first beta of Redis 3.0.0. -The following INFO fields changed format in a non-backward compatible way: +Migrating from 2.8 to 3.0 +========================= - * The list of slaves in INFO is now in field=value format. +Redis 2.8 is mostly a strict subset of 3.0, you should not have any problem +upgrading your application from 2.8 to 3.0. 
+However, this is a list of small
+non-backward compatible changes introduced in the 3.0 release:
-Replication:
+* The log format was modified. The prefix of each line included the pid
+  in the following format [1234]. Now instead it is 1234:?, where
+  '?' is actually the role of the instance: M for master, S for slave, C
+  if this process is a saving child (for RDB/AOF), and X for Sentinel.
-  Redis 2.8 can be used as slave for Redis 2.6, but doing this is only
-  a good idea for the short amount of time needed to upgrade your servers.
-  We suggest to update both master and slaves at about the same time.
+* The default maxmemory policy in Redis 3.0 is no longer "volatile-lru" as
+  it used to be in 2.8, but "noeviction". The policies' behavior is the same
+  (but LRU eviction is much more precise in 3.0), so only the default value
+  changed. Just make sure to specify in your redis.conf what you mean.
 --------------------------------------------------------------------------------
 Credits: Where not specified the implementation and design is done by
-Salvatore Sanfilippo. Thanks to VMware and Pivotal for making all
-this possible. Also many thanks to all the other contributors and the amazing
-community we have.
+Salvatore Sanfilippo. Thanks to Pivotal for making all this possible.
+Also many thanks to all the other contributors and the amazing community
+we have. See commit messages for more credits.
diff --git a/redis.submodule/BUGS b/redis.submodule/BUGS
index 96d52bf..a8e9368 100644
--- a/redis.submodule/BUGS
+++ b/redis.submodule/BUGS
@@ -1 +1 @@
-Plese check https://github.com/antirez/redis/issues
+Please check https://github.com/antirez/redis/issues
diff --git a/redis.submodule/COPYING b/redis.submodule/COPYING
index a58de44..ac68e01 100644
--- a/redis.submodule/COPYING
+++ b/redis.submodule/COPYING
@@ -1,4 +1,4 @@
-Copyright (c) 2006-2014, Salvatore Sanfilippo
+Copyright (c) 2006-2015, Salvatore Sanfilippo
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
diff --git a/redis.submodule/README b/redis.submodule/README
index b7a12b8..3691186 100644
--- a/redis.submodule/README
+++ b/redis.submodule/README
@@ -26,6 +26,25 @@ After building Redis is a good idea to test it, using:
 
     % make test
 
+Fixing build problems with dependencies or cached build options
+---------
+Redis has some dependencies which are included in the "deps" directory.
+"make" does not rebuild dependencies automatically, even if something in the
+source code of dependencies is changed.
+
+When you update the source code with `git pull` or when code inside the
+dependencies tree is modified in any other way, make sure to use the following
+command in order to really clean everything and rebuild from scratch:
+
+    make distclean
+
+This will clean: jemalloc, lua, hiredis, linenoise.
+
+Also if you force certain build options like 32bit target, no C compiler
+optimizations (for debugging purposes), and other similar build time options,
+those options are cached indefinitely until you issue a "make distclean"
+command.
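+
+For example (an illustrative sequence only, adapt it to the build options you
+actually use), switching between a forced 32 bit build and the default build
+for your platform looks like this:
+
+    make distclean
+    make 32bit        # forced 32 bit build, the option is now cached
+    make distclean
+    make              # back to the default build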
+ Fixing problems building 32 bit binaries --------- diff --git a/redis.submodule/deps/Makefile b/redis.submodule/deps/Makefile index e183ede..1f623ea 100644 --- a/redis.submodule/deps/Makefile +++ b/redis.submodule/deps/Makefile @@ -58,7 +58,7 @@ ifeq ($(uname_S),SunOS) LUA_CFLAGS= -D__C99FEATURES__=1 endif -LUA_CFLAGS+= -O2 -Wall -DLUA_ANSI $(CFLAGS) +LUA_CFLAGS+= -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL $(CFLAGS) LUA_LDFLAGS+= $(LDFLAGS) # lua's Makefile defines AR="ar rcu", which is unusual, and makes it more # challenging to cross-compile lua (and redis). These defines make it easier diff --git a/redis.submodule/deps/hiredis/async.c b/redis.submodule/deps/hiredis/async.c index f7f343b..9cc3563 100644 --- a/redis.submodule/deps/hiredis/async.c +++ b/redis.submodule/deps/hiredis/async.c @@ -443,6 +443,7 @@ void redisProcessCallbacks(redisAsyncContext *ac) { if (((redisReply*)reply)->type == REDIS_REPLY_ERROR) { c->err = REDIS_ERR_OTHER; snprintf(c->errstr,sizeof(c->errstr),"%s",((redisReply*)reply)->str); + c->reader->fn->freeObject(reply); __redisAsyncDisconnect(ac); return; } diff --git a/redis.submodule/deps/hiredis/test.c b/redis.submodule/deps/hiredis/test.c index 713cc06..2cc35a4 100644 --- a/redis.submodule/deps/hiredis/test.c +++ b/redis.submodule/deps/hiredis/test.c @@ -51,7 +51,7 @@ static redisContext *select_database(redisContext *c) { assert(reply != NULL); freeReplyObject(reply); - /* Make sure the DB is emtpy */ + /* Make sure the DB is empty */ reply = redisCommand(c,"DBSIZE"); assert(reply != NULL); if (reply->type == REDIS_REPLY_INTEGER && reply->integer == 0) { diff --git a/redis.submodule/deps/linenoise/.gitignore b/redis.submodule/deps/linenoise/.gitignore index 28f258a..7ab7825 100644 --- a/redis.submodule/deps/linenoise/.gitignore +++ b/redis.submodule/deps/linenoise/.gitignore @@ -1 +1,3 @@ -linenoise_example* +linenoise_example +*.dSYM +history.txt diff --git a/redis.submodule/deps/linenoise/README.markdown b/redis.submodule/deps/linenoise/README.markdown index 9612da4..c845673 100644 --- a/redis.submodule/deps/linenoise/README.markdown +++ b/redis.submodule/deps/linenoise/README.markdown @@ -1,8 +1,13 @@ # Linenoise -A minimal, zero-config, BSD licensed, readline replacement. +A minimal, zero-config, BSD licensed, readline replacement used in Redis, +MongoDB, and Android. -News: linenoise is now part of [Android](http://android.git.kernel.org/?p=platform/system/core.git;a=tree;f=liblinenoise;h=56450eaed7f783760e5e6a5993ef75cde2e29dea;hb=HEAD Android)! +* Single and multi line editing mode with the usual key bindings implemented. +* History handling. +* Completion. +* About 1,100 lines of BSD license source code. +* Only uses a subset of VT100 escapes (ANSI.SYS compatible). ## Can a line editing library be 20k lines of code? @@ -10,7 +15,7 @@ Line editing with some support for history is a really important feature for com So what usually happens is either: - * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Readl world example of this problem: Tclsh). + * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Real world example of this problem: Tclsh). 
* Smaller programs not using a configure script not supporting line editing at all (A problem we had with Redis-cli for instance). The result is a pollution of binaries without line editing support. @@ -19,27 +24,29 @@ So I spent more or less two hours doing a reality check resulting in this little ## Terminals, in 2010. -Apparently almost every terminal you can happen to use today has some kind of support for VT100 alike escape sequences. So I tried to write a lib using just very basic VT100 features. The resulting library appears to work everywhere I tried to use it. +Apparently almost every terminal you can happen to use today has some kind of support for basic VT100 escape sequences. So I tried to write a lib using just very basic VT100 features. The resulting library appears to work everywhere I tried to use it, and now can work even on ANSI.SYS compatible terminals, since no +VT220 specific sequences are used anymore. -Since it's so young I guess there are a few bugs, or the lib may not compile or work with some operating system, but it's a matter of a few weeks and eventually we'll get it right, and there will be no excuses for not shipping command line tools without built-in line editing support. - -The library is currently less than 400 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution, it is trivial. Linenoise is BSD code, so you can use both in free software and commercial software. +The library is currently about 1100 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution, it is trivial. Linenoise is BSD code, so you can use both in free software and commercial software. ## Tested with... * Linux text only console ($TERM = linux) * Linux KDE terminal application ($TERM = xterm) * Linux xterm ($TERM = xterm) + * Linux Buildroot ($TERM = vt100) * Mac OS X iTerm ($TERM = xterm) * Mac OS X default Terminal.app ($TERM = xterm) * OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen) * IBM AIX 6.1 * FreeBSD xterm ($TERM = xterm) + * ANSI.SYS Please test it everywhere you can and report back! ## Let's push this forward! -Please fork it and add something interesting and send me a pull request. What's especially interesting are fixes, new key bindings, completion. +Patches should be provided in the respect of linenoise sensibility for small +easy to understand code. Send feedbacks to antirez at gmail diff --git a/redis.submodule/deps/linenoise/example.c b/redis.submodule/deps/linenoise/example.c index ea0b515..a2f0936 100644 --- a/redis.submodule/deps/linenoise/example.c +++ b/redis.submodule/deps/linenoise/example.c @@ -1,5 +1,6 @@ #include #include +#include #include "linenoise.h" @@ -10,16 +11,52 @@ void completion(const char *buf, linenoiseCompletions *lc) { } } -int main(void) { +int main(int argc, char **argv) { char *line; + char *prgname = argv[0]; + /* Parse options, with --multiline we enable multi line editing. */ + while(argc > 1) { + argc--; + argv++; + if (!strcmp(*argv,"--multiline")) { + linenoiseSetMultiLine(1); + printf("Multi-line mode enabled.\n"); + } else if (!strcmp(*argv,"--keycodes")) { + linenoisePrintKeyCodes(); + exit(0); + } else { + fprintf(stderr, "Usage: %s [--multiline] [--keycodes]\n", prgname); + exit(1); + } + } + + /* Set the completion callback. This will be called every time the + * user uses the key. */ linenoiseSetCompletionCallback(completion); + + /* Load history from file. 
The history file is just a plain text file + * where entries are separated by newlines. */ linenoiseHistoryLoad("history.txt"); /* Load the history at startup */ + + /* Now this is the main loop of the typical linenoise-based application. + * The call to linenoise() will block as long as the user types something + * and presses enter. + * + * The typed string is returned as a malloc() allocated string by + * linenoise, so the user needs to free() it. */ while((line = linenoise("hello> ")) != NULL) { - if (line[0] != '\0') { + /* Do something with the string. */ + if (line[0] != '\0' && line[0] != '/') { printf("echo: '%s'\n", line); - linenoiseHistoryAdd(line); - linenoiseHistorySave("history.txt"); /* Save every new entry */ + linenoiseHistoryAdd(line); /* Add to the history. */ + linenoiseHistorySave("history.txt"); /* Save the history on disk. */ + } else if (!strncmp(line,"/historylen",11)) { + /* The "/historylen" command will change the history len. */ + int len = atoi(line+11); + linenoiseHistorySetMaxLen(len); + } else if (line[0] == '/') { + printf("Unreconized command: %s\n", line); } free(line); } diff --git a/redis.submodule/deps/linenoise/linenoise.c b/redis.submodule/deps/linenoise/linenoise.c index aef5cdd..36c0c5f 100644 --- a/redis.submodule/deps/linenoise/linenoise.c +++ b/redis.submodule/deps/linenoise/linenoise.c @@ -2,7 +2,7 @@ * line editing lib needs to be 20,000 lines of C code. * * You can find the latest source code at: - * + * * http://github.com/antirez/linenoise * * Does a number of crazy assumptions that happen to be true in 99.9999% of @@ -14,18 +14,18 @@ * Copyright (c) 2010-2013, Pieter Noordhuis * * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: - * + * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -37,7 +37,7 @@ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * + * * ------------------------------------------------------------------------ * * References: @@ -56,10 +56,6 @@ * flickering effect with some slow terminal, but the lesser sequences * the more compatible. 
* - * CHA (Cursor Horizontal Absolute) - * Sequence: ESC [ n G - * Effect: moves cursor to column n - * * EL (Erase Line) * Sequence: ESC [ n K * Effect: if n is 0 or missing, clear from cursor to end of line @@ -68,7 +64,19 @@ * * CUF (CUrsor Forward) * Sequence: ESC [ n C - * Effect: moves cursor forward of n chars + * Effect: moves cursor forward n chars + * + * CUB (CUrsor Backward) + * Sequence: ESC [ n D + * Effect: moves cursor backward n chars + * + * The following is used to get the terminal width if getting + * the width with the TIOCGWINSZ ioctl fails + * + * DSR (Device Status Report) + * Sequence: ESC [ 6 n + * Effect: reports the current cusor position as ESC [ n ; m R + * where n is the row and m is the column * * When multi line mode is enabled, we also use an additional escape * sequence. However multi line editing is disabled by default. @@ -81,17 +89,18 @@ * Sequence: ESC [ n B * Effect: moves cursor down of n chars. * - * The following are used to clear the screen: ESC [ H ESC [ 2 J - * This is actually composed of two sequences: + * When linenoiseClearScreen() is called, two additional escape sequences + * are used in order to clear the screen and position the cursor at home + * position. * - * cursorhome + * CUP (Cursor position) * Sequence: ESC [ H * Effect: moves the cursor to upper left corner * - * ED2 (Clear entire screen) + * ED (Erase display) * Sequence: ESC [ 2 J * Effect: clear the whole screen - * + * */ #include @@ -332,7 +341,7 @@ static void freeCompletions(linenoiseCompletions *lc) { /* This is an helper function for linenoiseEdit() and is called when the * user types the key in order to complete the string currently in the * input. - * + * * The state of the editing is encapsulated into the pointed linenoiseState * structure as described in the structure definition. */ static int completeLine(struct linenoiseState *ls) { @@ -459,7 +468,7 @@ static void refreshSingleLine(struct linenoiseState *l) { size_t len = l->len; size_t pos = l->pos; struct abuf ab; - + while((plen+pos) >= l->cols) { buf++; len--; @@ -471,7 +480,7 @@ static void refreshSingleLine(struct linenoiseState *l) { abInit(&ab); /* Cursor to left edge */ - snprintf(seq,64,"\x1b[0G"); + snprintf(seq,64,"\r"); abAppend(&ab,seq,strlen(seq)); /* Write the prompt and the current buffer content */ abAppend(&ab,l->prompt,strlen(l->prompt)); @@ -480,7 +489,7 @@ static void refreshSingleLine(struct linenoiseState *l) { snprintf(seq,64,"\x1b[0K"); abAppend(&ab,seq,strlen(seq)); /* Move cursor to original position. */ - snprintf(seq,64,"\x1b[0G\x1b[%dC", (int)(pos+plen)); + snprintf(seq,64,"\r\x1b[%dC", (int)(pos+plen)); abAppend(&ab,seq,strlen(seq)); if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ abFree(&ab); @@ -496,6 +505,7 @@ static void refreshMultiLine(struct linenoiseState *l) { int rows = (plen+l->len+l->cols-1)/l->cols; /* rows used by current buf. */ int rpos = (plen+l->oldpos+l->cols)/l->cols; /* cursor relative row. */ int rpos2; /* rpos after refresh. */ + int col; /* colum position, zero-based. */ int old_rows = l->maxrows; int fd = l->ofd, j; struct abuf ab; @@ -515,15 +525,15 @@ static void refreshMultiLine(struct linenoiseState *l) { /* Now for every row clear it, go up. */ for (j = 0; j < old_rows-1; j++) { lndebug("clear+up"); - snprintf(seq,64,"\x1b[0G\x1b[0K\x1b[1A"); + snprintf(seq,64,"\r\x1b[0K\x1b[1A"); abAppend(&ab,seq,strlen(seq)); } /* Clean the top line. 
*/ lndebug("clear"); - snprintf(seq,64,"\x1b[0G\x1b[0K"); + snprintf(seq,64,"\r\x1b[0K"); abAppend(&ab,seq,strlen(seq)); - + /* Write the prompt and the current buffer content */ abAppend(&ab,l->prompt,strlen(l->prompt)); abAppend(&ab,l->buf,l->len); @@ -536,7 +546,7 @@ static void refreshMultiLine(struct linenoiseState *l) { { lndebug(""); abAppend(&ab,"\n",1); - snprintf(seq,64,"\x1b[0G"); + snprintf(seq,64,"\r"); abAppend(&ab,seq,strlen(seq)); rows++; if (rows > (int)l->maxrows) l->maxrows = rows; @@ -554,8 +564,12 @@ static void refreshMultiLine(struct linenoiseState *l) { } /* Set column. */ - lndebug("set col %d", 1+((plen+(int)l->pos) % (int)l->cols)); - snprintf(seq,64,"\x1b[%dG", 1+((plen+(int)l->pos) % (int)l->cols)); + col = (plen+(int)l->pos) % (int)l->cols; + lndebug("set col %d", 1+col); + if (col) + snprintf(seq,64,"\r\x1b[%dC", col); + else + snprintf(seq,64,"\r"); abAppend(&ab,seq,strlen(seq)); lndebug("\n"); @@ -732,7 +746,7 @@ static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, /* The latest history entry is always our current buffer, that * initially is just an empty string. */ linenoiseHistoryAdd(""); - + if (write(l.ofd,prompt,l.plen) == -1) return -1; while(1) { char c; @@ -757,6 +771,7 @@ static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, case ENTER: /* enter */ history_len--; free(history[history_len]); + if (mlmode) linenoiseEditMoveEnd(&l); return (int)l.len; case CTRL_C: /* ctrl-c */ errno = EAGAIN; @@ -765,8 +780,8 @@ static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, case 8: /* ctrl-h */ linenoiseEditBackspace(&l); break; - case CTRL_D: /* ctrl-d, remove char at right of cursor, or of the - line is empty, act as end-of-file. */ + case CTRL_D: /* ctrl-d, remove char at right of cursor, or if the + line is empty, act as end-of-file. */ if (l.len > 0) { linenoiseEditDelete(&l); } else { @@ -904,7 +919,7 @@ void linenoisePrintKeyCodes(void) { printf("'%c' %02x (%d) (type quit to exit)\n", isprint(c) ? c : '?', (int)c, (int)c); - printf("\x1b[0G"); /* Go left edge manually, we are in raw mode. */ + printf("\r"); /* Go left edge manually, we are in raw mode. */ fflush(stdout); } disableRawMode(STDIN_FILENO); @@ -1058,7 +1073,7 @@ int linenoiseHistorySetMaxLen(int len) { int linenoiseHistorySave(const char *filename) { FILE *fp = fopen(filename,"w"); int j; - + if (fp == NULL) return -1; for (j = 0; j < history_len; j++) fprintf(fp,"%s\n",history[j]); @@ -1074,12 +1089,12 @@ int linenoiseHistorySave(const char *filename) { int linenoiseHistoryLoad(const char *filename) { FILE *fp = fopen(filename,"r"); char buf[LINENOISE_MAX_LINE]; - + if (fp == NULL) return -1; while (fgets(buf,LINENOISE_MAX_LINE,fp) != NULL) { char *p; - + p = strchr(buf,'\r'); if (!p) p = strchr(buf,'\n'); if (p) *p = '\0'; diff --git a/redis.submodule/deps/linenoise/linenoise.h b/redis.submodule/deps/linenoise/linenoise.h index e22ebd3..36394eb 100644 --- a/redis.submodule/deps/linenoise/linenoise.h +++ b/redis.submodule/deps/linenoise/linenoise.h @@ -9,18 +9,18 @@ * Copyright (c) 2010, Pieter Noordhuis * * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: - * + * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR diff --git a/redis.submodule/deps/lua/src/Makefile b/redis.submodule/deps/lua/src/Makefile index 34b0c36..f3bba2f 100644 --- a/redis.submodule/deps/lua/src/Makefile +++ b/redis.submodule/deps/lua/src/Makefile @@ -25,9 +25,10 @@ PLATS= aix ansi bsd freebsd generic linux macosx mingw posix solaris LUA_A= liblua.a CORE_O= lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o lmem.o \ lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o \ - lundump.o lvm.o lzio.o strbuf.o + lundump.o lvm.o lzio.o strbuf.o fpconv.o LIB_O= lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o \ - lstrlib.o loadlib.o linit.o lua_cjson.o lua_struct.o lua_cmsgpack.o + lstrlib.o loadlib.o linit.o lua_cjson.o lua_struct.o lua_cmsgpack.o \ + lua_bit.o LUA_T= lua LUA_O= lua.o diff --git a/redis.submodule/deps/lua/src/ldo.c b/redis.submodule/deps/lua/src/ldo.c index d1bf786..514f7a2 100644 --- a/redis.submodule/deps/lua/src/ldo.c +++ b/redis.submodule/deps/lua/src/ldo.c @@ -495,7 +495,7 @@ static void f_parser (lua_State *L, void *ud) { struct SParser *p = cast(struct SParser *, ud); int c = luaZ_lookahead(p->z); luaC_checkGC(L); - tf = ((c == LUA_SIGNATURE[0]) ? luaU_undump : luaY_parser)(L, p->z, + tf = (luaY_parser)(L, p->z, &p->buff, p->name); cl = luaF_newLclosure(L, tf->nups, hvalue(gt(L))); cl->l.p = tf; diff --git a/redis.submodule/deps/lua/src/lua_cjson.c b/redis.submodule/deps/lua/src/lua_cjson.c index 2e272b0..c26c0d7 100644 --- a/redis.submodule/deps/lua/src/lua_cjson.c +++ b/redis.submodule/deps/lua/src/lua_cjson.c @@ -1,8 +1,6 @@ -#define VERSION "1.0.3" - -/* CJSON - JSON support for Lua +/* Lua CJSON - JSON support for Lua * - * Copyright (c) 2010-2011 Mark Pulford + * Copyright (c) 2010-2012 Mark Pulford * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the @@ -41,22 +39,42 @@ #include #include #include +#include #include "lua.h" #include "lauxlib.h" #include "strbuf.h" +#include "fpconv.h" + +#include "../../../src/solarisfixes.h" + +#ifndef CJSON_MODNAME +#define CJSON_MODNAME "cjson" +#endif + +#ifndef CJSON_VERSION +#define CJSON_VERSION "2.1.0" +#endif -#ifdef MISSING_ISINF +/* Workaround for Solaris platforms missing isinf() */ +#if !defined(isinf) && (defined(USE_INTERNAL_ISINF) || defined(MISSING_ISINF)) #define isinf(x) (!isnan(x) && isnan((x) - (x))) #endif #define DEFAULT_SPARSE_CONVERT 0 #define DEFAULT_SPARSE_RATIO 2 #define DEFAULT_SPARSE_SAFE 10 -#define DEFAULT_MAX_DEPTH 20 -#define DEFAULT_ENCODE_REFUSE_BADNUM 1 -#define DEFAULT_DECODE_REFUSE_BADNUM 0 +#define DEFAULT_ENCODE_MAX_DEPTH 1000 +#define DEFAULT_DECODE_MAX_DEPTH 1000 +#define DEFAULT_ENCODE_INVALID_NUMBERS 0 +#define DEFAULT_DECODE_INVALID_NUMBERS 1 #define DEFAULT_ENCODE_KEEP_BUFFER 1 +#define DEFAULT_ENCODE_NUMBER_PRECISION 14 + +#ifdef DISABLE_INVALID_NUMBERS +#undef DEFAULT_DECODE_INVALID_NUMBERS +#define DEFAULT_DECODE_INVALID_NUMBERS 0 +#endif typedef enum { T_OBJ_BEGIN, @@ -96,29 +114,29 @@ static const char *json_token_type_name[] = { typedef struct { json_token_type_t 
ch2token[256]; char escape2char[256]; /* Decoding */ -#if 0 - char escapes[35][8]; /* Pre-generated escape string buffer */ - char *char2escape[256]; /* Encoding */ -#endif + + /* encode_buf is only allocated and used when + * encode_keep_buffer is set */ strbuf_t encode_buf; - char number_fmt[8]; /* "%.XXg\0" */ - int current_depth; int encode_sparse_convert; int encode_sparse_ratio; int encode_sparse_safe; int encode_max_depth; - int encode_refuse_badnum; - int decode_refuse_badnum; - int encode_keep_buffer; + int encode_invalid_numbers; /* 2 => Encode as "null" */ int encode_number_precision; + int encode_keep_buffer; + + int decode_invalid_numbers; + int decode_max_depth; } json_config_t; typedef struct { const char *data; - int index; + const char *ptr; strbuf_t *tmp; /* Temporary storage for strings */ json_config_t *cfg; + int current_depth; } json_parse_t; typedef struct { @@ -171,29 +189,76 @@ static const char *char2escape[256] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; -static int json_config_key; - /* ===== CONFIGURATION ===== */ static json_config_t *json_fetch_config(lua_State *l) { json_config_t *cfg; - lua_pushlightuserdata(l, &json_config_key); - lua_gettable(l, LUA_REGISTRYINDEX); - cfg = lua_touserdata(l, -1); + cfg = lua_touserdata(l, lua_upvalueindex(1)); if (!cfg) luaL_error(l, "BUG: Unable to fetch CJSON configuration"); - lua_pop(l, 1); - return cfg; } -static void json_verify_arg_count(lua_State *l, int args) +/* Ensure the correct number of arguments have been provided. + * Pad with nil to allow other functions to simply check arg[i] + * to find whether an argument was provided */ +static json_config_t *json_arg_init(lua_State *l, int args) { luaL_argcheck(l, lua_gettop(l) <= args, args + 1, "found too many arguments"); + + while (lua_gettop(l) < args) + lua_pushnil(l); + + return json_fetch_config(l); +} + +/* Process integer options for configuration functions */ +static int json_integer_option(lua_State *l, int optindex, int *setting, + int min, int max) +{ + char errmsg[64]; + int value; + + if (!lua_isnil(l, optindex)) { + value = luaL_checkinteger(l, optindex); + snprintf(errmsg, sizeof(errmsg), "expected integer between %d and %d", min, max); + luaL_argcheck(l, min <= value && value <= max, 1, errmsg); + *setting = value; + } + + lua_pushinteger(l, *setting); + + return 1; +} + +/* Process enumerated arguments for a configuration function */ +static int json_enum_option(lua_State *l, int optindex, int *setting, + const char **options, int bool_true) +{ + static const char *bool_options[] = { "off", "on", NULL }; + + if (!options) { + options = bool_options; + bool_true = 1; + } + + if (!lua_isnil(l, optindex)) { + if (bool_true && lua_isboolean(l, optindex)) + *setting = lua_toboolean(l, optindex) * bool_true; + else + *setting = luaL_checkoption(l, optindex, NULL, options); + } + + if (bool_true && (*setting == 0 || *setting == bool_true)) + lua_pushboolean(l, *setting); + else + lua_pushstring(l, options[*setting]); + + return 1; } /* Configures handling of extremely sparse arrays: @@ -202,29 +267,11 @@ static void json_verify_arg_count(lua_State *l, int args) * safe: Always use an array when the max index <= safe */ static int json_cfg_encode_sparse_array(lua_State *l) { - json_config_t *cfg; - int val; - - json_verify_arg_count(l, 3); - cfg = json_fetch_config(l); - - switch (lua_gettop(l)) { - case 3: - val = luaL_checkinteger(l, 3); - luaL_argcheck(l, val >= 0, 3, "expected integer >= 0"); - cfg->encode_sparse_safe = val; - case 2: - val = 
luaL_checkinteger(l, 2); - luaL_argcheck(l, val >= 0, 2, "expected integer >= 0"); - cfg->encode_sparse_ratio = val; - case 1: - luaL_argcheck(l, lua_isboolean(l, 1), 1, "expected boolean"); - cfg->encode_sparse_convert = lua_toboolean(l, 1); - } + json_config_t *cfg = json_arg_init(l, 3); - lua_pushboolean(l, cfg->encode_sparse_convert); - lua_pushinteger(l, cfg->encode_sparse_ratio); - lua_pushinteger(l, cfg->encode_sparse_safe); + json_enum_option(l, 1, &cfg->encode_sparse_convert, NULL, 1); + json_integer_option(l, 2, &cfg->encode_sparse_ratio, 0, INT_MAX); + json_integer_option(l, 3, &cfg->encode_sparse_safe, 0, INT_MAX); return 3; } @@ -233,108 +280,80 @@ static int json_cfg_encode_sparse_array(lua_State *l) * encoding */ static int json_cfg_encode_max_depth(lua_State *l) { - json_config_t *cfg; - int depth; + json_config_t *cfg = json_arg_init(l, 1); - json_verify_arg_count(l, 1); - cfg = json_fetch_config(l); - - if (lua_gettop(l)) { - depth = luaL_checkinteger(l, 1); - luaL_argcheck(l, depth > 0, 1, "expected positive integer"); - cfg->encode_max_depth = depth; - } - - lua_pushinteger(l, cfg->encode_max_depth); - - return 1; + return json_integer_option(l, 1, &cfg->encode_max_depth, 1, INT_MAX); } -static void json_set_number_precision(json_config_t *cfg, int prec) +/* Configures the maximum number of nested arrays/objects allowed when + * encoding */ +static int json_cfg_decode_max_depth(lua_State *l) { - cfg->encode_number_precision = prec; - sprintf(cfg->number_fmt, "%%.%dg", prec); + json_config_t *cfg = json_arg_init(l, 1); + + return json_integer_option(l, 1, &cfg->decode_max_depth, 1, INT_MAX); } /* Configures number precision when converting doubles to text */ static int json_cfg_encode_number_precision(lua_State *l) { - json_config_t *cfg; - int precision; - - json_verify_arg_count(l, 1); - cfg = json_fetch_config(l); - - if (lua_gettop(l)) { - precision = luaL_checkinteger(l, 1); - luaL_argcheck(l, 1 <= precision && precision <= 14, 1, - "expected integer between 1 and 14"); - json_set_number_precision(cfg, precision); - } + json_config_t *cfg = json_arg_init(l, 1); - lua_pushinteger(l, cfg->encode_number_precision); - - return 1; + return json_integer_option(l, 1, &cfg->encode_number_precision, 1, 14); } /* Configures JSON encoding buffer persistence */ static int json_cfg_encode_keep_buffer(lua_State *l) { - json_config_t *cfg; + json_config_t *cfg = json_arg_init(l, 1); + int old_value; - json_verify_arg_count(l, 1); - cfg = json_fetch_config(l); + old_value = cfg->encode_keep_buffer; - if (lua_gettop(l)) { - luaL_checktype(l, 1, LUA_TBOOLEAN); - cfg->encode_keep_buffer = lua_toboolean(l, 1); - } + json_enum_option(l, 1, &cfg->encode_keep_buffer, NULL, 1); - lua_pushboolean(l, cfg->encode_keep_buffer); + /* Init / free the buffer if the setting has changed */ + if (old_value ^ cfg->encode_keep_buffer) { + if (cfg->encode_keep_buffer) + strbuf_init(&cfg->encode_buf, 0); + else + strbuf_free(&cfg->encode_buf); + } return 1; } -/* On argument: decode enum and set config variables - * **options must point to a NULL terminated array of 4 enums - * Returns: current enum value */ -static void json_enum_option(lua_State *l, const char **options, - int *opt1, int *opt2) +#if defined(DISABLE_INVALID_NUMBERS) && !defined(USE_INTERNAL_FPCONV) +void json_verify_invalid_number_setting(lua_State *l, int *setting) { - int setting; + if (*setting == 1) { + *setting = 0; + luaL_error(l, "Infinity, NaN, and/or hexadecimal numbers are not supported."); + } +} +#else +#define 
json_verify_invalid_number_setting(l, s) do { } while(0) +#endif - if (lua_gettop(l)) { - if (lua_isboolean(l, 1)) - setting = lua_toboolean(l, 1) * 3; - else - setting = luaL_checkoption(l, 1, NULL, options); +static int json_cfg_encode_invalid_numbers(lua_State *l) +{ + static const char *options[] = { "off", "on", "null", NULL }; + json_config_t *cfg = json_arg_init(l, 1); - *opt1 = setting & 1 ? 1 : 0; - *opt2 = setting & 2 ? 1 : 0; - } else { - setting = *opt1 | (*opt2 << 1); - } + json_enum_option(l, 1, &cfg->encode_invalid_numbers, options, 1); - if (setting) - lua_pushstring(l, options[setting]); - else - lua_pushboolean(l, 0); -} + json_verify_invalid_number_setting(l, &cfg->encode_invalid_numbers); + return 1; +} -/* When enabled, rejects: NaN, Infinity, hexidecimal numbers */ -static int json_cfg_refuse_invalid_numbers(lua_State *l) +static int json_cfg_decode_invalid_numbers(lua_State *l) { - static const char *options_enc_dec[] = { "none", "encode", "decode", - "both", NULL }; - json_config_t *cfg; + json_config_t *cfg = json_arg_init(l, 1); - json_verify_arg_count(l, 1); - cfg = json_fetch_config(l); + json_enum_option(l, 1, &cfg->decode_invalid_numbers, NULL, 1); - json_enum_option(l, options_enc_dec, - &cfg->encode_refuse_badnum, - &cfg->decode_refuse_badnum); + json_verify_invalid_number_setting(l, &cfg->encode_invalid_numbers); return 1; } @@ -364,16 +383,19 @@ static void json_create_config(lua_State *l) lua_setfield(l, -2, "__gc"); lua_setmetatable(l, -2); - strbuf_init(&cfg->encode_buf, 0); - cfg->encode_sparse_convert = DEFAULT_SPARSE_CONVERT; cfg->encode_sparse_ratio = DEFAULT_SPARSE_RATIO; cfg->encode_sparse_safe = DEFAULT_SPARSE_SAFE; - cfg->encode_max_depth = DEFAULT_MAX_DEPTH; - cfg->encode_refuse_badnum = DEFAULT_ENCODE_REFUSE_BADNUM; - cfg->decode_refuse_badnum = DEFAULT_DECODE_REFUSE_BADNUM; + cfg->encode_max_depth = DEFAULT_ENCODE_MAX_DEPTH; + cfg->decode_max_depth = DEFAULT_DECODE_MAX_DEPTH; + cfg->encode_invalid_numbers = DEFAULT_ENCODE_INVALID_NUMBERS; + cfg->decode_invalid_numbers = DEFAULT_DECODE_INVALID_NUMBERS; cfg->encode_keep_buffer = DEFAULT_ENCODE_KEEP_BUFFER; - json_set_number_precision(cfg, 14); + cfg->encode_number_precision = DEFAULT_ENCODE_NUMBER_PRECISION; + +#if DEFAULT_ENCODE_KEEP_BUFFER > 0 + strbuf_init(&cfg->encode_buf, 0); +#endif /* Decoding init */ @@ -419,41 +441,15 @@ static void json_create_config(lua_State *l) cfg->escape2char['f'] = '\f'; cfg->escape2char['r'] = '\r'; cfg->escape2char['u'] = 'u'; /* Unicode parsing required */ - - -#if 0 - /* Initialise separate storage for pre-generated escape codes. - * Escapes 0-31 map directly, 34, 92, 127 follow afterwards to - * save memory. 
*/ - for (i = 0 ; i < 32; i++) - sprintf(cfg->escapes[i], "\\u%04x", i); - strcpy(cfg->escapes[8], "\b"); /* Override simpler escapes */ - strcpy(cfg->escapes[9], "\t"); - strcpy(cfg->escapes[10], "\n"); - strcpy(cfg->escapes[12], "\f"); - strcpy(cfg->escapes[13], "\r"); - strcpy(cfg->escapes[32], "\\\""); /* chr(34) */ - strcpy(cfg->escapes[33], "\\\\"); /* chr(92) */ - sprintf(cfg->escapes[34], "\\u%04x", 127); /* char(127) */ - - /* Initialise encoding escape lookup table */ - for (i = 0; i < 32; i++) - cfg->char2escape[i] = cfg->escapes[i]; - for (i = 32; i < 256; i++) - cfg->char2escape[i] = NULL; - cfg->char2escape[34] = cfg->escapes[32]; - cfg->char2escape[92] = cfg->escapes[33]; - cfg->char2escape[127] = cfg->escapes[34]; -#endif } /* ===== ENCODING ===== */ -static void json_encode_exception(lua_State *l, json_config_t *cfg, int lindex, +static void json_encode_exception(lua_State *l, json_config_t *cfg, strbuf_t *json, int lindex, const char *reason) { if (!cfg->encode_keep_buffer) - strbuf_free(&cfg->encode_buf); + strbuf_free(json); luaL_error(l, "Cannot serialise %s: %s", lua_typename(l, lua_type(l, lindex)), reason); } @@ -494,7 +490,7 @@ static void json_append_string(lua_State *l, strbuf_t *json, int lindex) * -1 object (not a pure array) * >=0 elements in array */ -static int lua_array_length(lua_State *l, json_config_t *cfg) +static int lua_array_length(lua_State *l, json_config_t *cfg, strbuf_t *json) { double k; int max; @@ -529,7 +525,7 @@ static int lua_array_length(lua_State *l, json_config_t *cfg) max > items * cfg->encode_sparse_ratio && max > cfg->encode_sparse_safe) { if (!cfg->encode_sparse_convert) - json_encode_exception(l, cfg, -1, "excessively sparse array"); + json_encode_exception(l, cfg, json, -1, "excessively sparse array"); return -1; } @@ -537,31 +533,41 @@ static int lua_array_length(lua_State *l, json_config_t *cfg) return max; } -static void json_encode_descend(lua_State *l, json_config_t *cfg) +static void json_check_encode_depth(lua_State *l, json_config_t *cfg, + int current_depth, strbuf_t *json) { - cfg->current_depth++; + /* Ensure there are enough slots free to traverse a table (key, + * value) and push a string for a potential error message. + * + * Unlike "decode", the key and value are still on the stack when + * lua_checkstack() is called. Hence an extra slot for luaL_error() + * below is required just in case the next check to lua_checkstack() + * fails. + * + * While this won't cause a crash due to the EXTRA_STACK reserve + * slots, it would still be an improper use of the API. 
*/ + if (current_depth <= cfg->encode_max_depth && lua_checkstack(l, 3)) + return; - if (cfg->current_depth > cfg->encode_max_depth) { - if (!cfg->encode_keep_buffer) - strbuf_free(&cfg->encode_buf); - luaL_error(l, "Cannot serialise, excessive nesting (%d)", - cfg->current_depth); - } + if (!cfg->encode_keep_buffer) + strbuf_free(json); + + luaL_error(l, "Cannot serialise, excessive nesting (%d)", + current_depth); } -static void json_append_data(lua_State *l, json_config_t *cfg, strbuf_t *json); +static void json_append_data(lua_State *l, json_config_t *cfg, + int current_depth, strbuf_t *json); /* json_append_array args: * - lua_State * - JSON strbuf * - Size of passwd Lua array (top of stack) */ -static void json_append_array(lua_State *l, json_config_t *cfg, strbuf_t *json, - int array_length) +static void json_append_array(lua_State *l, json_config_t *cfg, int current_depth, + strbuf_t *json, int array_length) { int comma, i; - json_encode_descend(l, cfg); - strbuf_append_char(json, '['); comma = 0; @@ -572,38 +578,48 @@ static void json_append_array(lua_State *l, json_config_t *cfg, strbuf_t *json, comma = 1; lua_rawgeti(l, -1, i); - json_append_data(l, cfg, json); + json_append_data(l, cfg, current_depth, json); lua_pop(l, 1); } strbuf_append_char(json, ']'); - - cfg->current_depth--; } -static void json_append_number(lua_State *l, strbuf_t *json, int index, - json_config_t *cfg) +static void json_append_number(lua_State *l, json_config_t *cfg, + strbuf_t *json, int lindex) { - double num = lua_tonumber(l, index); + double num = lua_tonumber(l, lindex); + int len; - if (cfg->encode_refuse_badnum && (isinf(num) || isnan(num))) - json_encode_exception(l, cfg, index, "must not be NaN or Inf"); + if (cfg->encode_invalid_numbers == 0) { + /* Prevent encoding invalid numbers */ + if (isinf(num) || isnan(num)) + json_encode_exception(l, cfg, json, lindex, "must not be NaN or Inf"); + } else if (cfg->encode_invalid_numbers == 1) { + /* Encode invalid numbers, but handle "nan" separately + * since some platforms may encode as "-nan". */ + if (isnan(num)) { + strbuf_append_mem(json, "nan", 3); + return; + } + } else { + /* Encode invalid numbers as "null" */ + if (isinf(num) || isnan(num)) { + strbuf_append_mem(json, "null", 4); + return; + } + } - /* Lowest double printed with %.14g is 21 characters long: - * -1.7976931348623e+308 - * - * Use 32 to include the \0, and a few extra just in case.. 
- */ - strbuf_append_fmt(json, 32, cfg->number_fmt, num); + strbuf_ensure_empty_length(json, FPCONV_G_FMT_BUFSIZE); + len = fpconv_g_fmt(strbuf_empty_ptr(json), num, cfg->encode_number_precision); + strbuf_extend_length(json, len); } static void json_append_object(lua_State *l, json_config_t *cfg, - strbuf_t *json) + int current_depth, strbuf_t *json) { int comma, keytype; - json_encode_descend(l, cfg); - /* Object */ strbuf_append_char(json, '{'); @@ -620,30 +636,29 @@ static void json_append_object(lua_State *l, json_config_t *cfg, keytype = lua_type(l, -2); if (keytype == LUA_TNUMBER) { strbuf_append_char(json, '"'); - json_append_number(l, json, -2, cfg); + json_append_number(l, cfg, json, -2); strbuf_append_mem(json, "\":", 2); } else if (keytype == LUA_TSTRING) { json_append_string(l, json, -2); strbuf_append_char(json, ':'); } else { - json_encode_exception(l, cfg, -2, + json_encode_exception(l, cfg, json, -2, "table key must be a number or string"); /* never returns */ } /* table, key, value */ - json_append_data(l, cfg, json); + json_append_data(l, cfg, current_depth, json); lua_pop(l, 1); /* table, key */ } strbuf_append_char(json, '}'); - - cfg->current_depth--; } /* Serialise Lua data into JSON string. */ -static void json_append_data(lua_State *l, json_config_t *cfg, strbuf_t *json) +static void json_append_data(lua_State *l, json_config_t *cfg, + int current_depth, strbuf_t *json) { int len; @@ -652,7 +667,7 @@ static void json_append_data(lua_State *l, json_config_t *cfg, strbuf_t *json) json_append_string(l, json, -1); break; case LUA_TNUMBER: - json_append_number(l, json, -1, cfg); + json_append_number(l, cfg, json, -1); break; case LUA_TBOOLEAN: if (lua_toboolean(l, -1)) @@ -661,11 +676,13 @@ static void json_append_data(lua_State *l, json_config_t *cfg, strbuf_t *json) strbuf_append_mem(json, "false", 5); break; case LUA_TTABLE: - len = lua_array_length(l, cfg); + current_depth++; + json_check_encode_depth(l, cfg, current_depth, json); + len = lua_array_length(l, cfg, json); if (len > 0) - json_append_array(l, cfg, json, len); + json_append_array(l, cfg, current_depth, json, len); else - json_append_object(l, cfg, json); + json_append_object(l, cfg, current_depth, json); break; case LUA_TNIL: strbuf_append_mem(json, "null", 4); @@ -678,38 +695,38 @@ static void json_append_data(lua_State *l, json_config_t *cfg, strbuf_t *json) default: /* Remaining types (LUA_TFUNCTION, LUA_TUSERDATA, LUA_TTHREAD, * and LUA_TLIGHTUSERDATA) cannot be serialised */ - json_encode_exception(l, cfg, -1, "type not supported"); + json_encode_exception(l, cfg, json, -1, "type not supported"); /* never returns */ } } static int json_encode(lua_State *l) { - json_config_t *cfg; + json_config_t *cfg = json_fetch_config(l); + strbuf_t local_encode_buf; + strbuf_t *encode_buf; char *json; int len; - /* Can't use json_verify_arg_count() since we need to ensure - * there is only 1 argument */ luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); - cfg = json_fetch_config(l); - cfg->current_depth = 0; - - /* Reset the persistent buffer if it exists. - * Otherwise allocate a new buffer. 
*/ - if (strbuf_allocated(&cfg->encode_buf)) - strbuf_reset(&cfg->encode_buf); - else - strbuf_init(&cfg->encode_buf, 0); + if (!cfg->encode_keep_buffer) { + /* Use private buffer */ + encode_buf = &local_encode_buf; + strbuf_init(encode_buf, 0); + } else { + /* Reuse existing buffer */ + encode_buf = &cfg->encode_buf; + strbuf_reset(encode_buf); + } - json_append_data(l, cfg, &cfg->encode_buf); - json = strbuf_string(&cfg->encode_buf, &len); + json_append_data(l, cfg, 0, encode_buf); + json = strbuf_string(encode_buf, &len); lua_pushlstring(l, json, len); if (!cfg->encode_keep_buffer) - strbuf_free(&cfg->encode_buf); + strbuf_free(encode_buf); return 1; } @@ -808,7 +825,7 @@ static int json_append_unicode_escape(json_parse_t *json) int escape_len = 6; /* Fetch UTF-16 code unit */ - codepoint = decode_hex4(&json->data[json->index + 2]); + codepoint = decode_hex4(json->ptr + 2); if (codepoint < 0) return -1; @@ -824,13 +841,13 @@ static int json_append_unicode_escape(json_parse_t *json) return -1; /* Ensure the next code is a unicode escape */ - if (json->data[json->index + escape_len] != '\\' || - json->data[json->index + escape_len + 1] != 'u') { + if (*(json->ptr + escape_len) != '\\' || + *(json->ptr + escape_len + 1) != 'u') { return -1; } /* Fetch the next codepoint */ - surrogate_low = decode_hex4(&json->data[json->index + 2 + escape_len]); + surrogate_low = decode_hex4(json->ptr + 2 + escape_len); if (surrogate_low < 0) return -1; @@ -852,7 +869,7 @@ static int json_append_unicode_escape(json_parse_t *json) /* Append bytes and advance parse index */ strbuf_append_mem_unsafe(json->tmp, utf8, len); - json->index += escape_len; + json->ptr += escape_len; return 0; } @@ -861,7 +878,7 @@ static void json_set_token_error(json_token_t *token, json_parse_t *json, const char *errtype) { token->type = T_ERROR; - token->index = json->index; + token->index = json->ptr - json->data; token->value.string = errtype; } @@ -871,15 +888,18 @@ static void json_next_string_token(json_parse_t *json, json_token_t *token) char ch; /* Caller must ensure a string is next */ - assert(json->data[json->index] == '"'); + assert(*json->ptr == '"'); /* Skip " */ - json->index++; + json->ptr++; /* json->tmp is the temporary strbuf used to accumulate the - * decoded string value. */ + * decoded string value. + * json->tmp is sized to handle JSON containing only a string value. 
+ */ strbuf_reset(json->tmp); - while ((ch = json->data[json->index]) != '"') { + + while ((ch = *json->ptr) != '"') { if (!ch) { /* Premature end of the string */ json_set_token_error(token, json, "unexpected end of string"); @@ -889,7 +909,7 @@ static void json_next_string_token(json_parse_t *json, json_token_t *token) /* Handle escapes */ if (ch == '\\') { /* Fetch escape character */ - ch = json->data[json->index + 1]; + ch = *(json->ptr + 1); /* Translate escape code and append to tmp string */ ch = escape2char[(unsigned char)ch]; @@ -907,14 +927,14 @@ static void json_next_string_token(json_parse_t *json, json_token_t *token) } /* Skip '\' */ - json->index++; + json->ptr++; } /* Append normal character or translated single character * Unicode escapes are handled above */ strbuf_append_char_unsafe(json->tmp, ch); - json->index++; + json->ptr++; } - json->index++; /* Eat final quote (") */ + json->ptr++; /* Eat final quote (") */ strbuf_ensure_null(json->tmp); @@ -928,7 +948,7 @@ static void json_next_string_token(json_parse_t *json, json_token_t *token) * json_next_number_token() uses strtod() which allows other forms: * - numbers starting with '+' * - NaN, -NaN, infinity, -infinity - * - hexidecimal numbers + * - hexadecimal numbers * - numbers with leading zeros * * json_is_invalid_number() detects "numbers" which may pass strtod()'s @@ -939,34 +959,33 @@ static void json_next_string_token(json_parse_t *json, json_token_t *token) */ static int json_is_invalid_number(json_parse_t *json) { - int i = json->index; + const char *p = json->ptr; /* Reject numbers starting with + */ - if (json->data[i] == '+') + if (*p == '+') return 1; /* Skip minus sign if it exists */ - if (json->data[i] == '-') - i++; + if (*p == '-') + p++; /* Reject numbers starting with 0x, or leading zeros */ - if (json->data[i] == '0') { - int ch2 = json->data[i + 1]; + if (*p == '0') { + int ch2 = *(p + 1); if ((ch2 | 0x20) == 'x' || /* Hex */ ('0' <= ch2 && ch2 <= '9')) /* Leading zero */ return 1; return 0; - } else if (json->data[i] <= '9') { + } else if (*p <= '9') { return 0; /* Ordinary number */ } - /* Reject inf/nan */ - if (!strncasecmp(&json->data[i], "inf", 3)) + if (!strncasecmp(p, "inf", 3)) return 1; - if (!strncasecmp(&json->data[i], "nan", 3)) + if (!strncasecmp(p, "nan", 3)) return 1; /* Pass all other numbers which may still be invalid, but @@ -976,35 +995,39 @@ static int json_is_invalid_number(json_parse_t *json) static void json_next_number_token(json_parse_t *json, json_token_t *token) { - const char *startptr; char *endptr; token->type = T_NUMBER; - startptr = &json->data[json->index]; - token->value.number = strtod(&json->data[json->index], &endptr); - if (startptr == endptr) + token->value.number = fpconv_strtod(json->ptr, &endptr); + if (json->ptr == endptr) json_set_token_error(token, json, "invalid number"); else - json->index += endptr - startptr; /* Skip the processed number */ + json->ptr = endptr; /* Skip the processed number */ return; } /* Fills in the token struct. * T_STRING will return a pointer to the json_parse_t temporary string - * T_ERROR will leave the json->index pointer at the error. + * T_ERROR will leave the json->ptr pointer at the error. */ static void json_next_token(json_parse_t *json, json_token_t *token) { - json_token_type_t *ch2token = json->cfg->ch2token; + const json_token_type_t *ch2token = json->cfg->ch2token; int ch; - /* Eat whitespace. 
FIXME: UGLY */ - token->type = ch2token[(unsigned char)json->data[json->index]]; - while (token->type == T_WHITESPACE) - token->type = ch2token[(unsigned char)json->data[++json->index]]; + /* Eat whitespace. */ + while (1) { + ch = (unsigned char)*(json->ptr); + token->type = ch2token[ch]; + if (token->type != T_WHITESPACE) + break; + json->ptr++; + } - token->index = json->index; + /* Store location of new token. Required when throwing errors + * for unexpected tokens (syntax errors). */ + token->index = json->ptr - json->data; /* Don't advance the pointer for an error or the end */ if (token->type == T_ERROR) { @@ -1018,14 +1041,13 @@ static void json_next_token(json_parse_t *json, json_token_t *token) /* Found a known single character token, advance index and return */ if (token->type != T_UNKNOWN) { - json->index++; + json->ptr++; return; } - /* Process characters which triggered T_UNKNOWN */ - ch = json->data[json->index]; - - /* Must use strncmp() to match the front of the JSON string. + /* Process characters which triggered T_UNKNOWN + * + * Must use strncmp() to match the front of the JSON string. * JSON identifier must be lowercase. * When strict_numbers if disabled, either case is allowed for * Infinity/NaN (since we are no longer following the spec..) */ @@ -1033,29 +1055,29 @@ static void json_next_token(json_parse_t *json, json_token_t *token) json_next_string_token(json, token); return; } else if (ch == '-' || ('0' <= ch && ch <= '9')) { - if (json->cfg->decode_refuse_badnum && json_is_invalid_number(json)) { + if (!json->cfg->decode_invalid_numbers && json_is_invalid_number(json)) { json_set_token_error(token, json, "invalid number"); return; } json_next_number_token(json, token); return; - } else if (!strncmp(&json->data[json->index], "true", 4)) { + } else if (!strncmp(json->ptr, "true", 4)) { token->type = T_BOOLEAN; token->value.boolean = 1; - json->index += 4; + json->ptr += 4; return; - } else if (!strncmp(&json->data[json->index], "false", 5)) { + } else if (!strncmp(json->ptr, "false", 5)) { token->type = T_BOOLEAN; token->value.boolean = 0; - json->index += 5; + json->ptr += 5; return; - } else if (!strncmp(&json->data[json->index], "null", 4)) { + } else if (!strncmp(json->ptr, "null", 4)) { token->type = T_NULL; - json->index += 4; + json->ptr += 4; return; - } else if (!json->cfg->decode_refuse_badnum && + } else if (json->cfg->decode_invalid_numbers && json_is_invalid_number(json)) { - /* When refuse_badnum is disabled, only attempt to process + /* When decode_invalid_numbers is enabled, only attempt to process * numbers we know are invalid JSON (Inf, NaN, hex) * This is required to generate an appropriate token error, * otherwise all bad tokens will register as "invalid number" @@ -1091,13 +1113,23 @@ static void json_throw_parse_error(lua_State *l, json_parse_t *json, exp, found, token->index + 1); } -static void json_decode_checkstack(lua_State *l, json_parse_t *json, int n) +static inline void json_decode_ascend(json_parse_t *json) { - if (lua_checkstack(l, n)) + json->current_depth--; +} + +static void json_decode_descend(lua_State *l, json_parse_t *json, int slots) +{ + json->current_depth++; + + if (json->current_depth <= json->cfg->decode_max_depth && + lua_checkstack(l, slots)) { return; + } strbuf_free(json->tmp); - luaL_error(l, "Too many nested data structures"); + luaL_error(l, "Found too many nested data structures (%d) at character %d", + json->current_depth, json->ptr - json->data); } static void json_parse_object_context(lua_State *l, 
json_parse_t *json) @@ -1106,7 +1138,7 @@ static void json_parse_object_context(lua_State *l, json_parse_t *json) /* 3 slots required: * .., table, key, value */ - json_decode_checkstack(l, json, 3); + json_decode_descend(l, json, 3); lua_newtable(l); @@ -1114,6 +1146,7 @@ static void json_parse_object_context(lua_State *l, json_parse_t *json) /* Handle empty objects */ if (token.type == T_OBJ_END) { + json_decode_ascend(json); return; } @@ -1137,8 +1170,10 @@ static void json_parse_object_context(lua_State *l, json_parse_t *json) json_next_token(json, &token); - if (token.type == T_OBJ_END) + if (token.type == T_OBJ_END) { + json_decode_ascend(json); return; + } if (token.type != T_COMMA) json_throw_parse_error(l, json, "comma or object end", &token); @@ -1155,15 +1190,17 @@ static void json_parse_array_context(lua_State *l, json_parse_t *json) /* 2 slots required: * .., table, value */ - json_decode_checkstack(l, json, 2); + json_decode_descend(l, json, 2); lua_newtable(l); json_next_token(json, &token); /* Handle empty arrays */ - if (token.type == T_ARR_END) + if (token.type == T_ARR_END) { + json_decode_ascend(json); return; + } for (i = 1; ; i++) { json_process_value(l, json, &token); @@ -1171,8 +1208,10 @@ static void json_parse_array_context(lua_State *l, json_parse_t *json) json_next_token(json, &token); - if (token.type == T_ARR_END) + if (token.type == T_ARR_END) { + json_decode_ascend(json); return; + } if (token.type != T_COMMA) json_throw_parse_error(l, json, "comma or array end", &token); @@ -1211,15 +1250,26 @@ static void json_process_value(lua_State *l, json_parse_t *json, } } -/* json_text must be null terminated string */ -static void lua_json_decode(lua_State *l, const char *json_text, int json_len) +static int json_decode(lua_State *l) { json_parse_t json; json_token_t token; + size_t json_len; + + luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); json.cfg = json_fetch_config(l); - json.data = json_text; - json.index = 0; + json.data = luaL_checklstring(l, 1, &json_len); + json.current_depth = 0; + json.ptr = json.data; + + /* Detect Unicode other than UTF-8 (see RFC 4627, Sec 3) + * + * CJSON can support any simple data type, hence only the first + * character is guaranteed to be ASCII (at worst: '"'). This is + * still enough to detect whether the wrong encoding is in use. */ + if (json_len >= 2 && (!json.data[0] || !json.data[1])) + luaL_error(l, "JSON parser does not support UTF-16 or UTF-32"); /* Ensure the temporary buffer can hold the entire string. * This means we no longer need to do length checks since the decoded @@ -1236,64 +1286,142 @@ static void lua_json_decode(lua_State *l, const char *json_text, int json_len) json_throw_parse_error(l, &json, "the end", &token); strbuf_free(json.tmp); + + return 1; } -static int json_decode(lua_State *l) +/* ===== INITIALISATION ===== */ + +#if !defined(LUA_VERSION_NUM) || LUA_VERSION_NUM < 502 +/* Compatibility for Lua 5.1. + * + * luaL_setfuncs() is used to create a module table where the functions have + * json_config_t as their first upvalue. Code borrowed from Lua 5.2 source. 
*/ +static void luaL_setfuncs (lua_State *l, const luaL_Reg *reg, int nup) { - const char *json; - size_t len; + int i; - json_verify_arg_count(l, 1); + luaL_checkstack(l, nup, "too many upvalues"); + for (; reg->name != NULL; reg++) { /* fill the table with given functions */ + for (i = 0; i < nup; i++) /* copy upvalues to the top */ + lua_pushvalue(l, -nup); + lua_pushcclosure(l, reg->func, nup); /* closure with those upvalues */ + lua_setfield(l, -(nup + 2), reg->name); + } + lua_pop(l, nup); /* remove upvalues */ +} +#endif - json = luaL_checklstring(l, 1, &len); +/* Call target function in protected mode with all supplied args. + * Assumes target function only returns a single non-nil value. + * Convert and return thrown errors as: nil, "error message" */ +static int json_protect_conversion(lua_State *l) +{ + int err; - /* Detect Unicode other than UTF-8 (see RFC 4627, Sec 3) - * - * CJSON can support any simple data type, hence only the first - * character is guaranteed to be ASCII (at worst: '"'). This is - * still enough to detect whether the wrong encoding is in use. */ - if (len >= 2 && (!json[0] || !json[1])) - luaL_error(l, "JSON parser does not support UTF-16 or UTF-32"); + /* Deliberately throw an error for invalid arguments */ + luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); - lua_json_decode(l, json, len); + /* pcall() the function stored as upvalue(1) */ + lua_pushvalue(l, lua_upvalueindex(1)); + lua_insert(l, 1); + err = lua_pcall(l, 1, 1, 0); + if (!err) + return 1; - return 1; -} + if (err == LUA_ERRRUN) { + lua_pushnil(l); + lua_insert(l, -2); + return 2; + } -/* ===== INITIALISATION ===== */ + /* Since we are not using a custom error handler, the only remaining + * errors are memory related */ + return luaL_error(l, "Memory allocation error in CJSON protected call"); +} -int luaopen_cjson(lua_State *l) +/* Return cjson module table */ +static int lua_cjson_new(lua_State *l) { luaL_Reg reg[] = { { "encode", json_encode }, { "decode", json_decode }, { "encode_sparse_array", json_cfg_encode_sparse_array }, { "encode_max_depth", json_cfg_encode_max_depth }, + { "decode_max_depth", json_cfg_decode_max_depth }, { "encode_number_precision", json_cfg_encode_number_precision }, { "encode_keep_buffer", json_cfg_encode_keep_buffer }, - { "refuse_invalid_numbers", json_cfg_refuse_invalid_numbers }, + { "encode_invalid_numbers", json_cfg_encode_invalid_numbers }, + { "decode_invalid_numbers", json_cfg_decode_invalid_numbers }, + { "new", lua_cjson_new }, { NULL, NULL } }; - /* Use json_fetch_config as a pointer. 
- * It's faster than using a config string, and more unique */ - lua_pushlightuserdata(l, &json_config_key); - json_create_config(l); - lua_settable(l, LUA_REGISTRYINDEX); + /* Initialise number conversions */ + fpconv_init(); - luaL_register(l, "cjson", reg); + /* cjson module table */ + lua_newtable(l); + + /* Register functions with config data as upvalue */ + json_create_config(l); + luaL_setfuncs(l, reg, 1); /* Set cjson.null */ lua_pushlightuserdata(l, NULL); lua_setfield(l, -2, "null"); - /* Set cjson.version */ - lua_pushliteral(l, VERSION); - lua_setfield(l, -2, "version"); + /* Set module name / version fields */ + lua_pushliteral(l, CJSON_MODNAME); + lua_setfield(l, -2, "_NAME"); + lua_pushliteral(l, CJSON_VERSION); + lua_setfield(l, -2, "_VERSION"); + + return 1; +} + +/* Return cjson.safe module table */ +static int lua_cjson_safe_new(lua_State *l) +{ + const char *func[] = { "decode", "encode", NULL }; + int i; + + lua_cjson_new(l); + + /* Fix new() method */ + lua_pushcfunction(l, lua_cjson_safe_new); + lua_setfield(l, -2, "new"); + + for (i = 0; func[i]; i++) { + lua_getfield(l, -1, func[i]); + lua_pushcclosure(l, json_protect_conversion, 1); + lua_setfield(l, -2, func[i]); + } + + return 1; +} + +int luaopen_cjson(lua_State *l) +{ + lua_cjson_new(l); + +#ifdef ENABLE_CJSON_GLOBAL + /* Register a global "cjson" table. */ + lua_pushvalue(l, -1); + lua_setglobal(l, CJSON_MODNAME); +#endif /* Return cjson table */ return 1; } +int luaopen_cjson_safe(lua_State *l) +{ + lua_cjson_safe_new(l); + + /* Return cjson.safe table */ + return 1; +} + /* vi:ai et sw=4 ts=4: */ diff --git a/redis.submodule/deps/lua/src/lua_cmsgpack.c b/redis.submodule/deps/lua/src/lua_cmsgpack.c index 53dc1cf..0b82d00 100644 --- a/redis.submodule/deps/lua/src/lua_cmsgpack.c +++ b/redis.submodule/deps/lua/src/lua_cmsgpack.c @@ -7,14 +7,38 @@ #include "lua.h" #include "lauxlib.h" -#define LUACMSGPACK_VERSION "lua-cmsgpack 0.3.0" +#define LUACMSGPACK_NAME "cmsgpack" +#define LUACMSGPACK_SAFE_NAME "cmsgpack_safe" +#define LUACMSGPACK_VERSION "lua-cmsgpack 0.4.0" #define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Salvatore Sanfilippo" #define LUACMSGPACK_DESCRIPTION "MessagePack C implementation for Lua" -#define LUACMSGPACK_MAX_NESTING 16 /* Max tables nesting. */ +/* Allows a preprocessor directive to override MAX_NESTING */ +#ifndef LUACMSGPACK_MAX_NESTING + #define LUACMSGPACK_MAX_NESTING 16 /* Max tables nesting. */ +#endif -/* ============================================================================== - * MessagePack implementation and bindings for Lua 5.1. +/* Check if float or double can be an integer without loss of precision */ +#define IS_INT_TYPE_EQUIVALENT(x, T) (!isinf(x) && (T)(x) == (x)) + +#define IS_INT64_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int64_t) +#define IS_INT_EQUIVALENT(x) IS_INT_TYPE_EQUIVALENT(x, int) + +/* If size of pointer is equal to a 4 byte integer, we're on 32 bits. */ +#if UINTPTR_MAX == UINT_MAX + #define BITS_32 1 +#else + #define BITS_32 0 +#endif + +#if BITS_32 + #define lua_pushunsigned(L, n) lua_pushnumber(L, n) +#else + #define lua_pushunsigned(L, n) lua_pushinteger(L, n) +#endif + +/* ============================================================================= + * MessagePack implementation and bindings for Lua 5.1/5.2. * Copyright(C) 2012 Salvatore Sanfilippo * * http://github.com/antirez/lua-cmsgpack @@ -29,23 +53,27 @@ * 20-Feb-2012 (ver 0.2.0): Tables encoding improved. * 20-Feb-2012 (ver 0.2.1): Minor bug fixing. 
* 20-Feb-2012 (ver 0.3.0): Module renamed lua-cmsgpack (was lua-msgpack). - * ============================================================================ */ + * 04-Apr-2014 (ver 0.3.1): Lua 5.2 support and minor bug fix. + * 07-Apr-2014 (ver 0.4.0): Multiple pack/unpack, lua allocator, efficiency. + * ========================================================================== */ -/* --------------------------- Endian conversion -------------------------------- - * We use it only for floats and doubles, all the other conversions are performed +/* -------------------------- Endian conversion -------------------------------- + * We use it only for floats and doubles, all the other conversions performed * in an endian independent fashion. So the only thing we need is a function - * that swaps a binary string if the arch is little endian (and left it untouched + * that swaps a binary string if arch is little endian (and left it untouched * otherwise). */ /* Reverse memory bytes if arch is little endian. Given the conceptual - * simplicity of the Lua build system we prefer to check for endianess at runtime. + * simplicity of the Lua build system we prefer check for endianess at runtime. * The performance difference should be acceptable. */ -static void memrevifle(void *ptr, size_t len) { - unsigned char *p = ptr, *e = p+len-1, aux; +void memrevifle(void *ptr, size_t len) { + unsigned char *p = (unsigned char *)ptr, + *e = (unsigned char *)p+len-1, + aux; int test = 1; unsigned char *testp = (unsigned char*) &test; - if (testp[0] == 0) return; /* Big endian, nothign to do. */ + if (testp[0] == 0) return; /* Big endian, nothing to do. */ len /= 2; while(len--) { aux = *p; @@ -56,20 +84,34 @@ static void memrevifle(void *ptr, size_t len) { } } -/* ----------------------------- String buffer ---------------------------------- - * This is a simple implementation of string buffers. The only opereation +/* ---------------------------- String buffer ---------------------------------- + * This is a simple implementation of string buffers. The only operation * supported is creating empty buffers and appending bytes to it. * The string buffer uses 2x preallocation on every realloc for O(N) append * behavior. 
*/ typedef struct mp_buf { + lua_State *L; unsigned char *b; size_t len, free; } mp_buf; -static mp_buf *mp_buf_new(void) { - mp_buf *buf = malloc(sizeof(*buf)); - +void *mp_realloc(lua_State *L, void *target, size_t osize,size_t nsize) { + void *(*local_realloc) (void *, void *, size_t osize, size_t nsize) = NULL; + void *ud; + + local_realloc = lua_getallocf(L, &ud); + + return local_realloc(ud, target, osize, nsize); +} + +mp_buf *mp_buf_new(lua_State *L) { + mp_buf *buf = NULL; + + /* Old size = 0; new size = sizeof(*buf) */ + buf = (mp_buf*)mp_realloc(L, NULL, 0, sizeof(*buf)); + + buf->L = L; buf->b = NULL; buf->len = buf->free = 0; return buf; @@ -79,7 +121,7 @@ void mp_buf_append(mp_buf *buf, const unsigned char *s, size_t len) { if (buf->free < len) { size_t newlen = buf->len+len; - buf->b = realloc(buf->b,newlen*2); + buf->b = (unsigned char*)mp_realloc(buf->L, buf->b, buf->len, newlen*2); buf->free = newlen; } memcpy(buf->b+buf->len,s,len); @@ -88,11 +130,11 @@ void mp_buf_append(mp_buf *buf, const unsigned char *s, size_t len) { } void mp_buf_free(mp_buf *buf) { - free(buf->b); - free(buf); + mp_realloc(buf->L, buf->b, buf->len, 0); /* realloc to 0 = free */ + mp_realloc(buf->L, buf, sizeof(*buf), 0); } -/* ------------------------------ String cursor ---------------------------------- +/* ---------------------------- String cursor ---------------------------------- * This simple data structure is used for parsing. Basically you create a cursor * using a string pointer and a length, then it is possible to access the * current string position with cursor->p, check the remaining length @@ -102,7 +144,7 @@ void mp_buf_free(mp_buf *buf) { * be used to report errors. */ #define MP_CUR_ERROR_NONE 0 -#define MP_CUR_ERROR_EOF 1 /* Not enough data to complete the opereation. */ +#define MP_CUR_ERROR_EOF 1 /* Not enough data to complete operation. */ #define MP_CUR_ERROR_BADFMT 2 /* Bad data format */ typedef struct mp_cur { @@ -111,22 +153,15 @@ typedef struct mp_cur { int err; } mp_cur; -static mp_cur *mp_cur_new(const unsigned char *s, size_t len) { - mp_cur *cursor = malloc(sizeof(*cursor)); - +void mp_cur_init(mp_cur *cursor, const unsigned char *s, size_t len) { cursor->p = s; cursor->left = len; cursor->err = MP_CUR_ERROR_NONE; - return cursor; -} - -static void mp_cur_free(mp_cur *cursor) { - free(cursor); } #define mp_cur_consume(_c,_len) do { _c->p += _len; _c->left -= _len; } while(0) -/* When there is not enough room we set an error in the cursor and return, this +/* When there is not enough room we set an error in the cursor and return. This * is very common across the code so we have a macro to make the code look * a bit simpler. 
*/ #define mp_cur_need(_c,_len) do { \ @@ -136,15 +171,19 @@ static void mp_cur_free(mp_cur *cursor) { } \ } while(0) -/* --------------------------- Low level MP encoding -------------------------- */ +/* ------------------------- Low level MP encoding -------------------------- */ -static void mp_encode_bytes(mp_buf *buf, const unsigned char *s, size_t len) { +void mp_encode_bytes(mp_buf *buf, const unsigned char *s, size_t len) { unsigned char hdr[5]; int hdrlen; if (len < 32) { hdr[0] = 0xa0 | (len&0xff); /* fix raw */ hdrlen = 1; + } else if (len <= 0xff) { + hdr[0] = 0xd9; + hdr[1] = len; + hdrlen = 2; } else if (len <= 0xffff) { hdr[0] = 0xda; hdr[1] = (len&0xff00)>>8; @@ -163,7 +202,7 @@ static void mp_encode_bytes(mp_buf *buf, const unsigned char *s, size_t len) { } /* we assume IEEE 754 internal format for single and double precision floats. */ -static void mp_encode_double(mp_buf *buf, double d) { +void mp_encode_double(mp_buf *buf, double d) { unsigned char b[9]; float f = d; @@ -181,7 +220,7 @@ static void mp_encode_double(mp_buf *buf, double d) { } } -static void mp_encode_int(mp_buf *buf, int64_t n) { +void mp_encode_int(mp_buf *buf, int64_t n) { unsigned char b[9]; int enclen; @@ -219,7 +258,7 @@ static void mp_encode_int(mp_buf *buf, int64_t n) { } } else { if (n >= -32) { - b[0] = ((char)n); /* negative fixnum */ + b[0] = ((signed char)n); /* negative fixnum */ enclen = 1; } else if (n >= -128) { b[0] = 0xd0; /* int 8 */ @@ -253,7 +292,7 @@ static void mp_encode_int(mp_buf *buf, int64_t n) { mp_buf_append(buf,b,enclen); } -static void mp_encode_array(mp_buf *buf, int64_t n) { +void mp_encode_array(mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; @@ -276,7 +315,7 @@ static void mp_encode_array(mp_buf *buf, int64_t n) { mp_buf_append(buf,b,enclen); } -static void mp_encode_map(mp_buf *buf, int64_t n) { +void mp_encode_map(mp_buf *buf, int64_t n) { unsigned char b[5]; int enclen; @@ -299,9 +338,9 @@ static void mp_encode_map(mp_buf *buf, int64_t n) { mp_buf_append(buf,b,enclen); } -/* ----------------------------- Lua types encoding --------------------------- */ +/* --------------------------- Lua types encoding --------------------------- */ -static void mp_encode_lua_string(lua_State *L, mp_buf *buf) { +void mp_encode_lua_string(lua_State *L, mp_buf *buf) { size_t len; const char *s; @@ -309,26 +348,43 @@ static void mp_encode_lua_string(lua_State *L, mp_buf *buf) { mp_encode_bytes(buf,(const unsigned char*)s,len); } -static void mp_encode_lua_bool(lua_State *L, mp_buf *buf) { +void mp_encode_lua_bool(lua_State *L, mp_buf *buf) { unsigned char b = lua_toboolean(L,-1) ? 
0xc3 : 0xc2; mp_buf_append(buf,&b,1); } -static void mp_encode_lua_number(lua_State *L, mp_buf *buf) { +/* Lua 5.3 has a built in 64-bit integer type */ +void mp_encode_lua_integer(lua_State *L, mp_buf *buf) { +#if (LUA_VERSION_NUM < 503) && BITS_32 + lua_Number i = lua_tonumber(L,-1); +#else + lua_Integer i = lua_tointeger(L,-1); +#endif + mp_encode_int(buf, (int64_t)i); +} + +/* Lua 5.2 and lower only has 64-bit doubles, so we need to + * detect if the double may be representable as an int + * for Lua < 5.3 */ +void mp_encode_lua_number(lua_State *L, mp_buf *buf) { lua_Number n = lua_tonumber(L,-1); - if (floor(n) != n) { - mp_encode_double(buf,(double)n); + if (IS_INT64_EQUIVALENT(n)) { + mp_encode_lua_integer(L, buf); } else { - mp_encode_int(buf,(int64_t)n); + mp_encode_double(buf,(double)n); } } -static void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level); +void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level); /* Convert a lua table into a message pack list. */ -static void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { +#if LUA_VERSION_NUM < 502 size_t len = lua_objlen(L,-1), j; +#else + size_t len = lua_rawlen(L,-1), j; +#endif mp_encode_array(buf,len); for (j = 1; j <= len; j++) { @@ -339,13 +395,13 @@ static void mp_encode_lua_table_as_array(lua_State *L, mp_buf *buf, int level) { } /* Convert a lua table into a message pack key-value map. */ -static void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { size_t len = 0; /* First step: count keys into table. No other way to do it with the * Lua API, we need to iterate a first time. Note that an alternative * would be to do a single run, and then hack the buffer to insert the - * map opcodes for message pack. Too hachish for this lib. */ + * map opcodes for message pack. Too hackish for this lib. */ lua_pushnil(L); while(lua_next(L,-2)) { lua_pop(L,1); /* remove value, keep key for next iteration. */ @@ -366,80 +422,131 @@ static void mp_encode_lua_table_as_map(lua_State *L, mp_buf *buf, int level) { /* Returns true if the Lua table on top of the stack is exclusively composed * of keys from numerical keys from 1 up to N, with N being the total number * of elements, without any hole in the middle. */ -static int table_is_an_array(lua_State *L) { - long count = 0, max = 0, idx = 0; +int table_is_an_array(lua_State *L) { + int count = 0, max = 0; +#if LUA_VERSION_NUM < 503 lua_Number n; +#else + lua_Integer n; +#endif + + /* Stack top on function entry */ + int stacktop; + + stacktop = lua_gettop(L); lua_pushnil(L); while(lua_next(L,-2)) { /* Stack: ... key value */ lua_pop(L,1); /* Stack: ... key */ - if (lua_type(L,-1) != LUA_TNUMBER) goto not_array; - n = lua_tonumber(L,-1); - idx = n; - if (idx != n || idx < 1) goto not_array; + /* The <= 0 check is valid here because we're comparing indexes. */ +#if LUA_VERSION_NUM < 503 + if ((LUA_TNUMBER != lua_type(L,-1)) || (n = lua_tonumber(L, -1)) <= 0 || + !IS_INT_EQUIVALENT(n)) +#else + if (!lua_isinteger(L,-1) || (n = lua_tointeger(L, -1)) <= 0) +#endif + { + lua_settop(L, stacktop); + return 0; + } + max = (n > max ? n : max); count++; - max = idx; } /* We have the total number of elements in "count". Also we have - * the max index encountered in "idx". We can't reach this code + * the max index encountered in "max". We can't reach this code * if there are indexes <= 0. 
If you also note that there can not be - * repeated keys into a table, you have that if idx==count you are sure + * repeated keys into a table, you have that if max==count you are sure * that there are all the keys form 1 to count (both included). */ - return idx == count; - -not_array: - lua_pop(L,1); - return 0; + lua_settop(L, stacktop); + return max == count; } /* If the length operator returns non-zero, that is, there is at least * an object at key '1', we serialize to message pack list. Otherwise * we use a map. */ -static void mp_encode_lua_table(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_table(lua_State *L, mp_buf *buf, int level) { if (table_is_an_array(L)) mp_encode_lua_table_as_array(L,buf,level); else mp_encode_lua_table_as_map(L,buf,level); } -static void mp_encode_lua_null(lua_State *L, mp_buf *buf) { +void mp_encode_lua_null(lua_State *L, mp_buf *buf) { unsigned char b[1]; + (void)L; b[0] = 0xc0; mp_buf_append(buf,b,1); } -static void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { +void mp_encode_lua_type(lua_State *L, mp_buf *buf, int level) { int t = lua_type(L,-1); - /* Limit the encoding of nested tables to a specfiied maximum depth, so that + /* Limit the encoding of nested tables to a specified maximum depth, so that * we survive when called against circular references in tables. */ if (t == LUA_TTABLE && level == LUACMSGPACK_MAX_NESTING) t = LUA_TNIL; switch(t) { case LUA_TSTRING: mp_encode_lua_string(L,buf); break; case LUA_TBOOLEAN: mp_encode_lua_bool(L,buf); break; - case LUA_TNUMBER: mp_encode_lua_number(L,buf); break; + case LUA_TNUMBER: + #if LUA_VERSION_NUM < 503 + mp_encode_lua_number(L,buf); break; + #else + if (lua_isinteger(L, -1)) { + mp_encode_lua_integer(L, buf); + } else { + mp_encode_lua_number(L, buf); + } + break; + #endif case LUA_TTABLE: mp_encode_lua_table(L,buf,level); break; default: mp_encode_lua_null(L,buf); break; } lua_pop(L,1); } -static int mp_pack(lua_State *L) { - mp_buf *buf = mp_buf_new(); - - mp_encode_lua_type(L,buf,0); - lua_pushlstring(L,(char*)buf->b,buf->len); +/* + * Packs all arguments as a stream for multiple upacking later. + * Returns error if no arguments provided. + */ +int mp_pack(lua_State *L) { + int nargs = lua_gettop(L); + int i; + mp_buf *buf; + + if (nargs == 0) + return luaL_argerror(L, 0, "MessagePack pack needs input."); + + buf = mp_buf_new(L); + for(i = 1; i <= nargs; i++) { + /* Copy argument i to top of stack for _encode processing; + * the encode function pops it from the stack when complete. */ + lua_pushvalue(L, i); + + mp_encode_lua_type(L,buf,0); + + lua_pushlstring(L,(char*)buf->b,buf->len); + + /* Reuse the buffer for the next operation by + * setting its free count to the total buffer size + * and the current position to zero. 
*/ + buf->free += buf->len; + buf->len = 0; + } mp_buf_free(buf); + + /* Concatenate all nargs buffers together */ + lua_concat(L, nargs); return 1; } -/* --------------------------------- Decoding --------------------------------- */ +/* ------------------------------- Decoding --------------------------------- */ void mp_decode_to_lua_type(lua_State *L, mp_cur *c); void mp_decode_to_lua_array(lua_State *L, mp_cur *c, size_t len) { + assert(len <= UINT_MAX); int index = 1; lua_newtable(L); @@ -452,6 +559,7 @@ void mp_decode_to_lua_array(lua_State *L, mp_cur *c, size_t len) { } void mp_decode_to_lua_hash(lua_State *L, mp_cur *c, size_t len) { + assert(len <= UINT_MAX); lua_newtable(L); while(len--) { mp_decode_to_lua_type(L,c); /* key */ @@ -466,34 +574,44 @@ void mp_decode_to_lua_hash(lua_State *L, mp_cur *c, size_t len) { * a Lua type, that is left as the only result on the stack. */ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { mp_cur_need(c,1); + + /* If we return more than 18 elements, we must resize the stack to + * fit all our return values. But, there is no way to + * determine how many objects a msgpack will unpack to up front, so + * we request a +1 larger stack on each iteration (noop if stack is + * big enough, and when stack does require resize it doubles in size) */ + luaL_checkstack(L, 1, + "too many return values at once; " + "use unpack_one or unpack_limit instead."); + switch(c->p[0]) { case 0xcc: /* uint 8 */ mp_cur_need(c,2); - lua_pushnumber(L,c->p[1]); + lua_pushunsigned(L,c->p[1]); mp_cur_consume(c,2); break; case 0xd0: /* int 8 */ mp_cur_need(c,2); - lua_pushnumber(L,(char)c->p[1]); + lua_pushinteger(L,(signed char)c->p[1]); mp_cur_consume(c,2); break; case 0xcd: /* uint 16 */ mp_cur_need(c,3); - lua_pushnumber(L, + lua_pushunsigned(L, (c->p[1] << 8) | c->p[2]); mp_cur_consume(c,3); break; case 0xd1: /* int 16 */ mp_cur_need(c,3); - lua_pushnumber(L,(int16_t) + lua_pushinteger(L,(int16_t) (c->p[1] << 8) | c->p[2]); mp_cur_consume(c,3); break; case 0xce: /* uint 32 */ mp_cur_need(c,5); - lua_pushnumber(L, + lua_pushunsigned(L, ((uint32_t)c->p[1] << 24) | ((uint32_t)c->p[2] << 16) | ((uint32_t)c->p[3] << 8) | @@ -502,7 +620,7 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { break; case 0xd2: /* int 32 */ mp_cur_need(c,5); - lua_pushnumber(L, + lua_pushinteger(L, ((int32_t)c->p[1] << 24) | ((int32_t)c->p[2] << 16) | ((int32_t)c->p[3] << 8) | @@ -511,7 +629,7 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { break; case 0xcf: /* uint 64 */ mp_cur_need(c,9); - lua_pushnumber(L, + lua_pushunsigned(L, ((uint64_t)c->p[1] << 56) | ((uint64_t)c->p[2] << 48) | ((uint64_t)c->p[3] << 40) | @@ -524,7 +642,11 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { break; case 0xd3: /* int 64 */ mp_cur_need(c,9); +#if LUA_VERSION_NUM < 503 lua_pushnumber(L, +#else + lua_pushinteger(L, +#endif ((int64_t)c->p[1] << 56) | ((int64_t)c->p[2] << 48) | ((int64_t)c->p[3] << 40) | @@ -569,6 +691,15 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { mp_cur_consume(c,9); } break; + case 0xd9: /* raw 8 */ + mp_cur_need(c,2); + { + size_t l = c->p[1]; + mp_cur_need(c,2+l); + lua_pushlstring(L,(char*)c->p+2,l); + mp_cur_consume(c,2+l); + } + break; case 0xda: /* raw 16 */ mp_cur_need(c,3); { @@ -581,13 +712,14 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { case 0xdb: /* raw 32 */ mp_cur_need(c,5); { - size_t l = (c->p[1] << 24) | - (c->p[2] << 16) | - (c->p[3] << 8) | - c->p[4]; - mp_cur_need(c,5+l); - lua_pushlstring(L,(char*)c->p+5,l); - 
mp_cur_consume(c,5+l); + size_t l = ((size_t)c->p[1] << 24) | + ((size_t)c->p[2] << 16) | + ((size_t)c->p[3] << 8) | + (size_t)c->p[4]; + mp_cur_consume(c,5); + mp_cur_need(c,l); + lua_pushlstring(L,(char*)c->p,l); + mp_cur_consume(c,l); } break; case 0xdc: /* array 16 */ @@ -601,10 +733,10 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { case 0xdd: /* array 32 */ mp_cur_need(c,5); { - size_t l = (c->p[1] << 24) | - (c->p[2] << 16) | - (c->p[3] << 8) | - c->p[4]; + size_t l = ((size_t)c->p[1] << 24) | + ((size_t)c->p[2] << 16) | + ((size_t)c->p[3] << 8) | + (size_t)c->p[4]; mp_cur_consume(c,5); mp_decode_to_lua_array(L,c,l); } @@ -620,20 +752,20 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { case 0xdf: /* map 32 */ mp_cur_need(c,5); { - size_t l = (c->p[1] << 24) | - (c->p[2] << 16) | - (c->p[3] << 8) | - c->p[4]; + size_t l = ((size_t)c->p[1] << 24) | + ((size_t)c->p[2] << 16) | + ((size_t)c->p[3] << 8) | + (size_t)c->p[4]; mp_cur_consume(c,5); mp_decode_to_lua_hash(L,c,l); } break; default: /* types that can't be idenitified by first byte value. */ if ((c->p[0] & 0x80) == 0) { /* positive fixnum */ - lua_pushnumber(L,c->p[0]); + lua_pushunsigned(L,c->p[0]); mp_cur_consume(c,1); } else if ((c->p[0] & 0xe0) == 0xe0) { /* negative fixnum */ - lua_pushnumber(L,(signed char)c->p[0]); + lua_pushinteger(L,(signed char)c->p[0]); mp_cur_consume(c,1); } else if ((c->p[0] & 0xe0) == 0xa0) { /* fix raw */ size_t l = c->p[0] & 0x1f; @@ -654,54 +786,163 @@ void mp_decode_to_lua_type(lua_State *L, mp_cur *c) { } } -static int mp_unpack(lua_State *L) { +int mp_unpack_full(lua_State *L, int limit, int offset) { size_t len; - const unsigned char *s; - mp_cur *c; + const char *s; + mp_cur c; + int cnt; /* Number of objects unpacked */ + int decode_all = (!limit && !offset); + + s = luaL_checklstring(L,1,&len); /* if no match, exits */ + + if (offset < 0 || limit < 0) /* requesting negative off or lim is invalid */ + return luaL_error(L, + "Invalid request to unpack with offset of %d and limit of %d.", + offset, len); + else if (offset > len) + return luaL_error(L, + "Start offset %d greater than input length %d.", offset, len); + + if (decode_all) limit = INT_MAX; + + mp_cur_init(&c,(const unsigned char *)s+offset,len-offset); - if (!lua_isstring(L,-1)) { - lua_pushstring(L,"MessagePack decoding needs a string as input."); - lua_error(L); + /* We loop over the decode because this could be a stream + * of multiple top-level values serialized together */ + for(cnt = 0; c.left > 0 && cnt < limit; cnt++) { + mp_decode_to_lua_type(L,&c); + + if (c.err == MP_CUR_ERROR_EOF) { + return luaL_error(L,"Missing bytes in input."); + } else if (c.err == MP_CUR_ERROR_BADFMT) { + return luaL_error(L,"Bad data format in input."); + } } - s = (const unsigned char*) lua_tolstring(L,-1,&len); - c = mp_cur_new(s,len); - mp_decode_to_lua_type(L,c); - - if (c->err == MP_CUR_ERROR_EOF) { - mp_cur_free(c); - lua_pushstring(L,"Missing bytes in input."); - lua_error(L); - } else if (c->err == MP_CUR_ERROR_BADFMT) { - mp_cur_free(c); - lua_pushstring(L,"Bad data format in input."); - lua_error(L); - } else if (c->left != 0) { - mp_cur_free(c); - lua_pushstring(L,"Extra bytes in input."); - lua_error(L); + if (!decode_all) { + /* c->left is the remaining size of the input buffer. + * subtract the entire buffer size from the unprocessed size + * to get our next start offset */ + int offset = len - c.left; + /* Return offset -1 when we have have processed the entire buffer. */ + lua_pushinteger(L, c.left == 0 ? 
-1 : offset); + /* Results are returned with the arg elements still + * in place. Lua takes care of only returning + * elements above the args for us. + * In this case, we have one arg on the stack + * for this function, so we insert our first return + * value at position 2. */ + lua_insert(L, 2); + cnt += 1; /* increase return count by one to make room for offset */ } - mp_cur_free(c); - return 1; + + return cnt; +} + +int mp_unpack(lua_State *L) { + return mp_unpack_full(L, 0, 0); +} + +int mp_unpack_one(lua_State *L) { + int offset = luaL_optinteger(L, 2, 0); + /* Variable pop because offset may not exist */ + lua_pop(L, lua_gettop(L)-1); + return mp_unpack_full(L, 1, offset); +} + +int mp_unpack_limit(lua_State *L) { + int limit = luaL_checkinteger(L, 2); + int offset = luaL_optinteger(L, 3, 0); + /* Variable pop because offset may not exist */ + lua_pop(L, lua_gettop(L)-1); + + return mp_unpack_full(L, limit, offset); } -/* ---------------------------------------------------------------------------- */ +int mp_safe(lua_State *L) { + int argc, err, total_results; + + argc = lua_gettop(L); + + /* This adds our function to the bottom of the stack + * (the "call this function" position) */ + lua_pushvalue(L, lua_upvalueindex(1)); + lua_insert(L, 1); + + err = lua_pcall(L, argc, LUA_MULTRET, 0); + total_results = lua_gettop(L); + + if (!err) { + return total_results; + } else { + lua_pushnil(L); + lua_insert(L,-2); + return 2; + } +} -static const struct luaL_reg thislib[] = { +/* -------------------------------------------------------------------------- */ +const struct luaL_Reg cmds[] = { {"pack", mp_pack}, {"unpack", mp_unpack}, - {NULL, NULL} + {"unpack_one", mp_unpack_one}, + {"unpack_limit", mp_unpack_limit}, + {0} }; -LUALIB_API int luaopen_cmsgpack (lua_State *L) { - luaL_register(L, "cmsgpack", thislib); +int luaopen_create(lua_State *L) { + int i; + /* Manually construct our module table instead of + * relying on _register or _newlib */ + lua_newtable(L); + + for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { + lua_pushcfunction(L, cmds[i].func); + lua_setfield(L, -2, cmds[i].name); + } + /* Add metadata */ + lua_pushliteral(L, LUACMSGPACK_NAME); + lua_setfield(L, -2, "_NAME"); lua_pushliteral(L, LUACMSGPACK_VERSION); lua_setfield(L, -2, "_VERSION"); lua_pushliteral(L, LUACMSGPACK_COPYRIGHT); lua_setfield(L, -2, "_COPYRIGHT"); lua_pushliteral(L, LUACMSGPACK_DESCRIPTION); - lua_setfield(L, -2, "_DESCRIPTION"); + lua_setfield(L, -2, "_DESCRIPTION"); + return 1; +} + +LUALIB_API int luaopen_cmsgpack(lua_State *L) { + luaopen_create(L); + +#if LUA_VERSION_NUM < 502 + /* Register name globally for 5.1 */ + lua_pushvalue(L, -1); + lua_setglobal(L, LUACMSGPACK_NAME); +#endif + + return 1; +} + +LUALIB_API int luaopen_cmsgpack_safe(lua_State *L) { + int i; + + luaopen_cmsgpack(L); + + /* Wrap all functions in the safe handler */ + for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) { + lua_getfield(L, -1, cmds[i].name); + lua_pushcclosure(L, mp_safe, 1); + lua_setfield(L, -2, cmds[i].name); + } + +#if LUA_VERSION_NUM < 502 + /* Register name globally for 5.1 */ + lua_pushvalue(L, -1); + lua_setglobal(L, LUACMSGPACK_SAFE_NAME); +#endif + return 1; } diff --git a/redis.submodule/deps/lua/src/strbuf.c b/redis.submodule/deps/lua/src/strbuf.c index 976925a..f0f7f4b 100644 --- a/redis.submodule/deps/lua/src/strbuf.c +++ b/redis.submodule/deps/lua/src/strbuf.c @@ -1,6 +1,6 @@ -/* strbuf - string buffer routines +/* strbuf - String buffer routines * - * Copyright (c) 2010-2011 Mark 
Pulford + * Copyright (c) 2010-2012 Mark Pulford * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the @@ -29,7 +29,7 @@ #include "strbuf.h" -void die(const char *fmt, ...) +static void die(const char *fmt, ...) { va_list arg; diff --git a/redis.submodule/deps/lua/src/strbuf.h b/redis.submodule/deps/lua/src/strbuf.h index f856543..d861108 100644 --- a/redis.submodule/deps/lua/src/strbuf.h +++ b/redis.submodule/deps/lua/src/strbuf.h @@ -1,6 +1,6 @@ /* strbuf - String buffer routines * - * Copyright (c) 2010-2011 Mark Pulford + * Copyright (c) 2010-2012 Mark Pulford * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the @@ -62,7 +62,9 @@ extern void strbuf_resize(strbuf_t *s, int len); static int strbuf_empty_length(strbuf_t *s); static int strbuf_length(strbuf_t *s); static char *strbuf_string(strbuf_t *s, int *len); -static void strbuf_ensure_empty_length(strbuf_t *s, int len); +static void strbuf_ensure_empty_length(strbuf_t *s, int len); +static char *strbuf_empty_ptr(strbuf_t *s); +static void strbuf_extend_length(strbuf_t *s, int len); /* Update */ extern void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...); @@ -96,6 +98,16 @@ static inline void strbuf_ensure_empty_length(strbuf_t *s, int len) strbuf_resize(s, s->length + len); } +static inline char *strbuf_empty_ptr(strbuf_t *s) +{ + return s->buf + s->length; +} + +static inline void strbuf_extend_length(strbuf_t *s, int len) +{ + s->length += len; +} + static inline int strbuf_length(strbuf_t *s) { return s->length; diff --git a/redis.submodule/redis.conf b/redis.submodule/redis.conf index 24c1b7a..66126d3 100644 --- a/redis.submodule/redis.conf +++ b/redis.submodule/redis.conf @@ -1,4 +1,9 @@ -# Redis configuration file example +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf # Note on units: when memory size is needed, it is possible to specify # it in the usual form of 1k 5GB 4M and so forth: @@ -15,7 +20,7 @@ ################################## INCLUDES ################################### # Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis server but also need +# have a standard template that goes to all Redis servers but also need # to customize a few per-server settings. Include files can include # other files, so use this wisely. # @@ -131,7 +136,7 @@ databases 16 # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed # -# Note: you can disable saving at all commenting all the "save" lines. +# Note: you can disable saving completely by commenting out all "save" lines. # # It is also possible to remove all the previously configured save # points by adding a save directive with a single empty string argument @@ -180,9 +185,9 @@ dbfilename dump.rdb # # The DB will be written inside this directory, with the filename specified # above using the 'dbfilename' configuration directive. -# +# # The Append Only File will also be created inside this directory. -# +# # Note that you must specify a directory here, not a file name. dir ./ @@ -240,6 +245,49 @@ slave-serve-stale-data yes # administrative / dangerous commands. slave-read-only yes +# Replication SYNC strategy: disk or socket. 
+# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + # Slaves send PINGs to server in a predefined interval. It's possible to change # this interval with the repl_ping_slave_period option. The default value is 10 # seconds. @@ -279,7 +327,7 @@ repl-disable-tcp-nodelay no # resync is enough, just passing the portion of data the slave missed while # disconnected. # -# The biggest the replication backlog, the longer the time the slave can be +# The bigger the replication backlog, the longer the time the slave can be # disconnected and later be able to perform a partial resynchronization. # # The backlog is only allocated once there is at least a slave connected. @@ -318,7 +366,7 @@ slave-priority 100 # The lag in seconds, that must be <= the specified value, is calculated from # the last ping received from the slave, that is usually sent every second. # -# This option does not GUARANTEES that N replicas will accept the write, but +# This option does not GUARANTEE that N replicas will accept the write, but # will limit the window of exposure for lost writes in case not enough slaves # are available, to the specified number of seconds. # @@ -340,7 +388,7 @@ slave-priority 100 # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). -# +# # Warning: since Redis is pretty fast an outside user can try up to # 150k passwords per second against a good box. 
This means that you should # use a very strong password otherwise it will be very easy to break. @@ -406,18 +454,18 @@ slave-priority 100 # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory # is reached. You can select among five behaviors: -# +# # volatile-lru -> remove the key with an expire set using an LRU algorithm -# allkeys-lru -> remove any key accordingly to the LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm # volatile-random -> remove a random key with an expire set # allkeys-random -> remove a random key, any key # volatile-ttl -> remove the key with the nearest expire time (minor TTL) # noeviction -> don't expire at all, just return an error on write operations -# +# # Note: with any of the above policies, Redis will return an error on write -# operations, when there are not suitable keys for eviction. +# operations, when there are no suitable keys for eviction. # -# At the date of writing this commands are: set setnx setex append +# At the date of writing these commands are: set setnx setex append # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby @@ -425,15 +473,18 @@ slave-priority 100 # # The default is: # -# maxmemory-policy volatile-lru +# maxmemory-policy noeviction # LRU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can select as well the sample -# size to check. For instance for default Redis will check three keys and -# pick the one that was used less recently, you can change the sample size -# using the following configuration directive. +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. # -# maxmemory-samples 3 +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. +# +# maxmemory-samples 5 ############################## APPEND ONLY MODE ############################### @@ -462,13 +513,13 @@ appendonly no appendfilename "appendonly.aof" # The fsync() call tells the Operating System to actually write data on disk -# instead to wait for more data in the output buffer. Some OS will really flush +# instead of waiting for more data in the output buffer. Some OS will really flush # data on disk, some other OS will just try to do it ASAP. # # Redis supports three different modes: # # no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log . Slow, Safest. +# always: fsync after every write to the append only log. Slow, Safest. # everysec: fsync only one time every second. Compromise. # # The default is "everysec", as that's usually the right compromise between @@ -503,7 +554,7 @@ appendfsync everysec # the same as "appendfsync none". In practical terms, this means that it is # possible to lose up to 30 seconds of log in the worst scenario (with the # default Linux settings). -# +# # If you have latency problems turn this to "yes". Otherwise leave it as # "no" that is the safest pick from the point of view of durability. @@ -512,7 +563,7 @@ no-appendfsync-on-rewrite no # Automatic rewrite of the append only file. 
# Redis is able to automatically rewrite the log file implicitly calling # BGREWRITEAOF when the AOF log size grows by the specified percentage. -# +# # This is how it works: Redis remembers the size of the AOF file after the # latest rewrite (if no rewrite has happened since the restart, the size of # the AOF at startup is used). @@ -561,16 +612,124 @@ aof-load-truncated yes # still in execution after the maximum allowed time and will start to # reply to queries with an error. # -# When a long running script exceed the maximum execution time only the +# When a long running script exceeds the maximum execution time only the # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be # used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write commands was -# already issue by the script but the user don't want to wait for the natural +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural # termination of the script. # # Set it to 0 or a negative value for unlimited execution without warnings. lua-time-limit 5000 +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a slave to actually have a exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple slaves able to failover, they exchange messages +# in order to try to give an advantage to the slave with the best +# replication offset (more data from the master processed). +# Slaves will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single slave computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the slave will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a slave will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# +# So for example if node-timeout is 30 seconds, and the slave-validity-factor +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the +# slave will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large slave-validity-factor may allow slaves with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a slave at all. +# +# For maximum availability, it is possible to set the slave-validity-factor +# to a value of 0, which means, that slaves will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + ################################## SLOW LOG ################################### # The Redis Slow Log is a system to log queries that exceeded a specified @@ -579,7 +738,7 @@ lua-time-limit 5000 # but just the time needed to actually execute the command (this is the only # stage of command execution where the thread is blocked and can not serve # other requests in the meantime). 
-# +# # You can configure the slow log with two parameters: one tells Redis # what is the execution time, in microseconds, to exceed in order for the # command to get logged, and the other parameter is the length of the @@ -612,15 +771,15 @@ slowlog-max-len 128 # By default latency monitoring is disabled since it is mostly not needed # if you don't have latency issues, and collecting data has a performance # impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enalbed at runtime using the command +# monitoring can easily be enabled at runtime using the command # "CONFIG SET latency-monitor-threshold " if needed. latency-monitor-threshold 0 -############################# Event notification ############################## +############################# EVENT NOTIFICATION ############################## # Redis can notify Pub/Sub clients about events happening in the key space. # This feature is documented at http://redis.io/topics/notifications -# +# # For instance if keyspace events notification is enabled, and a client # performs a DEL operation on key "foo" stored in the Database 0, two # messages will be published via Pub/Sub: @@ -644,8 +803,8 @@ latency-monitor-threshold 0 # A Alias for g$lshzxe, so that the "AKE" string means all the events. # # The "notify-keyspace-events" takes as argument a string that is composed -# by zero or multiple characters. The empty string means that notifications -# are disabled at all. +# of zero or multiple characters. The empty string means that notifications +# are disabled. # # Example: to enable list and generic events, from the point of view of the # event name, use: @@ -677,7 +836,7 @@ list-max-ziplist-entries 512 list-max-ziplist-value 64 # Sets have a special encoding in just one case: when a set is composed -# of just strings that happens to be integers in radix 10 in the range +# of just strings that happen to be integers in radix 10 in the range # of 64 bit signed integers. # The following configuration setting sets the limit in the size of the # set in order to use this special memory saving encoding. @@ -695,7 +854,7 @@ zset-max-ziplist-value 64 # # A value greater than 16000 is totally useless, since at that point the # dense representation is more memory efficient. -# +# # The suggested value is ~ 3000 in order to have the benefits of # the space efficient encoding without slowing down too much PFADD, # which is O(N) with the sparse encoding. The value can be raised to @@ -710,13 +869,13 @@ hll-sparse-max-bytes 3000 # that is rehashing, the more rehashing "steps" are performed, so if the # server is idle the rehashing is never complete and some more memory is used # by the hash table. -# +# # The default is to use this millisecond 10 times every second in order to -# active rehashing the main dictionaries, freeing memory when possible. +# actively rehash the main dictionaries, freeing memory when possible. # # If unsure: # use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply form time to time +# not a good thing in your environment that Redis can reply from time to time # to queries with 2 milliseconds delay. # # use "activerehashing yes" if you don't have such hard requirements but @@ -765,7 +924,7 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # never requested, and so forth. # # Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform accordingly to the specified "hz" value. 
+# tasks to perform according to the specified "hz" value. # # By default "hz" is set to 10. Raising the value will use more CPU when # Redis is idle, but at the same time will make Redis more responsive when @@ -782,4 +941,3 @@ hz 10 # in order to commit the file to the disk more incrementally and avoid # big latency spikes. aof-rewrite-incremental-fsync yes - diff --git a/redis.submodule/sentinel.conf b/redis.submodule/sentinel.conf index 4b3b792..39d1044 100644 --- a/redis.submodule/sentinel.conf +++ b/redis.submodule/sentinel.conf @@ -29,7 +29,7 @@ port 26379 # dir # Every long running process should have a well-defined working directory. # For Redis Sentinel to chdir to /tmp at startup is the simplest thing -# for the process to don't interferer with administrative tasks such as +# for the process to don't interfere with administrative tasks such as # unmounting filesystems. dir /tmp diff --git a/redis.submodule/src/Makefile b/redis.submodule/src/Makefile index 8b3e959..bfb5d47 100644 --- a/redis.submodule/src/Makefile +++ b/redis.submodule/src/Makefile @@ -58,7 +58,7 @@ ifeq ($(uname_S),SunOS) # SunOS INSTALL=cp -pf FINAL_CFLAGS+= -D__EXTENSIONS__ -D_XPG6 - FINAL_LIBS+= -ldl -lnsl -lsocket -lresolv -lpthread + FINAL_LIBS+= -ldl -lnsl -lsocket -lresolv -lpthread -lrt else ifeq ($(uname_S),Darwin) # Darwin (nothing to do) @@ -113,7 +113,7 @@ endif REDIS_SERVER_NAME=redis-server REDIS_SENTINEL_NAME=redis-sentinel -REDIS_SERVER_OBJ=adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o migrate.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o hyperloglog.o latency.o sparkline.o +REDIS_SERVER_OBJ=adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o REDIS_CLI_NAME=redis-cli REDIS_CLI_OBJ=anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o REDIS_BENCHMARK_NAME=redis-benchmark @@ -251,3 +251,4 @@ install: all $(REDIS_INSTALL) $(REDIS_CLI_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_CHECK_DUMP_NAME) $(INSTALL_BIN) $(REDIS_INSTALL) $(REDIS_CHECK_AOF_NAME) $(INSTALL_BIN) + @ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_SENTINEL_NAME) diff --git a/redis.submodule/src/Makefile.dep b/redis.submodule/src/Makefile.dep index 1f24232..33e8913 100644 --- a/redis.submodule/src/Makefile.dep +++ b/redis.submodule/src/Makefile.dep @@ -1,132 +1,142 @@ adlist.o: adlist.c adlist.h zmalloc.h -ae.o: ae.c ae.h zmalloc.h config.h ae_kqueue.c ae_select.c ae_evport.c ae_epoll.c +ae.o: ae.c ae.h zmalloc.h config.h ae_kqueue.c ae_epoll.c ae_select.c ae_evport.c ae_epoll.o: ae_epoll.c ae_evport.o: ae_evport.c ae_kqueue.o: ae_kqueue.c ae_select.o: ae_select.c anet.o: anet.c fmacros.h anet.h aof.o: aof.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - bio.h + 
../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + bio.h bio.o: bio.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - bio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + bio.h bitops.o: bitops.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h +blocked.o: blocked.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h +cluster.o: cluster.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + cluster.h endianconv.h config.o: config.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + cluster.h +crc16.o: crc16.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h crc64.o: crc64.c db.o: db.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + cluster.h debug.o: debug.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - sha1.h crc64.h bio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + sha1.h crc64.h bio.h dict.o: dict.c fmacros.h dict.h zmalloc.h redisassert.h endianconv.o: endianconv.c hyperloglog.o: hyperloglog.c redis.h fmacros.h config.h \ - ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ - adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h \ - latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ + adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h latency.h \ + sparkline.h rdb.h rio.h intset.o: intset.c intset.h zmalloc.h endianconv.h config.h latency.o: latency.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h 
adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h lzf_c.o: lzf_c.c lzfP.h lzf_d.o: lzf_d.c lzfP.h memtest.o: memtest.c config.h -migrate.o: migrate.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - endianconv.h multi.o: multi.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h networking.o: networking.c redis.h fmacros.h config.h \ - ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ - adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h \ - latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ + adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h latency.h \ + sparkline.h rdb.h rio.h notify.o: notify.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h object.o: object.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h pqsort.o: pqsort.c pubsub.o: pubsub.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h rand.o: rand.c rdb.o: rdb.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - lzf.h zipmap.h endianconv.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + lzf.h zipmap.h endianconv.h redis-benchmark.o: redis-benchmark.c fmacros.h ae.h \ - ../deps/hiredis/hiredis.h sds.h adlist.h zmalloc.h + ../deps/hiredis/hiredis.h sds.h adlist.h zmalloc.h redis-check-aof.o: redis-check-aof.c fmacros.h config.h redis-check-dump.o: redis-check-dump.c lzf.h crc64.h redis-cli.o: redis-cli.c fmacros.h version.h ../deps/hiredis/hiredis.h \ - sds.h zmalloc.h ../deps/linenoise/linenoise.h help.h anet.h ae.h + sds.h zmalloc.h ../deps/linenoise/linenoise.h help.h anet.h ae.h redis.o: redis.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - slowlog.h bio.h asciilogo.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h 
latency.h sparkline.h rdb.h rio.h \ + cluster.h slowlog.h bio.h asciilogo.h release.o: release.c release.h version.h crc64.h replication.o: replication.c redis.h fmacros.h config.h \ - ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ - adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h \ - latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h sds.h dict.h \ + adlist.h zmalloc.h anet.h ziplist.h intset.h version.h util.h latency.h \ + sparkline.h rdb.h rio.h rio.o: rio.c fmacros.h rio.h sds.h util.h crc64.h config.h redis.h \ - ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h dict.h adlist.h \ - zmalloc.h anet.h ziplist.h intset.h version.h latency.h sparkline.h \ - rdb.h + ../deps/lua/src/lua.h ../deps/lua/src/luaconf.h ae.h dict.h adlist.h \ + zmalloc.h anet.h ziplist.h intset.h version.h latency.h sparkline.h \ + rdb.h scripting.o: scripting.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - sha1.h rand.h ../deps/lua/src/lauxlib.h ../deps/lua/src/lualib.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + sha1.h rand.h ../deps/lua/src/lauxlib.h ../deps/lua/src/lua.h \ + ../deps/lua/src/lualib.h sds.o: sds.c sds.h zmalloc.h sentinel.o: sentinel.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - ../deps/hiredis/hiredis.h ../deps/hiredis/async.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + ../deps/hiredis/hiredis.h ../deps/hiredis/async.h \ + ../deps/hiredis/hiredis.h setproctitle.o: setproctitle.c sha1.o: sha1.c sha1.h config.h slowlog.o: slowlog.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - slowlog.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + slowlog.h sort.o: sort.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ - pqsort.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h \ + pqsort.h sparkline.o: sparkline.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h syncio.o: syncio.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h t_hash.o: t_hash.c 
redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h t_list.o: t_list.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h t_set.o: t_set.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h t_string.o: t_string.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h t_zset.o: t_zset.c redis.h fmacros.h config.h ../deps/lua/src/lua.h \ - ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ - ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h + ../deps/lua/src/luaconf.h ae.h sds.h dict.h adlist.h zmalloc.h anet.h \ + ziplist.h intset.h version.h util.h latency.h sparkline.h rdb.h rio.h util.o: util.c fmacros.h util.h sds.h ziplist.o: ziplist.c zmalloc.h util.h sds.h ziplist.h endianconv.h \ - config.h redisassert.h + config.h redisassert.h zipmap.o: zipmap.c zmalloc.h endianconv.h config.h zmalloc.o: zmalloc.c config.h zmalloc.h diff --git a/redis.submodule/src/adlist.c b/redis.submodule/src/adlist.c index b4dba42..b4cc785 100644 --- a/redis.submodule/src/adlist.c +++ b/redis.submodule/src/adlist.c @@ -71,7 +71,7 @@ void listRelease(list *list) zfree(list); } -/* Add a new node to the list, to head, contaning the specified 'value' +/* Add a new node to the list, to head, containing the specified 'value' * pointer as value. * * On error, NULL is returned and no operation is performed (i.e. the diff --git a/redis.submodule/src/anet.c b/redis.submodule/src/anet.c index f36575b..0f6da4b 100644 --- a/redis.submodule/src/anet.c +++ b/redis.submodule/src/anet.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -57,24 +58,37 @@ static void anetSetError(char *err, const char *fmt, ...) va_end(ap); } -int anetNonBlock(char *err, int fd) -{ +int anetSetBlock(char *err, int fd, int non_block) { int flags; - /* Set the socket non-blocking. + /* Set the socket blocking (if non_block is zero) or non-blocking. * Note that fcntl(2) for F_GETFL and F_SETFL can't be * interrupted by a signal. 
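The new anetSetBlock() helper reduces blocking/non-blocking selection to one fcntl(2) read-modify-write of the O_NONBLOCK flag, with anetNonBlock() and anetBlock() as thin wrappers on top. A minimal standalone sketch of that pattern (set_blocking_mode is a hypothetical name, not the patched function):

#include <fcntl.h>

/* Set or clear O_NONBLOCK on fd. Returns 0 on success, -1 on error
 * (errno is left as set by fcntl). */
static int set_blocking_mode(int fd, int non_block) {
    int flags = fcntl(fd, F_GETFL);
    if (flags == -1) return -1;

    if (non_block)
        flags |= O_NONBLOCK;    /* enable non-blocking I/O */
    else
        flags &= ~O_NONBLOCK;   /* restore blocking I/O */

    return fcntl(fd, F_SETFL, flags) == -1 ? -1 : 0;
}

Clearing the flag rather than only setting it is what makes the new anetBlock() entry point possible.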
*/ if ((flags = fcntl(fd, F_GETFL)) == -1) { anetSetError(err, "fcntl(F_GETFL): %s", strerror(errno)); return ANET_ERR; } - if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) { + + if (non_block) + flags |= O_NONBLOCK; + else + flags &= ~O_NONBLOCK; + + if (fcntl(fd, F_SETFL, flags) == -1) { anetSetError(err, "fcntl(F_SETFL,O_NONBLOCK): %s", strerror(errno)); return ANET_ERR; } return ANET_OK; } +int anetNonBlock(char *err, int fd) { + return anetSetBlock(err,fd,1); +} + +int anetBlock(char *err, int fd) { + return anetSetBlock(err,fd,0); +} + /* Set TCP keep alive option to detect dead peers. The interval option * is only used for Linux as we are using Linux-specific APIs to set * the probe send time, interval, and count. */ @@ -165,6 +179,20 @@ int anetTcpKeepAlive(char *err, int fd) return ANET_OK; } +/* Set the socket send timeout (SO_SNDTIMEO socket option) to the specified + * number of milliseconds, or disable it if the 'ms' argument is zero. */ +int anetSendTimeout(char *err, int fd, long long ms) { + struct timeval tv; + + tv.tv_sec = ms/1000; + tv.tv_usec = (ms%1000)*1000; + if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) == -1) { + anetSetError(err, "setsockopt SO_SNDTIMEO: %s", strerror(errno)); + return ANET_ERR; + } + return ANET_OK; +} + /* anetGenericResolve() is called by anetResolve() and anetResolveIP() to * do the actual work. It resolves the hostname "host" and set the string * representation of the IP address into the buffer pointed by "ipbuf". @@ -236,11 +264,13 @@ static int anetCreateSocket(char *err, int domain) { #define ANET_CONNECT_NONE 0 #define ANET_CONNECT_NONBLOCK 1 -static int anetTcpGenericConnect(char *err, char *addr, int port, int flags) +#define ANET_CONNECT_BE_BINDING 2 /* Best effort binding. */ +static int anetTcpGenericConnect(char *err, char *addr, int port, + char *source_addr, int flags) { int s = ANET_ERR, rv; char portstr[6]; /* strlen("65535") + 1; */ - struct addrinfo hints, *servinfo, *p; + struct addrinfo hints, *servinfo, *bservinfo, *p, *b; snprintf(portstr,sizeof(portstr),"%d",port); memset(&hints,0,sizeof(hints)); @@ -260,6 +290,26 @@ static int anetTcpGenericConnect(char *err, char *addr, int port, int flags) if (anetSetReuseAddr(err,s) == ANET_ERR) goto error; if (flags & ANET_CONNECT_NONBLOCK && anetNonBlock(err,s) != ANET_OK) goto error; + if (source_addr) { + int bound = 0; + /* Using getaddrinfo saves us from self-determining IPv4 vs IPv6 */ + if ((rv = getaddrinfo(source_addr, NULL, &hints, &bservinfo)) != 0) + { + anetSetError(err, "%s", gai_strerror(rv)); + goto error; + } + for (b = bservinfo; b != NULL; b = b->ai_next) { + if (bind(s,b->ai_addr,b->ai_addrlen) != -1) { + bound = 1; + break; + } + } + freeaddrinfo(bservinfo); + if (!bound) { + anetSetError(err, "bind: %s", strerror(errno)); + goto error; + } + } if (connect(s,p->ai_addr,p->ai_addrlen) == -1) { /* If the socket is non-blocking, it is ok for connect() to * return an EINPROGRESS error here. */ @@ -282,19 +332,41 @@ static int anetTcpGenericConnect(char *err, char *addr, int port, int flags) close(s); s = ANET_ERR; } + end: freeaddrinfo(servinfo); - return s; + + /* Handle best effort binding: if a binding address was used, but it is + * not possible to create a socket, try again without a binding address. 
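The source_addr and ANET_CONNECT_BE_BINDING additions let outgoing links pick their source address: the connect path bind(2)s the socket to the requested address first, and the best-effort flag retries the connect once without the bind if the bound attempt fails. A rough IPv4-only sketch of that fallback shape (connect_with_source and its arguments are invented for the example; the real anetTcpGenericConnect() resolves both address families with getaddrinfo()):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

/* Connect to ip:port, optionally binding to source_ip first. With
 * best_effort set, a failed bound attempt is retried without the bind. */
static int connect_with_source(const char *ip, int port,
                               const char *source_ip, int best_effort) {
    int s = socket(AF_INET, SOCK_STREAM, 0);
    if (s == -1) return -1;

    if (source_ip) {
        struct sockaddr_in src = { .sin_family = AF_INET };
        if (inet_pton(AF_INET, source_ip, &src.sin_addr) != 1 ||
            bind(s, (struct sockaddr *)&src, sizeof(src)) == -1) {
            close(s);
            /* Best effort: fall back to an unbound connect. */
            return best_effort ? connect_with_source(ip, port, NULL, 0) : -1;
        }
    }

    struct sockaddr_in dst = { .sin_family = AF_INET,
                               .sin_port = htons(port) };
    if (inet_pton(AF_INET, ip, &dst.sin_addr) != 1 ||
        connect(s, (struct sockaddr *)&dst, sizeof(dst)) == -1) {
        close(s);
        return -1;
    }
    return s;
}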
*/ + if (s == ANET_ERR && source_addr && (flags & ANET_CONNECT_BE_BINDING)) { + return anetTcpGenericConnect(err,addr,port,NULL,flags); + } else { + return s; + } } int anetTcpConnect(char *err, char *addr, int port) { - return anetTcpGenericConnect(err,addr,port,ANET_CONNECT_NONE); + return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONE); } int anetTcpNonBlockConnect(char *err, char *addr, int port) { - return anetTcpGenericConnect(err,addr,port,ANET_CONNECT_NONBLOCK); + return anetTcpGenericConnect(err,addr,port,NULL,ANET_CONNECT_NONBLOCK); +} + +int anetTcpNonBlockBindConnect(char *err, char *addr, int port, + char *source_addr) +{ + return anetTcpGenericConnect(err,addr,port,source_addr, + ANET_CONNECT_NONBLOCK); +} + +int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, + char *source_addr) +{ + return anetTcpGenericConnect(err,addr,port,source_addr, + ANET_CONNECT_NONBLOCK|ANET_CONNECT_BE_BINDING); } int anetUnixGenericConnect(char *err, char *path, int flags) @@ -503,22 +575,36 @@ int anetPeerToString(int fd, char *ip, size_t ip_len, int *port) { struct sockaddr_storage sa; socklen_t salen = sizeof(sa); - if (getpeername(fd,(struct sockaddr*)&sa,&salen) == -1) { - if (port) *port = 0; - ip[0] = '?'; - ip[1] = '\0'; - return -1; - } + if (getpeername(fd,(struct sockaddr*)&sa,&salen) == -1) goto error; + if (ip_len == 0) goto error; + if (sa.ss_family == AF_INET) { struct sockaddr_in *s = (struct sockaddr_in *)&sa; if (ip) inet_ntop(AF_INET,(void*)&(s->sin_addr),ip,ip_len); if (port) *port = ntohs(s->sin_port); - } else { + } else if (sa.ss_family == AF_INET6) { struct sockaddr_in6 *s = (struct sockaddr_in6 *)&sa; if (ip) inet_ntop(AF_INET6,(void*)&(s->sin6_addr),ip,ip_len); if (port) *port = ntohs(s->sin6_port); + } else if (sa.ss_family == AF_UNIX) { + if (ip) strncpy(ip,"/unixsocket",ip_len); + if (port) *port = 0; + } else { + goto error; } return 0; + +error: + if (ip) { + if (ip_len >= 2) { + ip[0] = '?'; + ip[1] = '\0'; + } else if (ip_len == 1) { + ip[0] = '\0'; + } + } + if (port) *port = 0; + return -1; } int anetSockName(int fd, char *ip, size_t ip_len, int *port) { diff --git a/redis.submodule/src/anet.h b/redis.submodule/src/anet.h index c4248b9..ac5c36e 100644 --- a/redis.submodule/src/anet.h +++ b/redis.submodule/src/anet.h @@ -49,6 +49,8 @@ int anetTcpConnect(char *err, char *addr, int port); int anetTcpNonBlockConnect(char *err, char *addr, int port); +int anetTcpNonBlockBindConnect(char *err, char *addr, int port, char *source_addr); +int anetTcpNonBlockBestEffortBindConnect(char *err, char *addr, int port, char *source_addr); int anetUnixConnect(char *err, char *path); int anetUnixNonBlockConnect(char *err, char *path); int anetRead(int fd, char *buf, int count); @@ -61,9 +63,11 @@ int anetTcpAccept(char *err, int serversock, char *ip, size_t ip_len, int *port) int anetUnixAccept(char *err, int serversock); int anetWrite(int fd, char *buf, int count); int anetNonBlock(char *err, int fd); +int anetBlock(char *err, int fd); int anetEnableTcpNoDelay(char *err, int fd); int anetDisableTcpNoDelay(char *err, int fd); int anetTcpKeepAlive(char *err, int fd); +int anetSendTimeout(char *err, int fd, long long ms); int anetPeerToString(int fd, char *ip, size_t ip_len, int *port); int anetKeepAlive(char *err, int fd, int interval); int anetSockName(int fd, char *ip, size_t ip_len, int *port); diff --git a/redis.submodule/src/aof.c b/redis.submodule/src/aof.c index 0f9682c..806a7d0 100644 --- a/redis.submodule/src/aof.c +++ 
b/redis.submodule/src/aof.c @@ -40,6 +40,7 @@ #include void aofUpdateCurrentSize(void); +void aofClosePipes(void); /* ---------------------------------------------------------------------------- * AOF rewrite buffer implementation. @@ -73,18 +74,51 @@ void aofRewriteBufferReset(void) { listSetFreeMethod(server.aof_rewrite_buf_blocks,zfree); } -/* Return the current size of the AOF rerwite buffer. */ +/* Return the current size of the AOF rewrite buffer. */ unsigned long aofRewriteBufferSize(void) { - listNode *ln = listLast(server.aof_rewrite_buf_blocks); - aofrwblock *block = ln ? ln->value : NULL; + listNode *ln; + listIter li; + unsigned long size = 0; - if (block == NULL) return 0; - unsigned long size = - (listLength(server.aof_rewrite_buf_blocks)-1) * AOF_RW_BUF_BLOCK_SIZE; - size += block->used; + listRewind(server.aof_rewrite_buf_blocks,&li); + while((ln = listNext(&li))) { + aofrwblock *block = listNodeValue(ln); + size += block->used; + } return size; } +/* Event handler used to send data to the child process doing the AOF + * rewrite. We send pieces of our AOF differences buffer so that the final + * write when the child finishes the rewrite will be small. */ +void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) { + listNode *ln; + aofrwblock *block; + ssize_t nwritten; + REDIS_NOTUSED(el); + REDIS_NOTUSED(fd); + REDIS_NOTUSED(privdata); + REDIS_NOTUSED(mask); + + while(1) { + ln = listFirst(server.aof_rewrite_buf_blocks); + block = ln ? ln->value : NULL; + if (server.aof_stop_sending_diff || !block) { + aeDeleteFileEvent(server.el,server.aof_pipe_write_data_to_child, + AE_WRITABLE); + return; + } + if (block->used > 0) { + nwritten = write(server.aof_pipe_write_data_to_child, + block->buf,block->used); + if (nwritten <= 0) return; + memmove(block->buf,block->buf+nwritten,block->used-nwritten); + block->used -= nwritten; + } + if (block->used == 0) listDelNode(server.aof_rewrite_buf_blocks,ln); + } +} + /* Append data to the AOF rewrite buffer, allocating new blocks if needed. */ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { listNode *ln = listLast(server.aof_rewrite_buf_blocks); @@ -123,6 +157,13 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { } } } + + /* Install a file event to send data to the rewrite child if there is + * not one already. */ + if (aeGetFileEvents(server.el,server.aof_pipe_write_data_to_child) == 0) { + aeCreateFileEvent(server.el, server.aof_pipe_write_data_to_child, + AE_WRITABLE, aofChildWriteDiffData, NULL); + } } /* Write the buffer (possibly composed of multiple blocks) into the specified @@ -184,6 +225,8 @@ void stopAppendOnly(void) { aofRemoveTempFile(server.aof_child_pid); server.aof_child_pid = -1; server.aof_rewrite_time_start = -1; + /* close pipes used for IPC between the two processes. */ + aofClosePipes(); } } @@ -202,7 +245,7 @@ int startAppendOnly(void) { redisLog(REDIS_WARNING,"Redis needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error."); return REDIS_ERR; } - /* We correctly switched on AOF, now wait for the rerwite to be complete + /* We correctly switched on AOF, now wait for the rewrite to be complete * in order to append data on disk. */ server.aof_state = REDIS_AOF_WAIT_REWRITE; return REDIS_OK; @@ -243,7 +286,7 @@ void flushAppendOnlyFile(int force) { * the write for a couple of seconds. 
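aofChildWriteDiffData() is a writable-event handler: each time the event loop reports the data pipe can accept bytes, it pops blocks off aof_rewrite_buf_blocks, writes what it can, and shifts any unwritten tail to the front of the block for the next round. The short-write handling on its own, reduced to one flat buffer (hypothetical names, not the server code):

#include <errno.h>
#include <string.h>
#include <unistd.h>

struct diff_block {
    char   buf[4096];
    size_t used;              /* bytes currently stored in buf */
};

/* Flush as much of the block as the non-blocking fd will take.
 * Returns 1 when the block is empty, 0 if data is still pending,
 * -1 on a real error. */
static int flush_block(int fd, struct diff_block *b) {
    while (b->used > 0) {
        ssize_t n = write(fd, b->buf, b->used);
        if (n <= 0) {
            if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
                return 0;     /* pipe full: wait for the next writable event */
            return -1;
        }
        /* Keep only the unwritten tail, moved to the start of the buffer. */
        memmove(b->buf, b->buf + n, b->used - (size_t)n);
        b->used -= (size_t)n;
    }
    return 1;
}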
*/ if (sync_in_progress) { if (server.aof_flush_postponed_start == 0) { - /* No previous write postponinig, remember that we are + /* No previous write postponing, remember that we are * postponing the flush and return. */ server.aof_flush_postponed_start = server.unixtime; return; @@ -294,7 +337,7 @@ void flushAppendOnlyFile(int force) { last_write_error_log = server.unixtime; } - /* Lof the AOF write error and record the error code. */ + /* Log the AOF write error and record the error code. */ if (nwritten == -1) { if (can_log) { redisLog(REDIS_WARNING,"Error writing to the AOF file: %s", @@ -318,7 +361,7 @@ void flushAppendOnlyFile(int force) { "ftruncate: %s", strerror(errno)); } } else { - /* If the ftrunacate() succeeded we can set nwritten to + /* If the ftruncate() succeeded we can set nwritten to * -1 since there is no longer partial data into the AOF. */ nwritten = -1; } @@ -330,7 +373,7 @@ void flushAppendOnlyFile(int force) { /* We can't recover when the fsync policy is ALWAYS since the * reply for the client is already in the output buffers, and we * have the contract with the user that on acknowledged write data - * is synched on disk. */ + * is synced on disk. */ redisLog(REDIS_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting..."); exit(1); } else { @@ -425,7 +468,7 @@ sds catAppendOnlyExpireAtCommand(sds buf, struct redisCommand *cmd, robj *key, r long long when; robj *argv[3]; - /* Make sure we can use strtol */ + /* Make sure we can use strtoll */ seconds = getDecodedObject(seconds); when = strtoll(seconds->ptr,NULL,10); /* Convert argument into milliseconds for EXPIRE, SETEX, EXPIREAT */ @@ -456,7 +499,7 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a robj *tmpargv[3]; /* The DB this command was targeting is not the same as the last command - * we appendend. To issue a SELECT command is needed. */ + * we appended. To issue a SELECT command is needed. */ if (dictid != server.aof_selected_db) { char seldb[64]; @@ -519,6 +562,7 @@ struct redisClient *createFakeClient(void) { c->argv = NULL; c->bufpos = 0; c->flags = 0; + c->btype = REDIS_BLOCKED_NONE; /* We set the fake client as a slave waiting for the synchronization * so that Redis will not try to send replies to this client. */ c->replstate = REDIS_REPL_WAIT_BGSAVE_START; @@ -549,7 +593,7 @@ void freeFakeClient(struct redisClient *c) { zfree(c); } -/* Replay the append log file. On error REDIS_OK is returned. On non fatal +/* Replay the append log file. On success REDIS_OK is returned. On non fatal * error (the append only file is zero-length) REDIS_ERR is returned. On * fatal error an error message is logged and the program exists. */ int loadAppendOnlyFile(char *filename) { @@ -714,7 +758,7 @@ int rioWriteBulkObject(rio *r, robj *obj) { * in a child process when this function is called). */ if (obj->encoding == REDIS_ENCODING_INT) { return rioWriteBulkLongLong(r,(long)obj->ptr); - } else if (obj->encoding == REDIS_ENCODING_RAW) { + } else if (sdsEncodedObject(obj)) { return rioWriteBulkString(r,obj->ptr,sdslen(obj->ptr)); } else { redisPanic("Unknown string encoding"); @@ -952,6 +996,21 @@ int rewriteHashObject(rio *r, robj *key, robj *o) { return 1; } +/* This function is called by the child rewriting the AOF file to read + * the difference accumulated from the parent into a buffer, that is + * concatenated at the end of the rewrite. 
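aofReadDiffFromParent() simply keeps read(2)-ing the non-blocking pipe and appending whatever arrives to the child's sds accumulator until the read would block. The same accumulate-until-empty loop with a plain malloc/realloc buffer standing in for sds (drain_pipe is an illustrative name only):

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Append everything currently readable from a non-blocking fd to the heap
 * buffer *dst of *dstlen bytes. Returns bytes appended, or -1 on error. */
static ssize_t drain_pipe(int fd, char **dst, size_t *dstlen) {
    char buf[65536];          /* matches a typical pipe buffer size */
    ssize_t nread, total = 0;

    while ((nread = read(fd, buf, sizeof(buf))) > 0) {
        char *p = realloc(*dst, *dstlen + (size_t)nread);
        if (p == NULL) return -1;
        memcpy(p + *dstlen, buf, (size_t)nread);
        *dst = p;
        *dstlen += (size_t)nread;
        total += nread;
    }
    if (nread == -1 && errno != EAGAIN && errno != EWOULDBLOCK) return -1;
    return total;
}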
*/ +ssize_t aofReadDiffFromParent(void) { + char buf[65536]; /* Default pipe buffer size on most Linux systems. */ + ssize_t nread, total = 0; + + while ((nread = + read(server.aof_pipe_read_data_from_parent,buf,sizeof(buf))) > 0) { + server.aof_child_diff = sdscatlen(server.aof_child_diff,buf,nread); + total += nread; + } + return total; +} + /* Write a sequence of commands able to fully rebuild the dataset into * "filename". Used both by REWRITEAOF and BGREWRITEAOF. * @@ -967,6 +1026,8 @@ int rewriteAppendOnlyFile(char *filename) { char tmpfile[256]; int j; long long now = mstime(); + char byte; + size_t processed = 0; /* Note that we have to use a different temp name here compared to the * one used by rewriteAppendOnlyFileBackground() function. */ @@ -977,6 +1038,7 @@ int rewriteAppendOnlyFile(char *filename) { return REDIS_ERR; } + server.aof_child_diff = sdsempty(); rioInitWithFile(&aof,fp); if (server.aof_rewrite_incremental_fsync) rioSetAutoSync(&aof,REDIS_AOF_AUTOSYNC_BYTES); @@ -1036,10 +1098,61 @@ int rewriteAppendOnlyFile(char *filename) { if (rioWriteBulkObject(&aof,&key) == 0) goto werr; if (rioWriteBulkLongLong(&aof,expiretime) == 0) goto werr; } + /* Read some diff from the parent process from time to time. */ + if (aof.processed_bytes > processed+1024*10) { + processed = aof.processed_bytes; + aofReadDiffFromParent(); + } } dictReleaseIterator(di); + di = NULL; } + /* Do an initial slow fsync here while the parent is still sending + * data, in order to make the next final fsync faster. */ + if (fflush(fp) == EOF) goto werr; + if (fsync(fileno(fp)) == -1) goto werr; + + /* Read again a few times to get more data from the parent. + * We can't read forever (the server may receive data from clients + * faster than it is able to send data to the child), so we try to read + * some more data in a loop as soon as there is a good chance more data + * will come. If it looks like we are wasting time, we abort (this + * happens after 20 ms without new data). */ + int nodata = 0; + mstime_t start = mstime(); + while(mstime()-start < 1000 && nodata < 20) { + if (aeWait(server.aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0) + { + nodata++; + continue; + } + nodata = 0; /* Start counting from zero, we stop on N *contiguous* + timeouts. */ + aofReadDiffFromParent(); + } + + /* Ask the master to stop sending diffs. */ + if (write(server.aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr; + if (anetNonBlock(NULL,server.aof_pipe_read_ack_from_parent) != ANET_OK) + goto werr; + /* We read the ACK from the server using a 10 seconds timeout. Normally + * it should reply ASAP, but just in case we lose its reply, we are sure + * the child will eventually get terminated. */ + if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 || + byte != '!') goto werr; + redisLog(REDIS_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF..."); + + /* Read the final diff if any. */ + aofReadDiffFromParent(); + + /* Write the received diff to the file. 
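The child's final catch-up loop is "keep reading while data keeps trickling in, but never for more than a second and never past 20 consecutive empty 1 ms waits". A sketch of that control flow with poll(2) standing in for the event-loop helper aeWait() (everything else here is made up for the example):

#include <poll.h>
#include <stdint.h>
#include <time.h>

static int64_t now_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Call consume(fd) while data keeps arriving; give up after 1000 ms total
 * or after 20 consecutive 1 ms polls that returned nothing. */
static void read_while_data_flows(int fd, void (*consume)(int)) {
    int nodata = 0;
    int64_t start = now_ms();

    while (now_ms() - start < 1000 && nodata < 20) {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        if (poll(&pfd, 1, 1) <= 0) {   /* 1 ms timeout, nothing readable */
            nodata++;
            continue;
        }
        nodata = 0;                    /* reset on contiguous data */
        consume(fd);
    }
}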
*/ + redisLog(REDIS_NOTICE, + "Concatenating %.2f MB of AOF diff received from parent.", + (double) sdslen(server.aof_child_diff) / (1024*1024)); + if (rioWrite(&aof,server.aof_child_diff,sdslen(server.aof_child_diff)) == 0) + goto werr; + /* Make sure data will not remain on the OS's output buffers */ if (fflush(fp) == EOF) goto werr; if (fsync(fileno(fp)) == -1) goto werr; @@ -1056,13 +1169,91 @@ int rewriteAppendOnlyFile(char *filename) { return REDIS_OK; werr: + redisLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno)); fclose(fp); unlink(tmpfile); - redisLog(REDIS_WARNING,"Write error writing append only file on disk: %s", strerror(errno)); if (di) dictReleaseIterator(di); return REDIS_ERR; } +/* ---------------------------------------------------------------------------- + * AOF rewrite pipes for IPC + * -------------------------------------------------------------------------- */ + +/* This event handler is called when the AOF rewriting child sends us a + * single '!' char to signal we should stop sending buffer diffs. The + * parent sends a '!' as well to acknowledge. */ +void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) { + char byte; + REDIS_NOTUSED(el); + REDIS_NOTUSED(privdata); + REDIS_NOTUSED(mask); + + if (read(fd,&byte,1) == 1 && byte == '!') { + redisLog(REDIS_NOTICE,"AOF rewrite child asks to stop sending diffs."); + server.aof_stop_sending_diff = 1; + if (write(server.aof_pipe_write_ack_to_child,"!",1) != 1) { + /* If we can't send the ack, inform the user, but don't try again + * since in the other side the children will use a timeout if the + * kernel can't buffer our write, or, the children was + * terminated. */ + redisLog(REDIS_WARNING,"Can't send ACK to AOF child: %s", + strerror(errno)); + } + } + /* Remove the handler since this can be called only one time during a + * rewrite. */ + aeDeleteFileEvent(server.el,server.aof_pipe_read_ack_from_child,AE_READABLE); +} + +/* Create the pipes used for parent - child process IPC during rewrite. + * We have a data pipe used to send AOF incremental diffs to the child, + * and two other pipes used by the children to signal it finished with + * the rewrite so no more data should be written, and another for the + * parent to acknowledge it understood this new condition. */ +int aofCreatePipes(void) { + int fds[6] = {-1, -1, -1, -1, -1, -1}; + int j; + + if (pipe(fds) == -1) goto error; /* parent -> children data. */ + if (pipe(fds+2) == -1) goto error; /* children -> parent ack. */ + if (pipe(fds+4) == -1) goto error; /* children -> parent ack. */ + /* Parent -> children data is non blocking. 
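aofCreatePipes() opens three pipes: one for the parent-to-child diff data, one for the child's '!' stop request, and one for the parent's '!' acknowledgement; only the data pipe ends are made non-blocking so neither process can stall on it. A bare-bones version of that setup (struct and function names invented; the error path here just returns instead of closing the already opened descriptors as the real code does):

#include <fcntl.h>
#include <unistd.h>

struct rewrite_pipes {
    int data_rd, data_wr;    /* parent -> child: AOF diff data       */
    int ack_rd,  ack_wr;     /* child  -> parent: '!' stop request   */
    int nack_rd, nack_wr;    /* parent -> child: '!' acknowledgement */
};

static int make_nonblock(int fd) {
    int flags = fcntl(fd, F_GETFL);
    return (flags == -1) ? -1 : fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

static int rewrite_pipes_create(struct rewrite_pipes *p) {
    int fds[6];

    if (pipe(fds) == -1 || pipe(fds + 2) == -1 || pipe(fds + 4) == -1)
        return -1;
    /* Only the data pipe is non-blocking on both ends. */
    if (make_nonblock(fds[0]) == -1 || make_nonblock(fds[1]) == -1)
        return -1;

    p->data_rd = fds[0]; p->data_wr = fds[1];
    p->ack_rd  = fds[2]; p->ack_wr  = fds[3];
    p->nack_rd = fds[4]; p->nack_wr = fds[5];
    return 0;
}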
*/ + if (anetNonBlock(NULL,fds[0]) != ANET_OK) goto error; + if (anetNonBlock(NULL,fds[1]) != ANET_OK) goto error; + if (aeCreateFileEvent(server.el, fds[2], AE_READABLE, aofChildPipeReadable, NULL) == AE_ERR) goto error; + + server.aof_pipe_write_data_to_child = fds[1]; + server.aof_pipe_read_data_from_parent = fds[0]; + server.aof_pipe_write_ack_to_parent = fds[3]; + server.aof_pipe_read_ack_from_child = fds[2]; + server.aof_pipe_write_ack_to_child = fds[5]; + server.aof_pipe_read_ack_from_parent = fds[4]; + server.aof_stop_sending_diff = 0; + return REDIS_OK; + +error: + redisLog(REDIS_WARNING,"Error opening /setting AOF rewrite IPC pipes: %s", + strerror(errno)); + for (j = 0; j < 6; j++) if(fds[j] != -1) close(fds[j]); + return REDIS_ERR; +} + +void aofClosePipes(void) { + aeDeleteFileEvent(server.el,server.aof_pipe_read_ack_from_child,AE_READABLE); + aeDeleteFileEvent(server.el,server.aof_pipe_write_data_to_child,AE_WRITABLE); + close(server.aof_pipe_write_data_to_child); + close(server.aof_pipe_read_data_from_parent); + close(server.aof_pipe_write_ack_to_parent); + close(server.aof_pipe_read_ack_from_child); + close(server.aof_pipe_write_ack_to_child); + close(server.aof_pipe_read_ack_from_parent); +} + +/* ---------------------------------------------------------------------------- + * AOF background rewrite + * ------------------------------------------------------------------------- */ + /* This is how rewriting of the append only file in background works: * * 1) The user calls BGREWRITEAOF @@ -1080,6 +1271,7 @@ int rewriteAppendOnlyFileBackground(void) { long long start; if (server.aof_child_pid != -1) return REDIS_ERR; + if (aofCreatePipes() != REDIS_OK) return REDIS_ERR; start = ustime(); if ((childpid = fork()) == 0) { char tmpfile[256]; @@ -1201,7 +1393,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { latencyAddSampleIfNeeded("aof-rewrite-diff-write",latency); redisLog(REDIS_NOTICE, - "Parent diff successfully flushed to the rewritten AOF (%lu bytes)", aofRewriteBufferSize()); + "Residual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024)); /* The only remaining thing to do is to rename the temporary file to * the configured file and switch the file descriptor used to do AOF @@ -1302,6 +1494,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { } cleanup: + aofClosePipes(); aofRewriteBufferReset(); aofRemoveTempFile(server.aof_child_pid); server.aof_child_pid = -1; diff --git a/redis.submodule/src/bitops.c b/redis.submodule/src/bitops.c index 209f9b0..4c86622 100644 --- a/redis.submodule/src/bitops.c +++ b/redis.submodule/src/bitops.c @@ -70,16 +70,19 @@ size_t redisPopcount(void *s, long count) { count--; } - /* Count bits 16 bytes at a time */ + /* Count bits 28 bytes at a time */ p4 = (uint32_t*)p; - while(count>=16) { - uint32_t aux1, aux2, aux3, aux4; + while(count>=28) { + uint32_t aux1, aux2, aux3, aux4, aux5, aux6, aux7; aux1 = *p4++; aux2 = *p4++; aux3 = *p4++; aux4 = *p4++; - count -= 16; + aux5 = *p4++; + aux6 = *p4++; + aux7 = *p4++; + count -= 28; aux1 = aux1 - ((aux1 >> 1) & 0x55555555); aux1 = (aux1 & 0x33333333) + ((aux1 >> 2) & 0x33333333); @@ -89,10 +92,19 @@ size_t redisPopcount(void *s, long count) { aux3 = (aux3 & 0x33333333) + ((aux3 >> 2) & 0x33333333); aux4 = aux4 - ((aux4 >> 1) & 0x55555555); aux4 = (aux4 & 0x33333333) + ((aux4 >> 2) & 0x33333333); - bits += ((((aux1 + (aux1 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24) + - ((((aux2 + (aux2 >> 4)) & 
0x0F0F0F0F) * 0x01010101) >> 24) + - ((((aux3 + (aux3 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24) + - ((((aux4 + (aux4 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24); + aux5 = aux5 - ((aux5 >> 1) & 0x55555555); + aux5 = (aux5 & 0x33333333) + ((aux5 >> 2) & 0x33333333); + aux6 = aux6 - ((aux6 >> 1) & 0x55555555); + aux6 = (aux6 & 0x33333333) + ((aux6 >> 2) & 0x33333333); + aux7 = aux7 - ((aux7 >> 1) & 0x55555555); + aux7 = (aux7 & 0x33333333) + ((aux7 >> 2) & 0x33333333); + bits += ((((aux1 + (aux1 >> 4)) & 0x0F0F0F0F) + + ((aux2 + (aux2 >> 4)) & 0x0F0F0F0F) + + ((aux3 + (aux3 >> 4)) & 0x0F0F0F0F) + + ((aux4 + (aux4 >> 4)) & 0x0F0F0F0F) + + ((aux5 + (aux5 >> 4)) & 0x0F0F0F0F) + + ((aux6 + (aux6 >> 4)) & 0x0F0F0F0F) + + ((aux7 + (aux7 >> 4)) & 0x0F0F0F0F))* 0x01010101) >> 24; } /* Count the remaining bytes. */ p = (unsigned char*)p4; @@ -261,12 +273,12 @@ void getbitCommand(redisClient *c) { byte = bitoffset >> 3; bit = 7 - (bitoffset & 0x7); - if (o->encoding != REDIS_ENCODING_RAW) { - if (byte < (size_t)ll2string(llbuf,sizeof(llbuf),(long)o->ptr)) - bitval = llbuf[byte] & (1 << bit); - } else { + if (sdsEncodedObject(o)) { if (byte < sdslen(o->ptr)) bitval = ((uint8_t*)o->ptr)[byte] & (1 << bit); + } else { + if (byte < (size_t)ll2string(llbuf,sizeof(llbuf),(long)o->ptr)) + bitval = llbuf[byte] & (1 << bit); } addReply(c, bitval ? shared.cone : shared.czero); @@ -348,7 +360,7 @@ void bitopCommand(redisClient *c) { * can take a fast path that performs much better than the * vanilla algorithm. */ j = 0; - if (minlen && numkeys <= 16) { + if (minlen >= sizeof(unsigned long)*4 && numkeys <= 16) { unsigned long *lp[16]; unsigned long *lres = (unsigned long*) res; diff --git a/redis.submodule/src/config.c b/redis.submodule/src/config.c index a9fc74f..8998af9 100644 --- a/redis.submodule/src/config.c +++ b/redis.submodule/src/config.c @@ -29,6 +29,7 @@ */ #include "redis.h" +#include "cluster.h" #include #include @@ -81,6 +82,7 @@ void resetServerSaveParams(void) { void loadServerConfigFromString(char *config) { char *err = NULL; int linenum = 0, totlines, i; + int slaveof_linenum = 0; sds *lines; lines = sdssplitlen(config,strlen(config),"\n",1,&totlines); @@ -248,6 +250,7 @@ void loadServerConfigFromString(char *config) { goto loaderr; } } else if (!strcasecmp(argv[0],"slaveof") && argc == 3) { + slaveof_linenum = linenum; server.masterhost = sdsnew(argv[1]); server.masterport = atoi(argv[2]); server.repl_state = REDIS_REPL_CONNECT; @@ -267,6 +270,16 @@ void loadServerConfigFromString(char *config) { if ((server.repl_disable_tcp_nodelay = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } + } else if (!strcasecmp(argv[0],"repl-diskless-sync") && argc==2) { + if ((server.repl_diskless_sync = yesnotoi(argv[1])) == -1) { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } + } else if (!strcasecmp(argv[0],"repl-diskless-sync-delay") && argc==2) { + server.repl_diskless_sync_delay = atoi(argv[1]); + if (server.repl_diskless_sync_delay < 0) { + err = "repl-diskless-sync-delay can't be negative"; + goto loaderr; + } } else if (!strcasecmp(argv[0],"repl-backlog-size") && argc == 2) { long long size = memtoll(argv[1],NULL); if (size <= 0) { @@ -281,7 +294,7 @@ void loadServerConfigFromString(char *config) { goto loaderr; } } else if (!strcasecmp(argv[0],"masterauth") && argc == 2) { - server.masterauth = zstrdup(argv[1]); + server.masterauth = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"slave-serve-stale-data") && argc == 2) { if ((server.repl_serve_stale_data = 
yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; @@ -419,6 +432,41 @@ void loadServerConfigFromString(char *config) { err = "Target command name already exists"; goto loaderr; } } + } else if (!strcasecmp(argv[0],"cluster-enabled") && argc == 2) { + if ((server.cluster_enabled = yesnotoi(argv[1])) == -1) { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } + } else if (!strcasecmp(argv[0],"cluster-config-file") && argc == 2) { + zfree(server.cluster_configfile); + server.cluster_configfile = zstrdup(argv[1]); + } else if (!strcasecmp(argv[0],"cluster-require-full-coverage") && + argc == 2) + { + if ((server.cluster_require_full_coverage = yesnotoi(argv[1])) == -1) + { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } + } else if (!strcasecmp(argv[0],"cluster-node-timeout") && argc == 2) { + server.cluster_node_timeout = strtoll(argv[1],NULL,10); + if (server.cluster_node_timeout <= 0) { + err = "cluster node timeout must be 1 or greater"; goto loaderr; + } + } else if (!strcasecmp(argv[0],"cluster-migration-barrier") + && argc == 2) + { + server.cluster_migration_barrier = atoi(argv[1]); + if (server.cluster_migration_barrier < 0) { + err = "cluster migration barrier must zero or positive"; + goto loaderr; + } + } else if (!strcasecmp(argv[0],"cluster-slave-validity-factor") + && argc == 2) + { + server.cluster_slave_validity_factor = atoi(argv[1]); + if (server.cluster_slave_validity_factor < 0) { + err = "cluster slave validity factor must be zero or positive"; + goto loaderr; + } } else if (!strcasecmp(argv[0],"lua-time-limit") && argc == 2) { server.lua_time_limit = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"slowlog-log-slower-than") && @@ -497,6 +545,15 @@ void loadServerConfigFromString(char *config) { } sdsfreesplitres(argv,argc); } + + /* Sanity checks. */ + if (server.cluster_enabled && server.masterhost) { + linenum = slaveof_linenum; + i = linenum-1; + err = "slaveof directive not allowed in cluster mode"; + goto loaderr; + } + sdsfreesplitres(lines,totlines); return; @@ -552,8 +609,9 @@ void loadServerConfig(char *filename, char *options) { void configSetCommand(redisClient *c) { robj *o; long long ll; - redisAssertWithInfo(c,c->argv[2],c->argv[2]->encoding == REDIS_ENCODING_RAW); - redisAssertWithInfo(c,c->argv[2],c->argv[3]->encoding == REDIS_ENCODING_RAW); + int err; + redisAssertWithInfo(c,c->argv[2],sdsEncodedObject(c->argv[2])); + redisAssertWithInfo(c,c->argv[3],sdsEncodedObject(c->argv[3])); o = c->argv[3]; if (!strcasecmp(c->argv[2]->ptr,"dbfilename")) { @@ -571,8 +629,8 @@ void configSetCommand(redisClient *c) { zfree(server.masterauth); server.masterauth = ((char*)o->ptr)[0] ? 
zstrdup(o->ptr) : NULL; } else if (!strcasecmp(c->argv[2]->ptr,"maxmemory")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || - ll < 0) goto badfmt; + ll = memtoll(o->ptr,&err); + if (err || ll < 0) goto badfmt; server.maxmemory = ll; if (server.maxmemory) { if (server.maxmemory < zmalloc_used_memory()) { @@ -727,6 +785,11 @@ void configSetCommand(redisClient *c) { if (yn == -1) goto badfmt; server.repl_slave_ro = yn; + } else if (!strcasecmp(c->argv[2]->ptr,"activerehashing")) { + int yn = yesnotoi(o->ptr); + + if (yn == -1) goto badfmt; + server.activerehashing = yn; } else if (!strcasecmp(c->argv[2]->ptr,"dir")) { if (chdir((char*)o->ptr) == -1) { addReplyErrorFormat(c,"Changing directory: %s", strerror(errno)); @@ -794,7 +857,6 @@ void configSetCommand(redisClient *c) { * whole configuration string or accept it all, even if a single * error in a single client class is present. */ for (j = 0; j < vlen; j++) { - char *eptr; long val; if ((j % 4) == 0) { @@ -803,8 +865,8 @@ void configSetCommand(redisClient *c) { goto badfmt; } } else { - val = strtoll(v[j], &eptr, 10); - if (eptr[0] != '\0' || val < 0) { + val = memtoll(v[j], &err); + if (err || val < 0) { sdsfreesplitres(v,vlen); goto badfmt; } @@ -838,7 +900,8 @@ void configSetCommand(redisClient *c) { if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll <= 0) goto badfmt; server.repl_timeout = ll; } else if (!strcasecmp(c->argv[2]->ptr,"repl-backlog-size")) { - if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll <= 0) goto badfmt; + ll = memtoll(o->ptr,&err); + if (err || ll < 0) goto badfmt; resizeReplicationBacklog(ll); } else if (!strcasecmp(c->argv[2]->ptr,"repl-backlog-ttl")) { if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; @@ -864,6 +927,15 @@ void configSetCommand(redisClient *c) { if (yn == -1) goto badfmt; server.repl_disable_tcp_nodelay = yn; + } else if (!strcasecmp(c->argv[2]->ptr,"repl-diskless-sync")) { + int yn = yesnotoi(o->ptr); + + if (yn == -1) goto badfmt; + server.repl_diskless_sync = yn; + } else if (!strcasecmp(c->argv[2]->ptr,"repl-diskless-sync-delay")) { + if (getLongLongFromObject(o,&ll) == REDIS_ERR || + ll < 0) goto badfmt; + server.repl_diskless_sync_delay = ll; } else if (!strcasecmp(c->argv[2]->ptr,"slave-priority")) { if (getLongLongFromObject(o,&ll) == REDIS_ERR || ll < 0) goto badfmt; @@ -878,6 +950,23 @@ void configSetCommand(redisClient *c) { ll < 0) goto badfmt; server.repl_min_slaves_max_lag = ll; refreshGoodSlavesCount(); + } else if (!strcasecmp(c->argv[2]->ptr,"cluster-require-full-coverage")) { + int yn = yesnotoi(o->ptr); + + if (yn == -1) goto badfmt; + server.cluster_require_full_coverage = yn; + } else if (!strcasecmp(c->argv[2]->ptr,"cluster-node-timeout")) { + if (getLongLongFromObject(o,&ll) == REDIS_ERR || + ll <= 0) goto badfmt; + server.cluster_node_timeout = ll; + } else if (!strcasecmp(c->argv[2]->ptr,"cluster-migration-barrier")) { + if (getLongLongFromObject(o,&ll) == REDIS_ERR || + ll < 0) goto badfmt; + server.cluster_migration_barrier = ll; + } else if (!strcasecmp(c->argv[2]->ptr,"cluster-slave-validity-factor")) { + if (getLongLongFromObject(o,&ll) == REDIS_ERR || + ll < 0) goto badfmt; + server.cluster_slave_validity_factor = ll; } else { addReplyErrorFormat(c,"Unsupported CONFIG parameter: %s", (char*)c->argv[2]->ptr); @@ -927,7 +1016,7 @@ void configGetCommand(redisClient *c) { char *pattern = o->ptr; char buf[128]; int matches = 0; - redisAssertWithInfo(c,o,o->encoding == REDIS_ENCODING_RAW); + redisAssertWithInfo(c,o,sdsEncodedObject(o)); /* 
String values */ config_get_string_field("dbfilename",server.rdb_filename); @@ -982,8 +1071,14 @@ void configGetCommand(redisClient *c) { config_get_numerical_field("min-slaves-to-write",server.repl_min_slaves_to_write); config_get_numerical_field("min-slaves-max-lag",server.repl_min_slaves_max_lag); config_get_numerical_field("hz",server.hz); + config_get_numerical_field("cluster-node-timeout",server.cluster_node_timeout); + config_get_numerical_field("cluster-migration-barrier",server.cluster_migration_barrier); + config_get_numerical_field("cluster-slave-validity-factor",server.cluster_slave_validity_factor); + config_get_numerical_field("repl-diskless-sync-delay",server.repl_diskless_sync_delay); /* Bool (yes/no) values */ + config_get_bool_field("cluster-require-full-coverage", + server.cluster_require_full_coverage); config_get_bool_field("no-appendfsync-on-rewrite", server.aof_no_fsync_on_rewrite); config_get_bool_field("slave-serve-stale-data", @@ -998,6 +1093,8 @@ void configGetCommand(redisClient *c) { config_get_bool_field("activerehashing", server.activerehashing); config_get_bool_field("repl-disable-tcp-nodelay", server.repl_disable_tcp_nodelay); + config_get_bool_field("repl-diskless-sync", + server.repl_diskless_sync); config_get_bool_field("aof-rewrite-incremental-fsync", server.aof_rewrite_incremental_fsync); config_get_bool_field("aof-load-truncated", @@ -1424,7 +1521,7 @@ void rewriteConfigEnumOption(struct rewriteConfigState *state, char *option, int rewriteConfigRewriteLine(state,option,line,force); } -/* Rewrite the syslog-fability option. */ +/* Rewrite the syslog-facility option. */ void rewriteConfigSyslogfacilityOption(struct rewriteConfigState *state) { int value = server.syslog_facility, j; int force = value != LOG_LOCAL0; @@ -1475,8 +1572,9 @@ void rewriteConfigSlaveofOption(struct rewriteConfigState *state) { sds line; /* If this is a master, we want all the slaveof config options - * in the file to be removed. */ - if (server.masterhost == NULL) { + * in the file to be removed. Note that if this is a cluster instance + * we don't want a slaveof directive inside redis.conf. 
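CONFIG SET maxmemory, repl-backlog-size and the client-output-buffer-limit values now go through memtoll(), so human-readable sizes such as "256mb" or "1gb" are accepted and a separate err flag reports malformed input. A self-contained parser of the same flavor (this is not Redis' memtoll, only an illustration of the idea; the real helper also understands 1000-based suffixes):

#include <stdlib.h>
#include <strings.h>

/* Parse "512", "64kb", "256mb" or "1gb" into bytes. On malformed input
 * *err is set to 1 and 0 is returned. */
static long long parse_mem_size(const char *s, int *err) {
    char *end;
    long long val;

    *err = 0;
    val = strtoll(s, &end, 10);
    if (end == s) { *err = 1; return 0; }

    if (*end == '\0')               return val;
    if (strcasecmp(end, "kb") == 0) return val * 1024LL;
    if (strcasecmp(end, "mb") == 0) return val * 1024LL * 1024LL;
    if (strcasecmp(end, "gb") == 0) return val * 1024LL * 1024LL * 1024LL;

    *err = 1;
    return 0;
}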
*/ + if (server.cluster_enabled || server.masterhost == NULL) { rewriteConfigMarkAsProcessed(state,"slaveof"); return; } @@ -1722,6 +1820,8 @@ int rewriteConfig(char *path) { rewriteConfigBytesOption(state,"repl-backlog-size",server.repl_backlog_size,REDIS_DEFAULT_REPL_BACKLOG_SIZE); rewriteConfigBytesOption(state,"repl-backlog-ttl",server.repl_backlog_time_limit,REDIS_DEFAULT_REPL_BACKLOG_TIME_LIMIT); rewriteConfigYesNoOption(state,"repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay,REDIS_DEFAULT_REPL_DISABLE_TCP_NODELAY); + rewriteConfigYesNoOption(state,"repl-diskless-sync",server.repl_diskless_sync,REDIS_DEFAULT_REPL_DISKLESS_SYNC); + rewriteConfigNumericalOption(state,"repl-diskless-sync-delay",server.repl_diskless_sync_delay,REDIS_DEFAULT_REPL_DISKLESS_SYNC_DELAY); rewriteConfigNumericalOption(state,"slave-priority",server.slave_priority,REDIS_DEFAULT_SLAVE_PRIORITY); rewriteConfigNumericalOption(state,"min-slaves-to-write",server.repl_min_slaves_to_write,REDIS_DEFAULT_MIN_SLAVES_TO_WRITE); rewriteConfigNumericalOption(state,"min-slaves-max-lag",server.repl_min_slaves_max_lag,REDIS_DEFAULT_MIN_SLAVES_MAX_LAG); @@ -1748,6 +1848,12 @@ int rewriteConfig(char *path) { rewriteConfigNumericalOption(state,"auto-aof-rewrite-percentage",server.aof_rewrite_perc,REDIS_AOF_REWRITE_PERC); rewriteConfigBytesOption(state,"auto-aof-rewrite-min-size",server.aof_rewrite_min_size,REDIS_AOF_REWRITE_MIN_SIZE); rewriteConfigNumericalOption(state,"lua-time-limit",server.lua_time_limit,REDIS_LUA_TIME_LIMIT); + rewriteConfigYesNoOption(state,"cluster-enabled",server.cluster_enabled,0); + rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,REDIS_DEFAULT_CLUSTER_CONFIG_FILE); + rewriteConfigYesNoOption(state,"cluster-require-full-coverage",server.cluster_require_full_coverage,REDIS_CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE); + rewriteConfigNumericalOption(state,"cluster-node-timeout",server.cluster_node_timeout,REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT); + rewriteConfigNumericalOption(state,"cluster-migration-barrier",server.cluster_migration_barrier,REDIS_CLUSTER_DEFAULT_MIGRATION_BARRIER); + rewriteConfigNumericalOption(state,"cluster-slave-validity-factor",server.cluster_slave_validity_factor,REDIS_CLUSTER_DEFAULT_SLAVE_VALIDITY); rewriteConfigNumericalOption(state,"slowlog-log-slower-than",server.slowlog_log_slower_than,REDIS_SLOWLOG_LOG_SLOWER_THAN); rewriteConfigNumericalOption(state,"latency-monitor-threshold",server.latency_monitor_threshold,REDIS_DEFAULT_LATENCY_MONITOR_THRESHOLD); rewriteConfigNumericalOption(state,"slowlog-max-len",server.slowlog_max_len,REDIS_SLOWLOG_MAX_LEN); diff --git a/redis.submodule/src/config.h b/redis.submodule/src/config.h index 57d0759..9ec65fc 100644 --- a/redis.submodule/src/config.h +++ b/redis.submodule/src/config.h @@ -34,6 +34,11 @@ #include #endif +#ifdef __linux__ +#include +#include +#endif + /* Define redis_fstat to fstat or fstat64() */ #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) #define redis_fstat fstat64 @@ -48,6 +53,7 @@ #define HAVE_PROC_STAT 1 #define HAVE_PROC_MAPS 1 #define HAVE_PROC_SMAPS 1 +#define HAVE_PROC_SOMAXCONN 1 #endif /* Test for task_info() */ @@ -56,7 +62,7 @@ #endif /* Test for backtrace() */ -#if defined(__APPLE__) || defined(__linux__) +#if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) #define HAVE_BACKTRACE 1 #endif @@ -86,8 +92,6 @@ /* Define rdb_fsync_range to sync_file_range() on Linux, otherwise we use * the plain fsync() call. 
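With the headers moved up and the glibc guard in place, rdb_fsync_range() resolves to sync_file_range(2) on suitable Linux/glibc combinations and to a plain fsync(2) everywhere else, so incremental-fsync writers only push out the range they just wrote. A compile-time switch of that shape (a sketch, not the actual config.h macro):

#define _GNU_SOURCE            /* sync_file_range() is a GNU extension */
#include <fcntl.h>
#include <unistd.h>

#ifdef HAVE_SYNC_FILE_RANGE
/* Flush only [offset, offset+nbytes) of the file to disk. */
static int fsync_range(int fd, off_t offset, off_t nbytes) {
    return sync_file_range(fd, offset, nbytes,
                           SYNC_FILE_RANGE_WAIT_BEFORE |
                           SYNC_FILE_RANGE_WRITE);
}
#else
/* Portable fallback: sync the whole file. */
static int fsync_range(int fd, off_t offset, off_t nbytes) {
    (void)offset; (void)nbytes;
    return fsync(fd);
}
#endif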
*/ #ifdef __linux__ -#include -#include #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) #if (LINUX_VERSION_CODE >= 0x020611 && __GLIBC_PREREQ(2, 6)) #define HAVE_SYNC_FILE_RANGE 1 @@ -112,7 +116,7 @@ #define USE_SETPROCTITLE #endif -#if (defined __linux || defined __APPLE__) +#if ((defined __linux && defined(__GLIBC__)) || defined __APPLE__) #define USE_SETPROCTITLE #define INIT_SETPROCTITLE_REPLACEMENT void spt_init(int argc, char *argv[]); diff --git a/redis.submodule/src/db.c b/redis.submodule/src/db.c index b39fbe3..c74cde2 100644 --- a/redis.submodule/src/db.c +++ b/redis.submodule/src/db.c @@ -28,12 +28,14 @@ */ #include "redis.h" +#include "cluster.h" #include #include -void SlotToKeyAdd(robj *key); -void SlotToKeyDel(robj *key); +void slotToKeyAdd(robj *key); +void slotToKeyDel(robj *key); +void slotToKeyFlush(void); /*----------------------------------------------------------------------------- * C-level DB API @@ -48,7 +50,7 @@ robj *lookupKey(redisDb *db, robj *key) { * Don't do it if we have a saving child, as this will trigger * a copy on write madness. */ if (server.rdb_child_pid == -1 && server.aof_child_pid == -1) - val->lru = server.lruclock; + val->lru = LRU_CLOCK(); return val; } else { return NULL; @@ -94,6 +96,7 @@ void dbAdd(redisDb *db, robj *key, robj *val) { redisAssertWithInfo(NULL,key,retval == REDIS_OK); if (val->type == REDIS_LIST) signalListAsReady(db, key); + if (server.cluster_enabled) slotToKeyAdd(key); } /* Overwrite an existing key with a new value. Incrementing the reference @@ -102,7 +105,7 @@ void dbAdd(redisDb *db, robj *key, robj *val) { * * The program is aborted if the key was not already present. */ void dbOverwrite(redisDb *db, robj *key, robj *val) { - struct dictEntry *de = dictFind(db->dict,key->ptr); + dictEntry *de = dictFind(db->dict,key->ptr); redisAssertWithInfo(NULL,key,de != NULL); dictReplace(db->dict, key->ptr, val); @@ -134,7 +137,7 @@ int dbExists(redisDb *db, robj *key) { * * The function makes sure to return keys not already expired. */ robj *dbRandomKey(redisDb *db) { - struct dictEntry *de; + dictEntry *de; while(1) { sds key; @@ -161,6 +164,7 @@ int dbDelete(redisDb *db, robj *key) { * the key, because it is shared with the main dictionary. */ if (dictSize(db->expires) > 0) dictDelete(db->expires,key->ptr); if (dictDelete(db->dict,key->ptr) == DICT_OK) { + if (server.cluster_enabled) slotToKeyDel(key); return 1; } else { return 0; @@ -198,7 +202,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) { redisAssert(o->type == REDIS_STRING); if (o->refcount != 1 || o->encoding != REDIS_ENCODING_RAW) { robj *decoded = getDecodedObject(o); - o = createStringObject(decoded->ptr, sdslen(decoded->ptr)); + o = createRawStringObject(decoded->ptr, sdslen(decoded->ptr)); decrRefCount(decoded); dbOverwrite(db,key,o); } @@ -214,6 +218,7 @@ long long emptyDb(void(callback)(void*)) { dictEmpty(server.db[j].dict,callback); dictEmpty(server.db[j].expires,callback); } + if (server.cluster_enabled) slotToKeyFlush(); return removed; } @@ -250,6 +255,7 @@ void flushdbCommand(redisClient *c) { signalFlushedDb(c->db->id); dictEmpty(c->db->dict,NULL); dictEmpty(c->db->expires,NULL); + if (server.cluster_enabled) slotToKeyFlush(); addReply(c,shared.ok); } @@ -287,13 +293,17 @@ void delCommand(redisClient *c) { addReplyLongLong(c,deleted); } +/* EXISTS key1 key2 ... key_N. + * Return value is the number of keys existing. 
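EXISTS becomes variadic: every key argument is lazily expired and looked up, and the reply is simply how many of them are present, so repeating a key counts it each time. The counting loop in miniature, against a flat array standing in for the keyspace (purely illustrative):

#include <string.h>

/* Count how many of the queried names exist in the key table. Duplicates
 * in 'queries' are counted once per occurrence, like the new EXISTS. */
static int count_existing(const char *keys[], int nkeys,
                          const char *queries[], int nqueries) {
    int found = 0;
    for (int q = 0; q < nqueries; q++) {
        for (int k = 0; k < nkeys; k++) {
            if (strcmp(keys[k], queries[q]) == 0) { found++; break; }
        }
    }
    return found;
}

With keys {"a", "b"} and queries {"a", "a", "c"} this returns 2, which is what the patched loop computes for EXISTS a a c.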
*/ void existsCommand(redisClient *c) { - expireIfNeeded(c->db,c->argv[1]); - if (dbExists(c->db,c->argv[1])) { - addReply(c, shared.cone); - } else { - addReply(c, shared.czero); + long long count = 0; + int j; + + for (j = 1; j < c->argc; j++) { + expireIfNeeded(c->db,c->argv[j]); + if (dbExists(c->db,c->argv[j])) count++; } + addReplyLongLong(c,count); } void selectCommand(redisClient *c) { @@ -303,6 +313,10 @@ void selectCommand(redisClient *c) { "invalid DB index") != REDIS_OK) return; + if (server.cluster_enabled && id != 0) { + addReplyError(c,"SELECT is not allowed in cluster mode"); + return; + } if (selectDb(c,id) == REDIS_ERR) { addReplyError(c,"invalid DB index"); } else { @@ -371,7 +385,7 @@ void scanCallback(void *privdata, const dictEntry *de) { } else if (o->type == REDIS_ZSET) { key = dictGetKey(de); incrRefCount(key); - val = createStringObjectFromLongDouble(*(double*)dictGetVal(de)); + val = createStringObjectFromLongDouble(*(double*)dictGetVal(de),0); } else { redisPanic("Type not handled in SCAN callback."); } @@ -533,16 +547,16 @@ void scanGenericCommand(redisClient *c, robj *o, unsigned long cursor) { /* Filter element if it does not match the pattern. */ if (!filter && use_pattern) { - if (kobj->encoding == REDIS_ENCODING_INT) { + if (sdsEncodedObject(kobj)) { + if (!stringmatchlen(pat, patlen, kobj->ptr, sdslen(kobj->ptr), 0)) + filter = 1; + } else { char buf[REDIS_LONGSTR_SIZE]; int len; redisAssert(kobj->encoding == REDIS_ENCODING_INT); len = ll2string(buf,sizeof(buf),(long)kobj->ptr); if (!stringmatchlen(pat, patlen, buf, len, 0)) filter = 1; - } else { - if (!stringmatchlen(pat, patlen, kobj->ptr, sdslen(kobj->ptr), 0)) - filter = 1; } } @@ -700,7 +714,12 @@ void moveCommand(redisClient *c) { robj *o; redisDb *src, *dst; int srcid; - long long dbid; + long long dbid, expire; + + if (server.cluster_enabled) { + addReplyError(c,"MOVE is not allowed in cluster mode"); + return; + } /* Obtain source and target DB pointers */ src = c->db; @@ -729,6 +748,7 @@ void moveCommand(redisClient *c) { addReply(c,shared.czero); return; } + expire = getExpire(c->db,c->argv[1]); /* Return zero if the key already exists in the target DB */ if (lookupKeyWrite(dst,c->argv[1]) != NULL) { @@ -736,6 +756,7 @@ void moveCommand(redisClient *c) { return; } dbAdd(dst,c->argv[1],o); + if (expire != -1) setExpire(dst,c->argv[1],expire); incrRefCount(o); /* OK! key moved, free the entry in the source DB */ @@ -963,6 +984,8 @@ void persistCommand(redisClient *c) { * API to get key arguments from commands * ---------------------------------------------------------------------------*/ +/* The base case is to use the keys position as given in the command table + * (firstkey, lastkey, step). */ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, int *numkeys) { int j, i = 0, last, *keys; REDIS_NOTUSED(argv); @@ -982,42 +1005,65 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in return keys; } -int *getKeysFromCommand(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) { +/* Return all the arguments that are keys in the command passed via argc / argv. + * + * The command returns the positions of all the key arguments inside the array, + * so the actual return value is an heap allocated array of integers. The + * length of the array is returned by reference into *numkeys. + * + * 'cmd' must be point to the corresponding entry into the redisCommand + * table, according to the command name in argv[0]. 
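The generic path, getKeysUsingCommandTable(), just walks argv positions from firstkey to lastkey in strides of keystep, while commands whose keys cannot be described that way (ZUNIONSTORE, SORT, EVAL and friends) get dedicated helpers. A sketch of the table-driven walk, with the keyspec fields mirrored into a small struct purely for the example:

#include <stdlib.h>

struct cmd_keyspec { int firstkey, lastkey, keystep; };

/* Return a heap-allocated array of the argv positions that are keys;
 * the count is stored in *numkeys. lastkey == -1 means "last argument". */
static int *keys_from_spec(const struct cmd_keyspec *spec, int argc,
                           int *numkeys) {
    int last = spec->lastkey;
    if (last < 0) last = argc + last;

    if (spec->firstkey == 0 || spec->firstkey > last) {
        *numkeys = 0;                      /* command takes no keys */
        return NULL;
    }
    int *keys = malloc(sizeof(int) *
                       (size_t)((last - spec->firstkey) / spec->keystep + 1));
    int n = 0;
    for (int j = spec->firstkey; j <= last; j += spec->keystep)
        keys[n++] = j;                     /* argv[j] is a key */
    *numkeys = n;
    return keys;
}

For GET (1, 1, 1) this yields position {1}; for MSET (1, -1, 2) it yields every odd position up to the last argument.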
+ * + * This function uses the command table if a command-specific helper function + * is not required, otherwise it calls the command-specific function. */ +int *getKeysFromCommand(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) { if (cmd->getkeys_proc) { - return cmd->getkeys_proc(cmd,argv,argc,numkeys,flags); + return cmd->getkeys_proc(cmd,argv,argc,numkeys); } else { return getKeysUsingCommandTable(cmd,argv,argc,numkeys); } } +/* Free the result of getKeysFromCommand. */ void getKeysFreeResult(int *result) { zfree(result); } -int *noPreloadGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) { - if (flags & REDIS_GETKEYS_PRELOAD) { +/* Helper function to extract keys from following commands: + * ZUNIONSTORE ... + * ZINTERSTORE ... */ +int *zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) { + int i, num, *keys; + REDIS_NOTUSED(cmd); + + num = atoi(argv[2]->ptr); + /* Sanity check. Don't return any key if the command is going to + * reply with syntax error. */ + if (num > (argc-3)) { *numkeys = 0; return NULL; - } else { - return getKeysUsingCommandTable(cmd,argv,argc,numkeys); } -} -int *renameGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) { - if (flags & REDIS_GETKEYS_PRELOAD) { - int *keys = zmalloc(sizeof(int)); - *numkeys = 1; - keys[0] = 1; - return keys; - } else { - return getKeysUsingCommandTable(cmd,argv,argc,numkeys); - } + /* Keys in z{union,inter}store come from two places: + * argv[1] = storage key, + * argv[3...n] = keys to intersect */ + keys = zmalloc(sizeof(int)*(num+1)); + + /* Add all key positions for argv[3...n] to keys[] */ + for (i = 0; i < num; i++) keys[i] = 3+i; + + /* Finally add the argv[1] key position (the storage key target). */ + keys[num] = 1; + *numkeys = num+1; /* Total keys = {union,inter} keys + storage key */ + return keys; } -int *zunionInterGetKeys(struct redisCommand *cmd,robj **argv, int argc, int *numkeys, int flags) { +/* Helper function to extract keys from the following commands: + * EVAL + + +EOF +} + +sub HtmlListingFooter { + return <<'EOF'; + + +EOF +} + +sub HtmlEscape { + my $text = shift; + $text =~ s/&/&/g; + $text =~ s//>/g; + return $text; +} + +# Returns the indentation of the line, if it has any non-whitespace +# characters. Otherwise, returns -1. +sub Indentation { + my $line = shift; + if (m/^(\s*)\S/) { + return length($1); + } else { + return -1; + } +} + +# If the symbol table contains inlining info, Disassemble() may tag an +# instruction with a location inside an inlined function. But for +# source listings, we prefer to use the location in the function we +# are listing. So use MapToSymbols() to fetch full location +# information for each instruction and then pick out the first +# location from a location list (location list contains callers before +# callees in case of inlining). 
+# +# After this routine has run, each entry in $instructions contains: +# [0] start address +# [1] filename for function we are listing +# [2] line number for function we are listing +# [3] disassembly +# [4] limit address +# [5] most specific filename (may be different from [1] due to inlining) +# [6] most specific line number (may be different from [2] due to inlining) +sub GetTopLevelLineNumbers { + my ($lib, $offset, $instructions) = @_; + my $pcs = []; + for (my $i = 0; $i <= $#{$instructions}; $i++) { + push(@{$pcs}, $instructions->[$i]->[0]); + } + my $symbols = {}; + MapToSymbols($lib, $offset, $pcs, $symbols); + for (my $i = 0; $i <= $#{$instructions}; $i++) { + my $e = $instructions->[$i]; + push(@{$e}, $e->[1]); + push(@{$e}, $e->[2]); + my $addr = $e->[0]; + my $sym = $symbols->{$addr}; + if (defined($sym)) { + if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) { + $e->[1] = $1; # File name + $e->[2] = $2; # Line number + } + } + } +} + +# Print source-listing for one routine +sub PrintSource { + my $prog = shift; + my $offset = shift; + my $routine = shift; + my $flat = shift; + my $cumulative = shift; + my $start_addr = shift; + my $end_addr = shift; + my $html = shift; + my $output = shift; + + # Disassemble all instructions (just to get line numbers) + my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); + GetTopLevelLineNumbers($prog, $offset, \@instructions); + + # Hack 1: assume that the first source file encountered in the + # disassembly contains the routine + my $filename = undef; + for (my $i = 0; $i <= $#instructions; $i++) { + if ($instructions[$i]->[2] >= 0) { + $filename = $instructions[$i]->[1]; + last; + } + } + if (!defined($filename)) { + print STDERR "no filename found in $routine\n"; + return 0; + } + + # Hack 2: assume that the largest line number from $filename is the + # end of the procedure. This is typically safe since if P1 contains + # an inlined call to P2, then P2 usually occurs earlier in the + # source file. If this does not work, we might have to compute a + # density profile or just print all regions we find. + my $lastline = 0; + for (my $i = 0; $i <= $#instructions; $i++) { + my $f = $instructions[$i]->[1]; + my $l = $instructions[$i]->[2]; + if (($f eq $filename) && ($l > $lastline)) { + $lastline = $l; + } + } + + # Hack 3: assume the first source location from "filename" is the start of + # the source code. + my $firstline = 1; + for (my $i = 0; $i <= $#instructions; $i++) { + if ($instructions[$i]->[1] eq $filename) { + $firstline = $instructions[$i]->[2]; + last; + } + } + + # Hack 4: Extend last line forward until its indentation is less than + # the indentation we saw on $firstline + my $oldlastline = $lastline; + { + if (!open(FILE, "<$filename")) { + print STDERR "$filename: $!\n"; + return 0; + } + my $l = 0; + my $first_indentation = -1; + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + $l++; + my $indent = Indentation($_); + if ($l >= $firstline) { + if ($first_indentation < 0 && $indent >= 0) { + $first_indentation = $indent; + last if ($first_indentation == 0); + } + } + if ($l >= $lastline && $indent >= 0) { + if ($indent >= $first_indentation) { + $lastline = $l+1; + } else { + last; + } + } + } + close(FILE); + } + + # Assign all samples to the range $firstline,$lastline, + # Hack 4: If an instruction does not occur in the range, its samples + # are moved to the next instruction that occurs in the range. 
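The sample-attribution pass that follows keeps running flat and cumulative counters: each instruction's counts are added to the running totals, and as soon as an instruction maps to a line inside [firstline, lastline] the totals are flushed onto that line; whatever is still pending at the end is credited to the last line. The same bookkeeping with one counter and plain arrays, as a C illustration (the Perl code uses hashes keyed by line number):

/* Fold per-instruction sample counts into per-line totals. Counts from
 * instructions outside [firstline, lastline] ride along in 'running'
 * until the next in-range instruction; leftovers go to the last line. */
static void attribute_samples(const int *inst_line, const long *inst_count,
                              int ninst, int firstline, int lastline,
                              long *per_line /* indexed by line number */) {
    long running = 0;
    for (int i = 0; i < ninst; i++) {
        running += inst_count[i];
        if (inst_line[i] >= firstline && inst_line[i] <= lastline) {
            per_line[inst_line[i]] += running;
            running = 0;
        }
    }
    per_line[lastline] += running;
}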
+ my $samples1 = {}; # Map from line number to flat count + my $samples2 = {}; # Map from line number to cumulative count + my $running1 = 0; # Unassigned flat counts + my $running2 = 0; # Unassigned cumulative counts + my $total1 = 0; # Total flat counts + my $total2 = 0; # Total cumulative counts + my %disasm = (); # Map from line number to disassembly + my $running_disasm = ""; # Unassigned disassembly + my $skip_marker = "---\n"; + if ($html) { + $skip_marker = ""; + for (my $l = $firstline; $l <= $lastline; $l++) { + $disasm{$l} = ""; + } + } + my $last_dis_filename = ''; + my $last_dis_linenum = -1; + my $last_touched_line = -1; # To detect gaps in disassembly for a line + foreach my $e (@instructions) { + # Add up counts for all address that fall inside this instruction + my $c1 = 0; + my $c2 = 0; + for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { + $c1 += GetEntry($flat, $a); + $c2 += GetEntry($cumulative, $a); + } + + if ($html) { + my $dis = sprintf(" %6s %6s \t\t%8s: %s ", + HtmlPrintNumber($c1), + HtmlPrintNumber($c2), + UnparseAddress($offset, $e->[0]), + CleanDisassembly($e->[3])); + + # Append the most specific source line associated with this instruction + if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) }; + $dis = HtmlEscape($dis); + my $f = $e->[5]; + my $l = $e->[6]; + if ($f ne $last_dis_filename) { + $dis .= sprintf("%s:%d", + HtmlEscape(CleanFileName($f)), $l); + } elsif ($l ne $last_dis_linenum) { + # De-emphasize the unchanged file name portion + $dis .= sprintf("%s" . + ":%d", + HtmlEscape(CleanFileName($f)), $l); + } else { + # De-emphasize the entire location + $dis .= sprintf("%s:%d", + HtmlEscape(CleanFileName($f)), $l); + } + $last_dis_filename = $f; + $last_dis_linenum = $l; + $running_disasm .= $dis; + $running_disasm .= "\n"; + } + + $running1 += $c1; + $running2 += $c2; + $total1 += $c1; + $total2 += $c2; + my $file = $e->[1]; + my $line = $e->[2]; + if (($file eq $filename) && + ($line >= $firstline) && + ($line <= $lastline)) { + # Assign all accumulated samples to this line + AddEntry($samples1, $line, $running1); + AddEntry($samples2, $line, $running2); + $running1 = 0; + $running2 = 0; + if ($html) { + if ($line != $last_touched_line && $disasm{$line} ne '') { + $disasm{$line} .= "\n"; + } + $disasm{$line} .= $running_disasm; + $running_disasm = ''; + $last_touched_line = $line; + } + } + } + + # Assign any leftover samples to $lastline + AddEntry($samples1, $lastline, $running1); + AddEntry($samples2, $lastline, $running2); + if ($html) { + if ($lastline != $last_touched_line && $disasm{$lastline} ne '') { + $disasm{$lastline} .= "\n"; + } + $disasm{$lastline} .= $running_disasm; + } + + if ($html) { + printf $output ( + "

<h1>%s</h1>%s\n<pre>\n" .
+      "Total:%6s %6s (flat / cumulative %s)\n",
+      HtmlEscape(ShortFunctionName($routine)),
+      HtmlEscape(CleanFileName($filename)),
+      Unparse($total1),
+      Unparse($total2),
+      Units());
+  } else {
+    printf $output (
+      "ROUTINE ====================== %s in %s\n" .
+      "%6s %6s Total %s (flat / cumulative)\n",
+      ShortFunctionName($routine),
+      CleanFileName($filename),
+      Unparse($total1),
+      Unparse($total2),
+      Units());
+  }
+  if (!open(FILE, "<$filename")) {
+    print STDERR "$filename: $!\n";
+    return 0;
+  }
+  my $l = 0;
+  while (<FILE>) {
+    s/\r//g;         # turn windows-looking lines into unix-looking lines
+    $l++;
+    if ($l >= $firstline - 5 &&
+        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
+      chop;
+      my $text = $_;
+      if ($l == $firstline) { print $output $skip_marker; }
+      my $n1 = GetEntry($samples1, $l);
+      my $n2 = GetEntry($samples2, $l);
+      if ($html) {
+        # Emit a span that has one of the following classes:
+        #    livesrc -- has samples
+        #    deadsrc -- has disassembly, but with no samples
+        #    nop     -- has no matching disassembly
+        # Also emit an optional span containing disassembly.
+        my $dis = $disasm{$l};
+        my $asm = "";
+        if (defined($dis) && $dis ne '') {
+          $asm = "" . $dis . "";
+        }
+        my $source_class = (($n1 + $n2 > 0)
+                            ? "livesrc"
+                            : (($asm ne "") ? "deadsrc" : "nop"));
+        printf $output (
+          "%5d " .
+          "%6s %6s %s%s\n",
+          $l, $source_class,
+          HtmlPrintNumber($n1),
+          HtmlPrintNumber($n2),
+          HtmlEscape($text),
+          $asm);
+      } else {
+        printf $output(
+          "%6s %6s %4d: %s\n",
+          UnparseAlt($n1),
+          UnparseAlt($n2),
+          $l,
+          $text);
+      }
+      if ($l == $lastline)  { print $output $skip_marker; }
+    };
+  }
+  close(FILE);
+  if ($html) {
+    print $output "
\n"; + } + return 1; +} + +# Return the source line for the specified file/linenumber. +# Returns undef if not found. +sub SourceLine { + my $file = shift; + my $line = shift; + + # Look in cache + if (!defined($main::source_cache{$file})) { + if (100 < scalar keys(%main::source_cache)) { + # Clear the cache when it gets too big + $main::source_cache = (); + } + + # Read all lines from the file + if (!open(FILE, "<$file")) { + print STDERR "$file: $!\n"; + $main::source_cache{$file} = []; # Cache the negative result + return undef; + } + my $lines = []; + push(@{$lines}, ""); # So we can use 1-based line numbers as indices + while () { + push(@{$lines}, $_); + } + close(FILE); + + # Save the lines in the cache + $main::source_cache{$file} = $lines; + } + + my $lines = $main::source_cache{$file}; + if (($line < 0) || ($line > $#{$lines})) { + return undef; + } else { + return $lines->[$line]; + } +} + +# Print disassembly for one routine with interspersed source if available +sub PrintDisassembledFunction { + my $prog = shift; + my $offset = shift; + my $routine = shift; + my $flat = shift; + my $cumulative = shift; + my $start_addr = shift; + my $end_addr = shift; + my $total = shift; + + # Disassemble all instructions + my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr); + + # Make array of counts per instruction + my @flat_count = (); + my @cum_count = (); + my $flat_total = 0; + my $cum_total = 0; + foreach my $e (@instructions) { + # Add up counts for all address that fall inside this instruction + my $c1 = 0; + my $c2 = 0; + for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) { + $c1 += GetEntry($flat, $a); + $c2 += GetEntry($cumulative, $a); + } + push(@flat_count, $c1); + push(@cum_count, $c2); + $flat_total += $c1; + $cum_total += $c2; + } + + # Print header with total counts + printf("ROUTINE ====================== %s\n" . + "%6s %6s %s (flat, cumulative) %.1f%% of total\n", + ShortFunctionName($routine), + Unparse($flat_total), + Unparse($cum_total), + Units(), + ($cum_total * 100.0) / $total); + + # Process instructions in order + my $current_file = ""; + for (my $i = 0; $i <= $#instructions; ) { + my $e = $instructions[$i]; + + # Print the new file name whenever we switch files + if ($e->[1] ne $current_file) { + $current_file = $e->[1]; + my $fname = $current_file; + $fname =~ s|^\./||; # Trim leading "./" + + # Shorten long file names + if (length($fname) >= 58) { + $fname = "..." . substr($fname, -55); + } + printf("-------------------- %s\n", $fname); + } + + # TODO: Compute range of lines to print together to deal with + # small reorderings. 
+ my $first_line = $e->[2]; + my $last_line = $first_line; + my %flat_sum = (); + my %cum_sum = (); + for (my $l = $first_line; $l <= $last_line; $l++) { + $flat_sum{$l} = 0; + $cum_sum{$l} = 0; + } + + # Find run of instructions for this range of source lines + my $first_inst = $i; + while (($i <= $#instructions) && + ($instructions[$i]->[2] >= $first_line) && + ($instructions[$i]->[2] <= $last_line)) { + $e = $instructions[$i]; + $flat_sum{$e->[2]} += $flat_count[$i]; + $cum_sum{$e->[2]} += $cum_count[$i]; + $i++; + } + my $last_inst = $i - 1; + + # Print source lines + for (my $l = $first_line; $l <= $last_line; $l++) { + my $line = SourceLine($current_file, $l); + if (!defined($line)) { + $line = "?\n"; + next; + } else { + $line =~ s/^\s+//; + } + printf("%6s %6s %5d: %s", + UnparseAlt($flat_sum{$l}), + UnparseAlt($cum_sum{$l}), + $l, + $line); + } + + # Print disassembly + for (my $x = $first_inst; $x <= $last_inst; $x++) { + my $e = $instructions[$x]; + printf("%6s %6s %8s: %6s\n", + UnparseAlt($flat_count[$x]), + UnparseAlt($cum_count[$x]), + UnparseAddress($offset, $e->[0]), + CleanDisassembly($e->[3])); + } + } +} + +# Print DOT graph +sub PrintDot { + my $prog = shift; + my $symbols = shift; + my $raw = shift; + my $flat = shift; + my $cumulative = shift; + my $overall_total = shift; + + # Get total + my $local_total = TotalProfile($flat); + my $nodelimit = int($main::opt_nodefraction * $local_total); + my $edgelimit = int($main::opt_edgefraction * $local_total); + my $nodecount = $main::opt_nodecount; + + # Find nodes to include + my @list = (sort { abs(GetEntry($cumulative, $b)) <=> + abs(GetEntry($cumulative, $a)) + || $a cmp $b } + keys(%{$cumulative})); + my $last = $nodecount - 1; + if ($last > $#list) { + $last = $#list; + } + while (($last >= 0) && + (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) { + $last--; + } + if ($last < 0) { + print STDERR "No nodes to print\n"; + return 0; + } + + if ($nodelimit > 0 || $edgelimit > 0) { + printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n", + Unparse($nodelimit), Units(), + Unparse($edgelimit), Units()); + } + + # Open DOT output file + my $output; + my $escaped_dot = ShellEscape(@DOT); + my $escaped_ps2pdf = ShellEscape(@PS2PDF); + if ($main::opt_gv) { + my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps")); + $output = "| $escaped_dot -Tps2 >$escaped_outfile"; + } elsif ($main::opt_evince) { + my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf")); + $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile"; + } elsif ($main::opt_ps) { + $output = "| $escaped_dot -Tps2"; + } elsif ($main::opt_pdf) { + $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -"; + } elsif ($main::opt_web || $main::opt_svg) { + # We need to post-process the SVG, so write to a temporary file always. + my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg")); + $output = "| $escaped_dot -Tsvg >$escaped_outfile"; + } elsif ($main::opt_gif) { + $output = "| $escaped_dot -Tgif"; + } else { + $output = ">&STDOUT"; + } + open(DOT, $output) || error("$output: $!\n"); + + # Title + printf DOT ("digraph \"%s; %s %s\" {\n", + $prog, + Unparse($overall_total), + Units()); + if ($main::opt_pdf) { + # The output is more printable if we set the page size for dot. + printf DOT ("size=\"8,11\"\n"); + } + printf DOT ("node [width=0.375,height=0.25];\n"); + + # Print legend + printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," . 
+ "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n", + $prog, + sprintf("Total %s: %s", Units(), Unparse($overall_total)), + sprintf("Focusing on: %s", Unparse($local_total)), + sprintf("Dropped nodes with <= %s abs(%s)", + Unparse($nodelimit), Units()), + sprintf("Dropped edges with <= %s %s", + Unparse($edgelimit), Units()) + ); + + # Print nodes + my %node = (); + my $nextnode = 1; + foreach my $a (@list[0..$last]) { + # Pick font size + my $f = GetEntry($flat, $a); + my $c = GetEntry($cumulative, $a); + + my $fs = 8; + if ($local_total > 0) { + $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total))); + } + + $node{$a} = $nextnode++; + my $sym = $a; + $sym =~ s/\s+/\\n/g; + $sym =~ s/::/\\n/g; + + # Extra cumulative info to print for non-leaves + my $extra = ""; + if ($f != $c) { + $extra = sprintf("\\rof %s (%s)", + Unparse($c), + Percent($c, $local_total)); + } + my $style = ""; + if ($main::opt_heapcheck) { + if ($f > 0) { + # make leak-causing nodes more visible (add a background) + $style = ",style=filled,fillcolor=gray" + } elsif ($f < 0) { + # make anti-leak-causing nodes (which almost never occur) + # stand out as well (triple border) + $style = ",peripheries=3" + } + } + + printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" . + "\",shape=box,fontsize=%.1f%s];\n", + $node{$a}, + $sym, + Unparse($f), + Percent($f, $local_total), + $extra, + $fs, + $style, + ); + } + + # Get edges and counts per edge + my %edge = (); + my $n; + my $fullname_to_shortname_map = {}; + FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); + foreach my $k (keys(%{$raw})) { + # TODO: omit low %age edges + $n = $raw->{$k}; + my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); + for (my $i = 1; $i <= $#translated; $i++) { + my $src = $translated[$i]; + my $dst = $translated[$i-1]; + #next if ($src eq $dst); # Avoid self-edges? + if (exists($node{$src}) && exists($node{$dst})) { + my $edge_label = "$src\001$dst"; + if (!exists($edge{$edge_label})) { + $edge{$edge_label} = 0; + } + $edge{$edge_label} += $n; + } + } + } + + # Print edges (process in order of decreasing counts) + my %indegree = (); # Number of incoming edges added per node so far + my %outdegree = (); # Number of outgoing edges added per node so far + foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) { + my @x = split(/\001/, $e); + $n = $edge{$e}; + + # Initialize degree of kept incoming and outgoing edges if necessary + my $src = $x[0]; + my $dst = $x[1]; + if (!exists($outdegree{$src})) { $outdegree{$src} = 0; } + if (!exists($indegree{$dst})) { $indegree{$dst} = 0; } + + my $keep; + if ($indegree{$dst} == 0) { + # Keep edge if needed for reachability + $keep = 1; + } elsif (abs($n) <= $edgelimit) { + # Drop if we are below --edgefraction + $keep = 0; + } elsif ($outdegree{$src} >= $main::opt_maxdegree || + $indegree{$dst} >= $main::opt_maxdegree) { + # Keep limited number of in/out edges per node + $keep = 0; + } else { + $keep = 1; + } + + if ($keep) { + $outdegree{$src}++; + $indegree{$dst}++; + + # Compute line width based on edge count + my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0); + if ($fraction > 1) { $fraction = 1; } + my $w = $fraction * 2; + if ($w < 1 && ($main::opt_web || $main::opt_svg)) { + # SVG output treats line widths < 1 poorly. 
+ $w = 1; + } + + # Dot sometimes segfaults if given edge weights that are too large, so + # we cap the weights at a large value + my $edgeweight = abs($n) ** 0.7; + if ($edgeweight > 100000) { $edgeweight = 100000; } + $edgeweight = int($edgeweight); + + my $style = sprintf("setlinewidth(%f)", $w); + if ($x[1] =~ m/\(inline\)/) { + $style .= ",dashed"; + } + + # Use a slightly squashed function of the edge count as the weight + printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n", + $node{$x[0]}, + $node{$x[1]}, + Unparse($n), + $edgeweight, + $style); + } + } + + print DOT ("}\n"); + close(DOT); + + if ($main::opt_web || $main::opt_svg) { + # Rewrite SVG to be more usable inside web browser. + RewriteSvg(TempName($main::next_tmpfile, "svg")); + } + + return 1; +} + +sub RewriteSvg { + my $svgfile = shift; + + open(SVG, $svgfile) || die "open temp svg: $!"; + my @svg = ; + close(SVG); + unlink $svgfile; + my $svg = join('', @svg); + + # Dot's SVG output is + # + # + # + # ... + # + # + # + # Change it to + # + # + # $svg_javascript + # + # + # ... + # + # + # + + # Fix width, height; drop viewBox. + $svg =~ s/(?s) above first + my $svg_javascript = SvgJavascript(); + my $viewport = "\n"; + $svg =~ s/ above . + $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/; + $svg =~ s/$svgfile") || die "open $svgfile: $!"; + print SVG $svg; + close(SVG); + } +} + +sub SvgJavascript { + return <<'EOF'; + +EOF +} + +# Provides a map from fullname to shortname for cases where the +# shortname is ambiguous. The symlist has both the fullname and +# shortname for all symbols, which is usually fine, but sometimes -- +# such as overloaded functions -- two different fullnames can map to +# the same shortname. In that case, we use the address of the +# function to disambiguate the two. This function fills in a map that +# maps fullnames to modified shortnames in such cases. If a fullname +# is not present in the map, the 'normal' shortname provided by the +# symlist is the appropriate one to use. +sub FillFullnameToShortnameMap { + my $symbols = shift; + my $fullname_to_shortname_map = shift; + my $shortnames_seen_once = {}; + my $shortnames_seen_more_than_once = {}; + + foreach my $symlist (values(%{$symbols})) { + # TODO(csilvers): deal with inlined symbols too. + my $shortname = $symlist->[0]; + my $fullname = $symlist->[2]; + if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address + next; # the only collisions we care about are when addresses differ + } + if (defined($shortnames_seen_once->{$shortname}) && + $shortnames_seen_once->{$shortname} ne $fullname) { + $shortnames_seen_more_than_once->{$shortname} = 1; + } else { + $shortnames_seen_once->{$shortname} = $fullname; + } + } + + foreach my $symlist (values(%{$symbols})) { + my $shortname = $symlist->[0]; + my $fullname = $symlist->[2]; + # TODO(csilvers): take in a list of addresses we care about, and only + # store in the map if $symlist->[1] is in that list. Saves space. + next if defined($fullname_to_shortname_map->{$fullname}); + if (defined($shortnames_seen_more_than_once->{$shortname})) { + if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it + $fullname_to_shortname_map->{$fullname} = "$shortname\@$1"; + } + } + } +} + +# Return a small number that identifies the argument. +# Multiple calls with the same argument will return the same number. +# Calls with different arguments will return different numbers. 
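+#
+# Hypothetical usage sketch (the addresses below are made up for
+# illustration, not taken from the script):
+#   my $id_a = ShortIdFor("0x4005ed");   # first call, e.g. returns 1
+#   my $id_b = ShortIdFor("0x4005ed");   # same argument, returns 1 again
+#   my $id_c = ShortIdFor("0x4711aa");   # different argument, e.g. returns 2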
+sub ShortIdFor { + my $key = shift; + my $id = $main::uniqueid{$key}; + if (!defined($id)) { + $id = keys(%main::uniqueid) + 1; + $main::uniqueid{$key} = $id; + } + return $id; +} + +# Translate a stack of addresses into a stack of symbols +sub TranslateStack { + my $symbols = shift; + my $fullname_to_shortname_map = shift; + my $k = shift; + + my @addrs = split(/\n/, $k); + my @result = (); + for (my $i = 0; $i <= $#addrs; $i++) { + my $a = $addrs[$i]; + + # Skip large addresses since they sometimes show up as fake entries on RH9 + if (length($a) > 8 && $a gt "7fffffffffffffff") { + next; + } + + if ($main::opt_disasm || $main::opt_list) { + # We want just the address for the key + push(@result, $a); + next; + } + + my $symlist = $symbols->{$a}; + if (!defined($symlist)) { + $symlist = [$a, "", $a]; + } + + # We can have a sequence of symbols for a particular entry + # (more than one symbol in the case of inlining). Callers + # come before callees in symlist, so walk backwards since + # the translated stack should contain callees before callers. + for (my $j = $#{$symlist}; $j >= 2; $j -= 3) { + my $func = $symlist->[$j-2]; + my $fileline = $symlist->[$j-1]; + my $fullfunc = $symlist->[$j]; + if (defined($fullname_to_shortname_map->{$fullfunc})) { + $func = $fullname_to_shortname_map->{$fullfunc}; + } + if ($j > 2) { + $func = "$func (inline)"; + } + + # Do not merge nodes corresponding to Callback::Run since that + # causes confusing cycles in dot display. Instead, we synthesize + # a unique name for this frame per caller. + if ($func =~ m/Callback.*::Run$/) { + my $caller = ($i > 0) ? $addrs[$i-1] : 0; + $func = "Run#" . ShortIdFor($caller); + } + + if ($main::opt_addresses) { + push(@result, "$a $func $fileline"); + } elsif ($main::opt_lines) { + if ($func eq '??' && $fileline eq '??:0') { + push(@result, "$a"); + } else { + push(@result, "$func $fileline"); + } + } elsif ($main::opt_functions) { + if ($func eq '??') { + push(@result, "$a"); + } else { + push(@result, $func); + } + } elsif ($main::opt_files) { + if ($fileline eq '??:0' || $fileline eq '') { + push(@result, "$a"); + } else { + my $f = $fileline; + $f =~ s/:\d+$//; + push(@result, $f); + } + } else { + push(@result, $a); + last; # Do not print inlined info + } + } + } + + # print join(",", @addrs), " => ", join(",", @result), "\n"; + return @result; +} + +# Generate percent string for a number and a total +sub Percent { + my $num = shift; + my $tot = shift; + if ($tot != 0) { + return sprintf("%.1f%%", $num * 100.0 / $tot); + } else { + return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf"); + } +} + +# Generate pretty-printed form of number +sub Unparse { + my $num = shift; + if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { + if ($main::opt_inuse_objects || $main::opt_alloc_objects) { + return sprintf("%d", $num); + } else { + if ($main::opt_show_bytes) { + return sprintf("%d", $num); + } else { + return sprintf("%.1f", $num / 1048576.0); + } + } + } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { + return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds + } else { + return sprintf("%d", $num); + } +} + +# Alternate pretty-printed form: 0 maps to "." 
+sub UnparseAlt { + my $num = shift; + if ($num == 0) { + return "."; + } else { + return Unparse($num); + } +} + +# Alternate pretty-printed form: 0 maps to "" +sub HtmlPrintNumber { + my $num = shift; + if ($num == 0) { + return ""; + } else { + return Unparse($num); + } +} + +# Return output units +sub Units { + if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { + if ($main::opt_inuse_objects || $main::opt_alloc_objects) { + return "objects"; + } else { + if ($main::opt_show_bytes) { + return "B"; + } else { + return "MB"; + } + } + } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) { + return "seconds"; + } else { + return "samples"; + } +} + +##### Profile manipulation code ##### + +# Generate flattened profile: +# If count is charged to stack [a,b,c,d], in generated profile, +# it will be charged to [a] +sub FlatProfile { + my $profile = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + if ($#addrs >= 0) { + AddEntry($result, $addrs[0], $count); + } + } + return $result; +} + +# Generate cumulative profile: +# If count is charged to stack [a,b,c,d], in generated profile, +# it will be charged to [a], [b], [c], [d] +sub CumulativeProfile { + my $profile = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + foreach my $a (@addrs) { + AddEntry($result, $a, $count); + } + } + return $result; +} + +# If the second-youngest PC on the stack is always the same, returns +# that pc. Otherwise, returns undef. +sub IsSecondPcAlwaysTheSame { + my $profile = shift; + + my $second_pc = undef; + foreach my $k (keys(%{$profile})) { + my @addrs = split(/\n/, $k); + if ($#addrs < 1) { + return undef; + } + if (not defined $second_pc) { + $second_pc = $addrs[1]; + } else { + if ($second_pc ne $addrs[1]) { + return undef; + } + } + } + return $second_pc; +} + +sub ExtractSymbolLocation { + my $symbols = shift; + my $address = shift; + # 'addr2line' outputs "??:0" for unknown locations; we do the + # same to be consistent. + my $location = "??:0:unknown"; + if (exists $symbols->{$address}) { + my $file = $symbols->{$address}->[1]; + if ($file eq "?") { + $file = "??:0" + } + $location = $file . ":" . $symbols->{$address}->[0]; + } + return $location; +} + +# Extracts a graph of calls. 
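+#
+# The returned map is keyed by "fileline:function" locations produced by
+# ExtractSymbolLocation; call edges use a "source -> destination" key. A
+# sketch of what entries might look like (file and function names are
+# hypothetical):
+#   $calls->{"bar.c:7:helper"} = 12;                       # samples with this leaf
+#   $calls->{"foo.c:42:main -> bar.c:7:helper"} = 12;      # caller -> callee edge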
+sub ExtractCalls { + my $symbols = shift; + my $profile = shift; + + my $calls = {}; + while( my ($stack_trace, $count) = each %$profile ) { + my @address = split(/\n/, $stack_trace); + my $destination = ExtractSymbolLocation($symbols, $address[0]); + AddEntry($calls, $destination, $count); + for (my $i = 1; $i <= $#address; $i++) { + my $source = ExtractSymbolLocation($symbols, $address[$i]); + my $call = "$source -> $destination"; + AddEntry($calls, $call, $count); + $destination = $source; + } + } + + return $calls; +} + +sub RemoveUninterestingFrames { + my $symbols = shift; + my $profile = shift; + + # List of function names to skip + my %skip = (); + my $skip_regexp = 'NOMATCH'; + if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { + foreach my $name ('calloc', + 'cfree', + 'malloc', + 'free', + 'memalign', + 'posix_memalign', + 'aligned_alloc', + 'pvalloc', + 'valloc', + 'realloc', + 'mallocx', # jemalloc + 'rallocx', # jemalloc + 'xallocx', # jemalloc + 'dallocx', # jemalloc + 'sdallocx', # jemalloc + 'tc_calloc', + 'tc_cfree', + 'tc_malloc', + 'tc_free', + 'tc_memalign', + 'tc_posix_memalign', + 'tc_pvalloc', + 'tc_valloc', + 'tc_realloc', + 'tc_new', + 'tc_delete', + 'tc_newarray', + 'tc_deletearray', + 'tc_new_nothrow', + 'tc_newarray_nothrow', + 'do_malloc', + '::do_malloc', # new name -- got moved to an unnamed ns + '::do_malloc_or_cpp_alloc', + 'DoSampledAllocation', + 'simple_alloc::allocate', + '__malloc_alloc_template::allocate', + '__builtin_delete', + '__builtin_new', + '__builtin_vec_delete', + '__builtin_vec_new', + 'operator new', + 'operator new[]', + # The entry to our memory-allocation routines on OS X + 'malloc_zone_malloc', + 'malloc_zone_calloc', + 'malloc_zone_valloc', + 'malloc_zone_realloc', + 'malloc_zone_memalign', + 'malloc_zone_free', + # These mark the beginning/end of our custom sections + '__start_google_malloc', + '__stop_google_malloc', + '__start_malloc_hook', + '__stop_malloc_hook') { + $skip{$name} = 1; + $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything + } + # TODO: Remove TCMalloc once everything has been + # moved into the tcmalloc:: namespace and we have flushed + # old code out of the system. + $skip_regexp = "TCMalloc|^tcmalloc::"; + } elsif ($main::profile_type eq 'contention') { + foreach my $vname ('base::RecordLockProfileData', + 'base::SubmitMutexProfileData', + 'base::SubmitSpinLockProfileData', + 'Mutex::Unlock', + 'Mutex::UnlockSlow', + 'Mutex::ReaderUnlock', + 'MutexLock::~MutexLock', + 'SpinLock::Unlock', + 'SpinLock::SlowUnlock', + 'SpinLockHolder::~SpinLockHolder') { + $skip{$vname} = 1; + } + } elsif ($main::profile_type eq 'cpu') { + # Drop signal handlers used for CPU profile collection + # TODO(dpeng): this should not be necessary; it's taken + # care of by the general 2nd-pc mechanism below. + foreach my $name ('ProfileData::Add', # historical + 'ProfileData::prof_handler', # historical + 'CpuProfiler::prof_handler', + '__FRAME_END__', + '__pthread_sighandler', + '__restore') { + $skip{$name} = 1; + } + } else { + # Nothing skipped for unknown types + } + + if ($main::profile_type eq 'cpu') { + # If all the second-youngest program counters are the same, + # this STRONGLY suggests that it is an artifact of measurement, + # i.e., stack frames pushed by the CPU profiler signal handler. + # Hence, we delete them. + # (The topmost PC is read from the signal structure, not from + # the stack, so it does not get involved.) 
+ while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) { + my $result = {}; + my $func = ''; + if (exists($symbols->{$second_pc})) { + $second_pc = $symbols->{$second_pc}->[0]; + } + print STDERR "Removing $second_pc from all stack traces.\n"; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + splice @addrs, 1, 1; + my $reduced_path = join("\n", @addrs); + AddEntry($result, $reduced_path, $count); + } + $profile = $result; + } + } + + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + my @path = (); + foreach my $a (@addrs) { + if (exists($symbols->{$a})) { + my $func = $symbols->{$a}->[0]; + if ($skip{$func} || ($func =~ m/$skip_regexp/)) { + # Throw away the portion of the backtrace seen so far, under the + # assumption that previous frames were for functions internal to the + # allocator. + @path = (); + next; + } + } + push(@path, $a); + } + my $reduced_path = join("\n", @path); + AddEntry($result, $reduced_path, $count); + } + return $result; +} + +# Reduce profile to granularity given by user +sub ReduceProfile { + my $symbols = shift; + my $profile = shift; + my $result = {}; + my $fullname_to_shortname_map = {}; + FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map); + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k); + my @path = (); + my %seen = (); + $seen{''} = 1; # So that empty keys are skipped + foreach my $e (@translated) { + # To avoid double-counting due to recursion, skip a stack-trace + # entry if it has already been seen + if (!$seen{$e}) { + $seen{$e} = 1; + push(@path, $e); + } + } + my $reduced_path = join("\n", @path); + AddEntry($result, $reduced_path, $count); + } + return $result; +} + +# Does the specified symbol array match the regexp? 
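+#
+# $sym is laid out as repeating (shortname, fileline, fullname) triples, and
+# only the shortname and fileline entries are tested against $re. A hedged
+# example with hypothetical values:
+#   SymbolMatches(["helper", "bar.c:7", "helper<0x4711aa>"], 'bar\.c');  # returns 1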
+sub SymbolMatches { + my $sym = shift; + my $re = shift; + if (defined($sym)) { + for (my $i = 0; $i < $#{$sym}; $i += 3) { + if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) { + return 1; + } + } + } + return 0; +} + +# Focus only on paths involving specified regexps +sub FocusProfile { + my $symbols = shift; + my $profile = shift; + my $focus = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + foreach my $a (@addrs) { + # Reply if it matches either the address/shortname/fileline + if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) { + AddEntry($result, $k, $count); + last; + } + } + } + return $result; +} + +# Focus only on paths not involving specified regexps +sub IgnoreProfile { + my $symbols = shift; + my $profile = shift; + my $ignore = shift; + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + my $matched = 0; + foreach my $a (@addrs) { + # Reply if it matches either the address/shortname/fileline + if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) { + $matched = 1; + last; + } + } + if (!$matched) { + AddEntry($result, $k, $count); + } + } + return $result; +} + +# Get total count in profile +sub TotalProfile { + my $profile = shift; + my $result = 0; + foreach my $k (keys(%{$profile})) { + $result += $profile->{$k}; + } + return $result; +} + +# Add A to B +sub AddProfile { + my $A = shift; + my $B = shift; + + my $R = {}; + # add all keys in A + foreach my $k (keys(%{$A})) { + my $v = $A->{$k}; + AddEntry($R, $k, $v); + } + # add all keys in B + foreach my $k (keys(%{$B})) { + my $v = $B->{$k}; + AddEntry($R, $k, $v); + } + return $R; +} + +# Merges symbol maps +sub MergeSymbols { + my $A = shift; + my $B = shift; + + my $R = {}; + foreach my $k (keys(%{$A})) { + $R->{$k} = $A->{$k}; + } + if (defined($B)) { + foreach my $k (keys(%{$B})) { + $R->{$k} = $B->{$k}; + } + } + return $R; +} + + +# Add A to B +sub AddPcs { + my $A = shift; + my $B = shift; + + my $R = {}; + # add all keys in A + foreach my $k (keys(%{$A})) { + $R->{$k} = 1 + } + # add all keys in B + foreach my $k (keys(%{$B})) { + $R->{$k} = 1 + } + return $R; +} + +# Subtract B from A +sub SubtractProfile { + my $A = shift; + my $B = shift; + + my $R = {}; + foreach my $k (keys(%{$A})) { + my $v = $A->{$k} - GetEntry($B, $k); + if ($v < 0 && $main::opt_drop_negative) { + $v = 0; + } + AddEntry($R, $k, $v); + } + if (!$main::opt_drop_negative) { + # Take care of when subtracted profile has more entries + foreach my $k (keys(%{$B})) { + if (!exists($A->{$k})) { + AddEntry($R, $k, 0 - $B->{$k}); + } + } + } + return $R; +} + +# Get entry from profile; zero if not present +sub GetEntry { + my $profile = shift; + my $k = shift; + if (exists($profile->{$k})) { + return $profile->{$k}; + } else { + return 0; + } +} + +# Add entry to specified profile +sub AddEntry { + my $profile = shift; + my $k = shift; + my $n = shift; + if (!exists($profile->{$k})) { + $profile->{$k} = 0; + } + $profile->{$k} += $n; +} + +# Add a stack of entries to specified profile, and add them to the $pcs +# list. 
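+#
+# Illustrative call (addresses are hypothetical): the stack string is split on
+# whitespace, each PC is HexExtend()ed and recorded in $pcs, and the joined
+# stack becomes a single profile key:
+#   AddEntries($profile, $pcs, "4005ed 4006aa 400123", 3);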
+sub AddEntries { + my $profile = shift; + my $pcs = shift; + my $stack = shift; + my $count = shift; + my @k = (); + + foreach my $e (split(/\s+/, $stack)) { + my $pc = HexExtend($e); + $pcs->{$pc} = 1; + push @k, $pc; + } + AddEntry($profile, (join "\n", @k), $count); +} + +##### Code to profile a server dynamically ##### + +sub CheckSymbolPage { + my $url = SymbolPageURL(); + my $command = ShellEscape(@URL_FETCHER, $url); + open(SYMBOL, "$command |") or error($command); + my $line = ; + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + close(SYMBOL); + unless (defined($line)) { + error("$url doesn't exist\n"); + } + + if ($line =~ /^num_symbols:\s+(\d+)$/) { + if ($1 == 0) { + error("Stripped binary. No symbols available.\n"); + } + } else { + error("Failed to get the number of symbols from $url\n"); + } +} + +sub IsProfileURL { + my $profile_name = shift; + if (-f $profile_name) { + printf STDERR "Using local file $profile_name.\n"; + return 0; + } + return 1; +} + +sub ParseProfileURL { + my $profile_name = shift; + + if (!defined($profile_name) || $profile_name eq "") { + return (); + } + + # Split profile URL - matches all non-empty strings, so no test. + $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,; + + my $proto = $1 || "http://"; + my $hostport = $2; + my $prefix = $3; + my $profile = $4 || "/"; + + my $host = $hostport; + $host =~ s/:.*//; + + my $baseurl = "$proto$hostport$prefix"; + return ($host, $baseurl, $profile); +} + +# We fetch symbols from the first profile argument. +sub SymbolPageURL { + my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); + return "$baseURL$SYMBOL_PAGE"; +} + +sub FetchProgramName() { + my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]); + my $url = "$baseURL$PROGRAM_NAME_PAGE"; + my $command_line = ShellEscape(@URL_FETCHER, $url); + open(CMDLINE, "$command_line |") or error($command_line); + my $cmdline = ; + $cmdline =~ s/\r//g; # turn windows-looking lines into unix-looking lines + close(CMDLINE); + error("Failed to get program name from $url\n") unless defined($cmdline); + $cmdline =~ s/\x00.+//; # Remove argv[1] and latters. + $cmdline =~ s!\n!!g; # Remove LFs. + return $cmdline; +} + +# Gee, curl's -L (--location) option isn't reliable at least +# with its 7.12.3 version. Curl will forget to post data if +# there is a redirection. This function is a workaround for +# curl. Redirection happens on borg hosts. +sub ResolveRedirectionForCurl { + my $url = shift; + my $command_line = ShellEscape(@URL_FETCHER, "--head", $url); + open(CMDLINE, "$command_line |") or error($command_line); + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + if (/^Location: (.*)/) { + $url = $1; + } + } + close(CMDLINE); + return $url; +} + +# Add a timeout flat to URL_FETCHER. Returns a new list. +sub AddFetchTimeout { + my $timeout = shift; + my @fetcher = shift; + if (defined($timeout)) { + if (join(" ", @fetcher) =~ m/\bcurl -s/) { + push(@fetcher, "--max-time", sprintf("%d", $timeout)); + } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) { + push(@fetcher, sprintf("--deadline=%d", $timeout)); + } + } + return @fetcher; +} + +# Reads a symbol map from the file handle name given as $1, returning +# the resulting symbol map. Also processes variables relating to symbols. +# Currently, the only variable processed is 'binary=' which updates +# $main::prog to have the correct program name. 
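+#
+# A sketch of the input this accepts (address, name and path are
+# hypothetical): symbol lines, optional variable lines, and an optional
+# "---" terminator:
+#   0x00000000004005ed  main
+#   binary=/usr/local/bin/myprog
+#   ---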
+sub ReadSymbols { + my $in = shift; + my $map = {}; + while (<$in>) { + s/\r//g; # turn windows-looking lines into unix-looking lines + # Removes all the leading zeroes from the symbols, see comment below. + if (m/^0x0*([0-9a-f]+)\s+(.+)/) { + $map->{$1} = $2; + } elsif (m/^---/) { + last; + } elsif (m/^([a-z][^=]*)=(.*)$/ ) { + my ($variable, $value) = ($1, $2); + for ($variable, $value) { + s/^\s+//; + s/\s+$//; + } + if ($variable eq "binary") { + if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) { + printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n", + $main::prog, $value); + } + $main::prog = $value; + } else { + printf STDERR ("Ignoring unknown variable in symbols list: " . + "'%s' = '%s'\n", $variable, $value); + } + } + } + return $map; +} + +# Fetches and processes symbols to prepare them for use in the profile output +# code. If the optional 'symbol_map' arg is not given, fetches symbols from +# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols +# are assumed to have already been fetched into 'symbol_map' and are simply +# extracted and processed. +sub FetchSymbols { + my $pcset = shift; + my $symbol_map = shift; + + my %seen = (); + my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq + + if (!defined($symbol_map)) { + my $post_data = join("+", sort((map {"0x" . "$_"} @pcs))); + + open(POSTFILE, ">$main::tmpfile_sym"); + print POSTFILE $post_data; + close(POSTFILE); + + my $url = SymbolPageURL(); + + my $command_line; + if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { + $url = ResolveRedirectionForCurl($url); + $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", + $url); + } else { + $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) + . " < " . ShellEscape($main::tmpfile_sym)); + } + # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols. + my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"}); + open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line); + $symbol_map = ReadSymbols(*SYMBOL{IO}); + close(SYMBOL); + } + + my $symbols = {}; + foreach my $pc (@pcs) { + my $fullname; + # For 64 bits binaries, symbols are extracted with 8 leading zeroes. + # Then /symbol reads the long symbols in as uint64, and outputs + # the result with a "0x%08llx" format which get rid of the zeroes. + # By removing all the leading zeroes in both $pc and the symbols from + # /symbol, the symbols match and are retrievable from the map. + my $shortpc = $pc; + $shortpc =~ s/^0*//; + # Each line may have a list of names, which includes the function + # and also other functions it has inlined. They are separated (in + # PrintSymbolizedProfile), by --, which is illegal in function names. + my $fullnames; + if (defined($symbol_map->{$shortpc})) { + $fullnames = $symbol_map->{$shortpc}; + } else { + $fullnames = "0x" . 
$pc; # Just use addresses + } + my $sym = []; + $symbols->{$pc} = $sym; + foreach my $fullname (split("--", $fullnames)) { + my $name = ShortFunctionName($fullname); + push(@{$sym}, $name, "?", $fullname); + } + } + return $symbols; +} + +sub BaseName { + my $file_name = shift; + $file_name =~ s!^.*/!!; # Remove directory name + return $file_name; +} + +sub MakeProfileBaseName { + my ($binary_name, $profile_name) = @_; + my ($host, $baseURL, $path) = ParseProfileURL($profile_name); + my $binary_shortname = BaseName($binary_name); + return sprintf("%s.%s.%s", + $binary_shortname, $main::op_time, $host); +} + +sub FetchDynamicProfile { + my $binary_name = shift; + my $profile_name = shift; + my $fetch_name_only = shift; + my $encourage_patience = shift; + + if (!IsProfileURL($profile_name)) { + return $profile_name; + } else { + my ($host, $baseURL, $path) = ParseProfileURL($profile_name); + if ($path eq "" || $path eq "/") { + # Missing type specifier defaults to cpu-profile + $path = $PROFILE_PAGE; + } + + my $profile_file = MakeProfileBaseName($binary_name, $profile_name); + + my $url = "$baseURL$path"; + my $fetch_timeout = undef; + if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) { + if ($path =~ m/[?]/) { + $url .= "&"; + } else { + $url .= "?"; + } + $url .= sprintf("seconds=%d", $main::opt_seconds); + $fetch_timeout = $main::opt_seconds * 1.01 + 60; + } else { + # For non-CPU profiles, we add a type-extension to + # the target profile file name. + my $suffix = $path; + $suffix =~ s,/,.,g; + $profile_file .= $suffix; + } + + my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof"); + if (! -d $profile_dir) { + mkdir($profile_dir) + || die("Unable to create profile directory $profile_dir: $!\n"); + } + my $tmp_profile = "$profile_dir/.tmp.$profile_file"; + my $real_profile = "$profile_dir/$profile_file"; + + if ($fetch_name_only > 0) { + return $real_profile; + } + + my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER); + my $cmd = ShellEscape(@fetcher, $url) . " > " . 
ShellEscape($tmp_profile); + if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){ + print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n"; + if ($encourage_patience) { + print STDERR "Be patient...\n"; + } + } else { + print STDERR "Fetching $path profile from $url to\n ${real_profile}\n"; + } + + (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n"); + (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n"); + print STDERR "Wrote profile to $real_profile\n"; + $main::collected_profile = $real_profile; + return $main::collected_profile; + } +} + +# Collect profiles in parallel +sub FetchDynamicProfiles { + my $items = scalar(@main::pfile_args); + my $levels = log($items) / log(2); + + if ($items == 1) { + $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1); + } else { + # math rounding issues + if ((2 ** $levels) < $items) { + $levels++; + } + my $count = scalar(@main::pfile_args); + for (my $i = 0; $i < $count; $i++) { + $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0); + } + print STDERR "Fetching $count profiles, Be patient...\n"; + FetchDynamicProfilesRecurse($levels, 0, 0); + $main::collected_profile = join(" \\\n ", @main::profile_files); + } +} + +# Recursively fork a process to get enough processes +# collecting profiles +sub FetchDynamicProfilesRecurse { + my $maxlevel = shift; + my $level = shift; + my $position = shift; + + if (my $pid = fork()) { + $position = 0 | ($position << 1); + TryCollectProfile($maxlevel, $level, $position); + wait; + } else { + $position = 1 | ($position << 1); + TryCollectProfile($maxlevel, $level, $position); + cleanup(); + exit(0); + } +} + +# Collect a single profile +sub TryCollectProfile { + my $maxlevel = shift; + my $level = shift; + my $position = shift; + + if ($level >= ($maxlevel - 1)) { + if ($position < scalar(@main::pfile_args)) { + FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0); + } + } else { + FetchDynamicProfilesRecurse($maxlevel, $level+1, $position); + } +} + +##### Parsing code ##### + +# Provide a small streaming-read module to handle very large +# cpu-profile files. Stream in chunks along a sliding window. +# Provides an interface to get one 'slot', correctly handling +# endian-ness differences. A slot is one 32-bit or 64-bit word +# (depending on the input profile). We tell endianness and bit-size +# for the profile by looking at the first 8 bytes: in cpu profiles, +# the second slot is always 3 (we'll accept anything that's not 0). +BEGIN { + package CpuProfileStream; + + sub new { + my ($class, $file, $fname) = @_; + my $self = { file => $file, + base => 0, + stride => 512 * 1024, # must be a multiple of bitsize/8 + slots => [], + unpack_code => "", # N for big-endian, V for little + perl_is_64bit => 1, # matters if profile is 64-bit + }; + bless $self, $class; + # Let unittests adjust the stride + if ($main::opt_test_stride > 0) { + $self->{stride} = $main::opt_test_stride; + } + # Read the first two slots to figure out bitsize and endianness. + my $slots = $self->{slots}; + my $str; + read($self->{file}, $str, 8); + # Set the global $address_length based on what we see here. + # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars). + $address_length = ($str eq (chr(0)x8)) ? 16 : 8; + if ($address_length == 8) { + if (substr($str, 6, 2) eq chr(0)x2) { + $self->{unpack_code} = 'V'; # Little-endian. 
+ } elsif (substr($str, 4, 2) eq chr(0)x2) { + $self->{unpack_code} = 'N'; # Big-endian + } else { + ::error("$fname: header size >= 2**16\n"); + } + @$slots = unpack($self->{unpack_code} . "*", $str); + } else { + # If we're a 64-bit profile, check if we're a 64-bit-capable + # perl. Otherwise, each slot will be represented as a float + # instead of an int64, losing precision and making all the + # 64-bit addresses wrong. We won't complain yet, but will + # later if we ever see a value that doesn't fit in 32 bits. + my $has_q = 0; + eval { $has_q = pack("Q", "1") ? 1 : 1; }; + if (!$has_q) { + $self->{perl_is_64bit} = 0; + } + read($self->{file}, $str, 8); + if (substr($str, 4, 4) eq chr(0)x4) { + # We'd love to use 'Q', but it's a) not universal, b) not endian-proof. + $self->{unpack_code} = 'V'; # Little-endian. + } elsif (substr($str, 0, 4) eq chr(0)x4) { + $self->{unpack_code} = 'N'; # Big-endian + } else { + ::error("$fname: header size >= 2**32\n"); + } + my @pair = unpack($self->{unpack_code} . "*", $str); + # Since we know one of the pair is 0, it's fine to just add them. + @$slots = (0, $pair[0] + $pair[1]); + } + return $self; + } + + # Load more data when we access slots->get(X) which is not yet in memory. + sub overflow { + my ($self) = @_; + my $slots = $self->{slots}; + $self->{base} += $#$slots + 1; # skip over data we're replacing + my $str; + read($self->{file}, $str, $self->{stride}); + if ($address_length == 8) { # the 32-bit case + # This is the easy case: unpack provides 32-bit unpacking primitives. + @$slots = unpack($self->{unpack_code} . "*", $str); + } else { + # We need to unpack 32 bits at a time and combine. + my @b32_values = unpack($self->{unpack_code} . "*", $str); + my @b64_values = (); + for (my $i = 0; $i < $#b32_values; $i += 2) { + # TODO(csilvers): if this is a 32-bit perl, the math below + # could end up in a too-large int, which perl will promote + # to a double, losing necessary precision. Deal with that. + # Right now, we just die. + my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]); + if ($self->{unpack_code} eq 'N') { # big-endian + ($lo, $hi) = ($hi, $lo); + } + my $value = $lo + $hi * (2**32); + if (!$self->{perl_is_64bit} && # check value is exactly represented + (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) { + ::error("Need a 64-bit perl to process this 64-bit profile.\n"); + } + push(@b64_values, $value); + } + @$slots = @b64_values; + } + } + + # Access the i-th long in the file (logically), or -1 at EOF. + sub get { + my ($self, $idx) = @_; + my $slots = $self->{slots}; + while ($#$slots >= 0) { + if ($idx < $self->{base}) { + # The only time we expect a reference to $slots[$i - something] + # after referencing $slots[$i] is reading the very first header. + # Since $stride > |header|, that shouldn't cause any lookback + # errors. And everything after the header is sequential. + print STDERR "Unexpected look-back reading CPU profile"; + return -1; # shrug, don't know what better to return + } elsif ($idx > $self->{base} + $#$slots) { + $self->overflow(); + } else { + return $slots->[$idx - $self->{base}]; + } + } + # If we get here, $slots is [], which means we've reached EOF + return -1; # unique since slots is supposed to hold unsigned numbers + } +} + +# Reads the top, 'header' section of a profile, and returns the last +# line of the header, commonly called a 'header line'. 
The header +# section of a profile consists of zero or more 'command' lines that +# are instructions to jeprof, which jeprof executes when reading the +# header. All 'command' lines start with a %. After the command +# lines is the 'header line', which is a profile-specific line that +# indicates what type of profile it is, and perhaps other global +# information about the profile. For instance, here's a header line +# for a heap profile: +# heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile +# For historical reasons, the CPU profile does not contain a text- +# readable header line. If the profile looks like a CPU profile, +# this function returns "". If no header line could be found, this +# function returns undef. +# +# The following commands are recognized: +# %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:' +# +# The input file should be in binmode. +sub ReadProfileHeader { + local *PROFILE = shift; + my $firstchar = ""; + my $line = ""; + read(PROFILE, $firstchar, 1); + seek(PROFILE, -1, 1); # unread the firstchar + if ($firstchar !~ /[[:print:]]/) { # is not a text character + return ""; + } + while (defined($line = )) { + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + if ($line =~ /^%warn\s+(.*)/) { # 'warn' command + # Note this matches both '%warn blah\n' and '%warn\n'. + print STDERR "WARNING: $1\n"; # print the rest of the line + } elsif ($line =~ /^%/) { + print STDERR "Ignoring unknown command from profile header: $line"; + } else { + # End of commands, must be the header line. + return $line; + } + } + return undef; # got to EOF without seeing a header line +} + +sub IsSymbolizedProfileFile { + my $file_name = shift; + if (!(-e $file_name) || !(-r $file_name)) { + return 0; + } + # Check if the file contains a symbol-section marker. + open(TFILE, "<$file_name"); + binmode TFILE; + my $firstline = ReadProfileHeader(*TFILE); + close(TFILE); + if (!$firstline) { + return 0; + } + $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $symbol_marker = $&; + return $firstline =~ /^--- *$symbol_marker/; +} + +# Parse profile generated by common/profiler.cc and return a reference +# to a map: +# $result->{version} Version number of profile file +# $result->{period} Sampling period (in microseconds) +# $result->{profile} Profile object +# $result->{threads} Map of thread IDs to profile objects +# $result->{map} Memory map info from profile +# $result->{pcs} Hash of all PC values seen, key is hex address +sub ReadProfile { + my $prog = shift; + my $fname = shift; + my $result; # return value + + $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $contention_marker = $&; + $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $growth_marker = $&; + $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $symbol_marker = $&; + $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $profile_marker = $&; + + # Look at first line to see if it is a heap or a CPU profile. + # CPU profile may start with no header at all, and just binary data + # (starting with \0\0\0\0) -- in that case, don't try to read the + # whole firstline, since it may be gigabytes(!) of data. 
+ open(PROFILE, "<$fname") || error("$fname: $!\n"); + binmode PROFILE; # New perls do UTF-8 processing + my $header = ReadProfileHeader(*PROFILE); + if (!defined($header)) { # means "at EOF" + error("Profile is empty.\n"); + } + + my $symbols; + if ($header =~ m/^--- *$symbol_marker/o) { + # Verify that the user asked for a symbolized profile + if (!$main::use_symbolized_profile) { + # we have both a binary and symbolized profiles, abort + error("FATAL ERROR: Symbolized profile\n $fname\ncannot be used with " . + "a binary arg. Try again without passing\n $prog\n"); + } + # Read the symbol section of the symbolized profile file. + $symbols = ReadSymbols(*PROFILE{IO}); + # Read the next line to get the header for the remaining profile. + $header = ReadProfileHeader(*PROFILE) || ""; + } + + $main::profile_type = ''; + if ($header =~ m/^heap profile:.*$growth_marker/o) { + $main::profile_type = 'growth'; + $result = ReadHeapProfile($prog, *PROFILE, $header); + } elsif ($header =~ m/^heap profile:/) { + $main::profile_type = 'heap'; + $result = ReadHeapProfile($prog, *PROFILE, $header); + } elsif ($header =~ m/^heap/) { + $main::profile_type = 'heap'; + $result = ReadThreadedHeapProfile($prog, $fname, $header); + } elsif ($header =~ m/^--- *$contention_marker/o) { + $main::profile_type = 'contention'; + $result = ReadSynchProfile($prog, *PROFILE); + } elsif ($header =~ m/^--- *Stacks:/) { + print STDERR + "Old format contention profile: mistakenly reports " . + "condition variable signals as lock contentions.\n"; + $main::profile_type = 'contention'; + $result = ReadSynchProfile($prog, *PROFILE); + } elsif ($header =~ m/^--- *$profile_marker/) { + # the binary cpu profile data starts immediately after this line + $main::profile_type = 'cpu'; + $result = ReadCPUProfile($prog, $fname, *PROFILE); + } else { + if (defined($symbols)) { + # a symbolized profile contains a format we don't recognize, bail out + error("$fname: Cannot recognize profile section after symbols.\n"); + } + # no ascii header present -- must be a CPU profile + $main::profile_type = 'cpu'; + $result = ReadCPUProfile($prog, $fname, *PROFILE); + } + + close(PROFILE); + + # if we got symbols along with the profile, return those as well + if (defined($symbols)) { + $result->{symbols} = $symbols; + } + + return $result; +} + +# Subtract one from caller pc so we map back to call instr. +# However, don't do this if we're reading a symbolized profile +# file, in which case the subtract-one was done when the file +# was written. +# +# We apply the same logic to all readers, though ReadCPUProfile uses an +# independent implementation. +sub FixCallerAddresses { + my $stack = shift; + if ($main::use_symbolized_profile) { + return $stack; + } else { + $stack =~ /(\s)/; + my $delimiter = $1; + my @addrs = split(' ', $stack); + my @fixedaddrs; + $#fixedaddrs = $#addrs; + if ($#addrs >= 0) { + $fixedaddrs[0] = $addrs[0]; + } + for (my $i = 1; $i <= $#addrs; $i++) { + $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1"); + } + return join $delimiter, @fixedaddrs; + } +} + +# CPU profile reader +sub ReadCPUProfile { + my $prog = shift; + my $fname = shift; # just used for logging + local *PROFILE = shift; + my $version; + my $period; + my $i; + my $profile = {}; + my $pcs = {}; + + # Parse string into array of slots. + my $slots = CpuProfileStream->new(*PROFILE, $fname); + + # Read header. 
The current header version is a 5-element structure + # containing: + # 0: header count (always 0) + # 1: header "words" (after this one: 3) + # 2: format version (0) + # 3: sampling period (usec) + # 4: unused padding (always 0) + if ($slots->get(0) != 0 ) { + error("$fname: not a profile file, or old format profile file\n"); + } + $i = 2 + $slots->get(1); + $version = $slots->get(2); + $period = $slots->get(3); + # Do some sanity checking on these header values. + if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) { + error("$fname: not a profile file, or corrupted profile file\n"); + } + + # Parse profile + while ($slots->get($i) != -1) { + my $n = $slots->get($i++); + my $d = $slots->get($i++); + if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth? + my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8)); + print STDERR "At index $i (address $addr):\n"; + error("$fname: stack trace depth >= 2**32\n"); + } + if ($slots->get($i) == 0) { + # End of profile data marker + $i += $d; + last; + } + + # Make key out of the stack entries + my @k = (); + for (my $j = 0; $j < $d; $j++) { + my $pc = $slots->get($i+$j); + # Subtract one from caller pc so we map back to call instr. + # However, don't do this if we're reading a symbolized profile + # file, in which case the subtract-one was done when the file + # was written. + if ($j > 0 && !$main::use_symbolized_profile) { + $pc--; + } + $pc = sprintf("%0*x", $address_length, $pc); + $pcs->{$pc} = 1; + push @k, $pc; + } + + AddEntry($profile, (join "\n", @k), $n); + $i += $d; + } + + # Parse map + my $map = ''; + seek(PROFILE, $i * 4, 0); + read(PROFILE, $map, (stat PROFILE)[7]); + + my $r = {}; + $r->{version} = $version; + $r->{period} = $period; + $r->{profile} = $profile; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + + return $r; +} + +sub HeapProfileIndex { + my $index = 1; + if ($main::opt_inuse_space) { + $index = 1; + } elsif ($main::opt_inuse_objects) { + $index = 0; + } elsif ($main::opt_alloc_space) { + $index = 3; + } elsif ($main::opt_alloc_objects) { + $index = 2; + } + return $index; +} + +sub ReadMappedLibraries { + my $fh = shift; + my $map = ""; + # Read the /proc/self/maps data + while (<$fh>) { + s/\r//g; # turn windows-looking lines into unix-looking lines + $map .= $_; + } + return $map; +} + +sub ReadMemoryMap { + my $fh = shift; + my $map = ""; + # Read /proc/self/maps data as formatted by DumpAddressMap() + my $buildvar = ""; + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + # Parse "build=" specification if supplied + if (m/^\s*build=(.*)\n/) { + $buildvar = $1; + } + + # Expand "$build" variable if available + $_ =~ s/\$build\b/$buildvar/g; + + $map .= $_; + } + return $map; +} + +sub AdjustSamples { + my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_; + if ($sample_adjustment) { + if ($sampling_algorithm == 2) { + # Remote-heap version 2 + # The sampling frequency is the rate of a Poisson process. 
+ # This means that the probability of sampling an allocation of + # size X with sampling rate Y is 1 - exp(-X/Y) + if ($n1 != 0) { + my $ratio = (($s1*1.0)/$n1)/($sample_adjustment); + my $scale_factor = 1/(1 - exp(-$ratio)); + $n1 *= $scale_factor; + $s1 *= $scale_factor; + } + if ($n2 != 0) { + my $ratio = (($s2*1.0)/$n2)/($sample_adjustment); + my $scale_factor = 1/(1 - exp(-$ratio)); + $n2 *= $scale_factor; + $s2 *= $scale_factor; + } + } else { + # Remote-heap version 1 + my $ratio; + $ratio = (($s1*1.0)/$n1)/($sample_adjustment); + if ($ratio < 1) { + $n1 /= $ratio; + $s1 /= $ratio; + } + $ratio = (($s2*1.0)/$n2)/($sample_adjustment); + if ($ratio < 1) { + $n2 /= $ratio; + $s2 /= $ratio; + } + } + } + return ($n1, $s1, $n2, $s2); +} + +sub ReadHeapProfile { + my $prog = shift; + local *PROFILE = shift; + my $header = shift; + + my $index = HeapProfileIndex(); + + # Find the type of this profile. The header line looks like: + # heap profile: 1246: 8800744 [ 1246: 8800744] @ /266053 + # There are two pairs , the first inuse objects/space, and the + # second allocated objects/space. This is followed optionally by a profile + # type, and if that is present, optionally by a sampling frequency. + # For remote heap profiles (v1): + # The interpretation of the sampling frequency is that the profiler, for + # each sample, calculates a uniformly distributed random integer less than + # the given value, and records the next sample after that many bytes have + # been allocated. Therefore, the expected sample interval is half of the + # given frequency. By default, if not specified, the expected sample + # interval is 128KB. Only remote-heap-page profiles are adjusted for + # sample size. + # For remote heap profiles (v2): + # The sampling frequency is the rate of a Poisson process. This means that + # the probability of sampling an allocation of size X with sampling rate Y + # is 1 - exp(-X/Y) + # For version 2, a typical header line might look like this: + # heap profile: 1922: 127792360 [ 1922: 127792360] @ _v2/524288 + # the trailing number (524288) is the sampling rate. (Version 1 showed + # double the 'rate' here) + my $sampling_algorithm = 0; + my $sample_adjustment = 0; + chomp($header); + my $type = "unknown"; + if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") { + if (defined($6) && ($6 ne '')) { + $type = $6; + my $sample_period = $8; + # $type is "heapprofile" for profiles generated by the + # heap-profiler, and either "heap" or "heap_v2" for profiles + # generated by sampling directly within tcmalloc. It can also + # be "growth" for heap-growth profiles. The first is typically + # found for profiles generated locally, and the others for + # remote profiles. + if (($type eq "heapprofile") || ($type !~ /heap/) ) { + # No need to adjust for the sampling rate with heap-profiler-derived data + $sampling_algorithm = 0; + } elsif ($type =~ /_v2/) { + $sampling_algorithm = 2; # version 2 sampling + if (defined($sample_period) && ($sample_period ne '')) { + $sample_adjustment = int($sample_period); + } + } else { + $sampling_algorithm = 1; # version 1 sampling + if (defined($sample_period) && ($sample_period ne '')) { + $sample_adjustment = int($sample_period)/2; + } + } + } else { + # We detect whether or not this is a remote-heap profile by checking + # that the total-allocated stats ($n2,$s2) are exactly the + # same as the in-use stats ($n1,$s1). 
It is remotely conceivable + # that a non-remote-heap profile may pass this check, but it is hard + # to imagine how that could happen. + # In this case it's so old it's guaranteed to be remote-heap version 1. + my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); + if (($n1 == $n2) && ($s1 == $s2)) { + # This is likely to be a remote-heap based sample profile + $sampling_algorithm = 1; + } + } + } + + if ($sampling_algorithm > 0) { + # For remote-heap generated profiles, adjust the counts and sizes to + # account for the sample rate (we sample once every 128KB by default). + if ($sample_adjustment == 0) { + # Turn on profile adjustment. + $sample_adjustment = 128*1024; + print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n"; + } else { + printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n", + $sample_adjustment); + } + if ($sampling_algorithm > 1) { + # We don't bother printing anything for the original version (version 1) + printf STDERR "Heap version $sampling_algorithm\n"; + } + } + + my $profile = {}; + my $pcs = {}; + my $map = ""; + + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + if (/^MAPPED_LIBRARIES:/) { + $map .= ReadMappedLibraries(*PROFILE); + last; + } + + if (/^--- Memory map:/) { + $map .= ReadMemoryMap(*PROFILE); + last; + } + + # Read entry of the form: + # : [: ] @ a1 a2 a3 ... an + s/^\s*//; + s/\s*$//; + if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) { + my $stack = $5; + my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4); + my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm, + $n1, $s1, $n2, $s2); + AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); + } + } + + my $r = {}; + $r->{version} = "heap"; + $r->{period} = 1; + $r->{profile} = $profile; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + return $r; +} + +sub ReadThreadedHeapProfile { + my ($prog, $fname, $header) = @_; + + my $index = HeapProfileIndex(); + my $sampling_algorithm = 0; + my $sample_adjustment = 0; + chomp($header); + my $type = "unknown"; + # Assuming a very specific type of header for now. + if ($header =~ m"^heap_v2/(\d+)") { + $type = "_v2"; + $sampling_algorithm = 2; + $sample_adjustment = int($1); + } + if ($type ne "_v2" || !defined($sample_adjustment)) { + die "Threaded heap profiles require v2 sampling with a sample rate\n"; + } + + my $profile = {}; + my $thread_profiles = {}; + my $pcs = {}; + my $map = ""; + my $stack = ""; + + while () { + s/\r//g; + if (/^MAPPED_LIBRARIES:/) { + $map .= ReadMappedLibraries(*PROFILE); + last; + } + + if (/^--- Memory map:/) { + $map .= ReadMemoryMap(*PROFILE); + last; + } + + # Read entry of the form: + # @ a1 a2 ... an + # t*: : [: ] + # t1: : [: ] + # ... + # tn: : [: ] + s/^\s*//; + s/\s*$//; + if (m/^@\s+(.*)$/) { + $stack = $1; + } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) { + if ($stack eq "") { + # Still in the header, so this is just a per-thread summary. 
+ next; + } + my $thread = $2; + my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6); + my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm, + $n1, $s1, $n2, $s2); + if ($thread eq "*") { + AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]); + } else { + if (!exists($thread_profiles->{$thread})) { + $thread_profiles->{$thread} = {}; + } + AddEntries($thread_profiles->{$thread}, $pcs, + FixCallerAddresses($stack), $counts[$index]); + } + } + } + + my $r = {}; + $r->{version} = "heap"; + $r->{period} = 1; + $r->{profile} = $profile; + $r->{threads} = $thread_profiles; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + return $r; +} + +sub ReadSynchProfile { + my $prog = shift; + local *PROFILE = shift; + my $header = shift; + + my $map = ''; + my $profile = {}; + my $pcs = {}; + my $sampling_period = 1; + my $cyclespernanosec = 2.8; # Default assumption for old binaries + my $seen_clockrate = 0; + my $line; + + my $index = 0; + if ($main::opt_total_delay) { + $index = 0; + } elsif ($main::opt_contentions) { + $index = 1; + } elsif ($main::opt_mean_delay) { + $index = 2; + } + + while ( $line = ) { + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) { + my ($cycles, $count, $stack) = ($1, $2, $3); + + # Convert cycles to nanoseconds + $cycles /= $cyclespernanosec; + + # Adjust for sampling done by application + $cycles *= $sampling_period; + $count *= $sampling_period; + + my @values = ($cycles, $count, $cycles / $count); + AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]); + + } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ || + $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) { + my ($cycles, $stack) = ($1, $2); + if ($cycles !~ /^\d+$/) { + next; + } + + # Convert cycles to nanoseconds + $cycles /= $cyclespernanosec; + + # Adjust for sampling done by application + $cycles *= $sampling_period; + + AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles); + + } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) { + my ($variable, $value) = ($1,$2); + for ($variable, $value) { + s/^\s+//; + s/\s+$//; + } + if ($variable eq "cycles/second") { + $cyclespernanosec = $value / 1e9; + $seen_clockrate = 1; + } elsif ($variable eq "sampling period") { + $sampling_period = $value; + } elsif ($variable eq "ms since reset") { + # Currently nothing is done with this value in jeprof + # So we just silently ignore it for now + } elsif ($variable eq "discarded samples") { + # Currently nothing is done with this value in jeprof + # So we just silently ignore it for now + } else { + printf STDERR ("Ignoring unnknown variable in /contention output: " . + "'%s' = '%s'\n",$variable,$value); + } + } else { + # Memory map entry + $map .= $line; + } + } + + if (!$seen_clockrate) { + printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n", + $cyclespernanosec); + } + + my $r = {}; + $r->{version} = 0; + $r->{period} = $sampling_period; + $r->{profile} = $profile; + $r->{libs} = ParseLibraries($prog, $map, $pcs); + $r->{pcs} = $pcs; + return $r; +} + +# Given a hex value in the form "0x1abcd" or "1abcd", return either +# "0001abcd" or "000000000001abcd", depending on the current (global) +# address length. 
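+# Reviewer example (illustrative only, not part of the upstream jeprof
+# source): assuming the global $address_length has been set to 16 for a
+# 64-bit binary, HexExtend would behave as follows:
+#   HexExtend("0x1abcd") -> "000000000001abcd"
+#   HexExtend("1abcd")   -> "000000000001abcd"
+# and with $address_length == 8 (32-bit binary) it would return "0001abcd".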
+sub HexExtend { + my $addr = shift; + + $addr =~ s/^(0x)?0*//; + my $zeros_needed = $address_length - length($addr); + if ($zeros_needed < 0) { + printf STDERR "Warning: address $addr is longer than address length $address_length\n"; + return $addr; + } + return ("0" x $zeros_needed) . $addr; +} + +##### Symbol extraction ##### + +# Aggressively search the lib_prefix values for the given library +# If all else fails, just return the name of the library unmodified. +# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so" +# it will search the following locations in this order, until it finds a file: +# /my/path/lib/dir/mylib.so +# /other/path/lib/dir/mylib.so +# /my/path/dir/mylib.so +# /other/path/dir/mylib.so +# /my/path/mylib.so +# /other/path/mylib.so +# /lib/dir/mylib.so (returned as last resort) +sub FindLibrary { + my $file = shift; + my $suffix = $file; + + # Search for the library as described above + do { + foreach my $prefix (@prefix_list) { + my $fullpath = $prefix . $suffix; + if (-e $fullpath) { + return $fullpath; + } + } + } while ($suffix =~ s|^/[^/]+/|/|); + return $file; +} + +# Return path to library with debugging symbols. +# For libc libraries, the copy in /usr/lib/debug contains debugging symbols +sub DebuggingLibrary { + my $file = shift; + if ($file =~ m|^/|) { + if (-f "/usr/lib/debug$file") { + return "/usr/lib/debug$file"; + } elsif (-f "/usr/lib/debug$file.debug") { + return "/usr/lib/debug$file.debug"; + } + } + return undef; +} + +# Parse text section header of a library using objdump +sub ParseTextSectionHeaderFromObjdump { + my $lib = shift; + + my $size = undef; + my $vma; + my $file_offset; + # Get objdump output from the library file to figure out how to + # map between mapped addresses and addresses in the library. + my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib); + open(OBJDUMP, "$cmd |") || error("$cmd: $!\n"); + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + # Idx Name Size VMA LMA File off Algn + # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4 + # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file + # offset may still be 8. But AddressSub below will still handle that. + my @x = split; + if (($#x >= 6) && ($x[1] eq '.text')) { + $size = $x[2]; + $vma = $x[3]; + $file_offset = $x[5]; + last; + } + } + close(OBJDUMP); + + if (!defined($size)) { + return undef; + } + + my $r = {}; + $r->{size} = $size; + $r->{vma} = $vma; + $r->{file_offset} = $file_offset; + + return $r; +} + +# Parse text section header of a library using otool (on OS X) +sub ParseTextSectionHeaderFromOtool { + my $lib = shift; + + my $size = undef; + my $vma = undef; + my $file_offset = undef; + # Get otool output from the library file to figure out how to + # map between mapped addresses and addresses in the library. + my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib); + open(OTOOL, "$command |") || error("$command: $!\n"); + my $cmd = ""; + my $sectname = ""; + my $segname = ""; + foreach my $line () { + $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines + # Load command <#> + # cmd LC_SEGMENT + # [...] + # Section + # sectname __text + # segname __TEXT + # addr 0x000009f8 + # size 0x00018b9e + # offset 2552 + # align 2^2 (4) + # We will need to strip off the leading 0x from the hex addresses, + # and convert the offset into hex. 
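+  # Reviewer note (illustrative, not upstream code): for the sample
+  # section quoted above (addr 0x000009f8, size 0x00018b9e, offset 2552),
+  # the parsing below would end up with $vma = "000009f8",
+  # $size = "00018b9e" and $file_offset = sprintf("%016x", 2552)
+  # = "00000000000009f8".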
+ if ($line =~ /Load command/) { + $cmd = ""; + $sectname = ""; + $segname = ""; + } elsif ($line =~ /Section/) { + $sectname = ""; + $segname = ""; + } elsif ($line =~ /cmd (\w+)/) { + $cmd = $1; + } elsif ($line =~ /sectname (\w+)/) { + $sectname = $1; + } elsif ($line =~ /segname (\w+)/) { + $segname = $1; + } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") && + $sectname eq "__text" && + $segname eq "__TEXT")) { + next; + } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) { + $vma = $1; + } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) { + $size = $1; + } elsif ($line =~ /\boffset ([0-9]+)/) { + $file_offset = sprintf("%016x", $1); + } + if (defined($vma) && defined($size) && defined($file_offset)) { + last; + } + } + close(OTOOL); + + if (!defined($vma) || !defined($size) || !defined($file_offset)) { + return undef; + } + + my $r = {}; + $r->{size} = $size; + $r->{vma} = $vma; + $r->{file_offset} = $file_offset; + + return $r; +} + +sub ParseTextSectionHeader { + # obj_tool_map("otool") is only defined if we're in a Mach-O environment + if (defined($obj_tool_map{"otool"})) { + my $r = ParseTextSectionHeaderFromOtool(@_); + if (defined($r)){ + return $r; + } + } + # If otool doesn't work, or we don't have it, fall back to objdump + return ParseTextSectionHeaderFromObjdump(@_); +} + +# Split /proc/pid/maps dump into a list of libraries +sub ParseLibraries { + return if $main::use_symbol_page; # We don't need libraries info. + my $prog = shift; + my $map = shift; + my $pcs = shift; + + my $result = []; + my $h = "[a-f0-9]+"; + my $zero_offset = HexExtend("0"); + + my $buildvar = ""; + foreach my $l (split("\n", $map)) { + if ($l =~ m/^\s*build=(.*)$/) { + $buildvar = $1; + } + + my $start; + my $finish; + my $offset; + my $lib; + if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) { + # Full line from /proc/self/maps. Example: + # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so + $start = HexExtend($1); + $finish = HexExtend($2); + $offset = HexExtend($3); + $lib = $4; + $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths + } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) { + # Cooked line from DumpAddressMap. Example: + # 40000000-40015000: /lib/ld-2.3.2.so + $start = HexExtend($1); + $finish = HexExtend($2); + $offset = $zero_offset; + $lib = $3; + } + # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in + # function procfs_doprocmap (sys/fs/procfs/procfs_map.c) + # + # Example: + # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s + # o.1 NCH -1 + elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) { + $start = HexExtend($1); + $finish = HexExtend($2); + $offset = $zero_offset; + $lib = FindLibrary($5); + + } else { + next; + } + + # Expand "$build" variable if available + $lib =~ s/\$build\b/$buildvar/g; + + $lib = FindLibrary($lib); + + # Check for pre-relocated libraries, which use pre-relocated symbol tables + # and thus require adjusting the offset that we'll use to translate + # VM addresses into symbol table addresses. + # Only do this if we're not going to fetch the symbol table from a + # debugging copy of the library. 
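+    # Reviewer example (illustrative, not upstream code): for the .text
+    # section quoted in the ParseTextSectionHeaderFromObjdump comment above
+    # (VMA 420156f0, file offset 000156f0), $vma_offset below would come out
+    # as 42000000, which is then added onto the offset taken from the
+    # memory map before it is used for symbol-table lookups.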
+ if (!DebuggingLibrary($lib)) { + my $text = ParseTextSectionHeader($lib); + if (defined($text)) { + my $vma_offset = AddressSub($text->{vma}, $text->{file_offset}); + $offset = AddressAdd($offset, $vma_offset); + } + } + + if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; } + push(@{$result}, [$lib, $start, $finish, $offset]); + } + + # Append special entry for additional library (not relocated) + if ($main::opt_lib ne "") { + my $text = ParseTextSectionHeader($main::opt_lib); + if (defined($text)) { + my $start = $text->{vma}; + my $finish = AddressAdd($start, $text->{size}); + + push(@{$result}, [$main::opt_lib, $start, $finish, $start]); + } + } + + # Append special entry for the main program. This covers + # 0..max_pc_value_seen, so that we assume pc values not found in one + # of the library ranges will be treated as coming from the main + # program binary. + my $min_pc = HexExtend("0"); + my $max_pc = $min_pc; # find the maximal PC value in any sample + foreach my $pc (keys(%{$pcs})) { + if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); } + } + push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]); + + return $result; +} + +# Add two hex addresses of length $address_length. +# Run jeprof --test for unit test if this is changed. +sub AddressAdd { + my $addr1 = shift; + my $addr2 = shift; + my $sum; + + if ($address_length == 8) { + # Perl doesn't cope with wraparound arithmetic, so do it explicitly: + $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16); + return sprintf("%08x", $sum); + + } else { + # Do the addition in 7-nibble chunks to trivialize carry handling. + + if ($main::opt_debug and $main::opt_test) { + print STDERR "AddressAdd $addr1 + $addr2 = "; + } + + my $a1 = substr($addr1,-7); + $addr1 = substr($addr1,0,-7); + my $a2 = substr($addr2,-7); + $addr2 = substr($addr2,0,-7); + $sum = hex($a1) + hex($a2); + my $c = 0; + if ($sum > 0xfffffff) { + $c = 1; + $sum -= 0x10000000; + } + my $r = sprintf("%07x", $sum); + + $a1 = substr($addr1,-7); + $addr1 = substr($addr1,0,-7); + $a2 = substr($addr2,-7); + $addr2 = substr($addr2,0,-7); + $sum = hex($a1) + hex($a2) + $c; + $c = 0; + if ($sum > 0xfffffff) { + $c = 1; + $sum -= 0x10000000; + } + $r = sprintf("%07x", $sum) . $r; + + $sum = hex($addr1) + hex($addr2) + $c; + if ($sum > 0xff) { $sum -= 0x100; } + $r = sprintf("%02x", $sum) . $r; + + if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; } + + return $r; + } +} + + +# Subtract two hex addresses of length $address_length. +# Run jeprof --test for unit test if this is changed. +sub AddressSub { + my $addr1 = shift; + my $addr2 = shift; + my $diff; + + if ($address_length == 8) { + # Perl doesn't cope with wraparound arithmetic, so do it explicitly: + $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16); + return sprintf("%08x", $diff); + + } else { + # Do the addition in 7-nibble chunks to trivialize borrow handling. + # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; } + + my $a1 = hex(substr($addr1,-7)); + $addr1 = substr($addr1,0,-7); + my $a2 = hex(substr($addr2,-7)); + $addr2 = substr($addr2,0,-7); + my $b = 0; + if ($a2 > $a1) { + $b = 1; + $a1 += 0x10000000; + } + $diff = $a1 - $a2; + my $r = sprintf("%07x", $diff); + + $a1 = hex(substr($addr1,-7)); + $addr1 = substr($addr1,0,-7); + $a2 = hex(substr($addr2,-7)) + $b; + $addr2 = substr($addr2,0,-7); + $b = 0; + if ($a2 > $a1) { + $b = 1; + $a1 += 0x10000000; + } + $diff = $a1 - $a2; + $r = sprintf("%07x", $diff) . 
$r; + + $a1 = hex($addr1); + $a2 = hex($addr2) + $b; + if ($a2 > $a1) { $a1 += 0x100; } + $diff = $a1 - $a2; + $r = sprintf("%02x", $diff) . $r; + + # if ($main::opt_debug) { print STDERR "$r\n"; } + + return $r; + } +} + +# Increment a hex addresses of length $address_length. +# Run jeprof --test for unit test if this is changed. +sub AddressInc { + my $addr = shift; + my $sum; + + if ($address_length == 8) { + # Perl doesn't cope with wraparound arithmetic, so do it explicitly: + $sum = (hex($addr)+1) % (0x10000000 * 16); + return sprintf("%08x", $sum); + + } else { + # Do the addition in 7-nibble chunks to trivialize carry handling. + # We are always doing this to step through the addresses in a function, + # and will almost never overflow the first chunk, so we check for this + # case and exit early. + + # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; } + + my $a1 = substr($addr,-7); + $addr = substr($addr,0,-7); + $sum = hex($a1) + 1; + my $r = sprintf("%07x", $sum); + if ($sum <= 0xfffffff) { + $r = $addr . $r; + # if ($main::opt_debug) { print STDERR "$r\n"; } + return HexExtend($r); + } else { + $r = "0000000"; + } + + $a1 = substr($addr,-7); + $addr = substr($addr,0,-7); + $sum = hex($a1) + 1; + $r = sprintf("%07x", $sum) . $r; + if ($sum <= 0xfffffff) { + $r = $addr . $r; + # if ($main::opt_debug) { print STDERR "$r\n"; } + return HexExtend($r); + } else { + $r = "00000000000000"; + } + + $sum = hex($addr) + 1; + if ($sum > 0xff) { $sum -= 0x100; } + $r = sprintf("%02x", $sum) . $r; + + # if ($main::opt_debug) { print STDERR "$r\n"; } + return $r; + } +} + +# Extract symbols for all PC values found in profile +sub ExtractSymbols { + my $libs = shift; + my $pcset = shift; + + my $symbols = {}; + + # Map each PC value to the containing library. To make this faster, + # we sort libraries by their starting pc value (highest first), and + # advance through the libraries as we advance the pc. Sometimes the + # addresses of libraries may overlap with the addresses of the main + # binary, so to make sure the libraries 'win', we iterate over the + # libraries in reverse order (which assumes the binary doesn't start + # in the middle of a library, which seems a fair assumption). + my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings + foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) { + my $libname = $lib->[0]; + my $start = $lib->[1]; + my $finish = $lib->[2]; + my $offset = $lib->[3]; + + # Use debug library if it exists + my $debug_libname = DebuggingLibrary($libname); + if ($debug_libname) { + $libname = $debug_libname; + } + + # Get list of pcs that belong in this library. + my $contained = []; + my ($start_pc_index, $finish_pc_index); + # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index]. + for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0; + $finish_pc_index--) { + last if $pcs[$finish_pc_index - 1] le $finish; + } + # Find smallest start_pc_index such that $start <= $pc[$start_pc_index]. + for ($start_pc_index = $finish_pc_index; $start_pc_index > 0; + $start_pc_index--) { + last if $pcs[$start_pc_index - 1] lt $start; + } + # This keeps PC values higher than $pc[$finish_pc_index] in @pcs, + # in case there are overlaps in libraries and the main binary. 
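+    # Reviewer example (illustrative, not upstream code): with @pcs already
+    # sorted as (00001000, 00002000, 00003000, 00004000) and a library
+    # spanning start=00002000 .. finish=00003000, the two loops above leave
+    # $start_pc_index == 1 and $finish_pc_index == 3, so the splice below
+    # moves exactly 00002000 and 00003000 into @{$contained} while keeping
+    # 00004000 in @pcs for later libraries.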
+ @{$contained} = splice(@pcs, $start_pc_index, + $finish_pc_index - $start_pc_index); + # Map to symbols + MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols); + } + + return $symbols; +} + +# Map list of PC values to symbols for a given image +sub MapToSymbols { + my $image = shift; + my $offset = shift; + my $pclist = shift; + my $symbols = shift; + + my $debug = 0; + + # Ignore empty binaries + if ($#{$pclist} < 0) { return; } + + # Figure out the addr2line command to use + my $addr2line = $obj_tool_map{"addr2line"}; + my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image); + if (exists $obj_tool_map{"addr2line_pdb"}) { + $addr2line = $obj_tool_map{"addr2line_pdb"}; + $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image); + } + + # If "addr2line" isn't installed on the system at all, just use + # nm to get what info we can (function names, but not line numbers). + if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) { + MapSymbolsWithNM($image, $offset, $pclist, $symbols); + return; + } + + # "addr2line -i" can produce a variable number of lines per input + # address, with no separator that allows us to tell when data for + # the next address starts. So we find the address for a special + # symbol (_fini) and interleave this address between all real + # addresses passed to addr2line. The name of this special symbol + # can then be used as a separator. + $sep_address = undef; # May be filled in by MapSymbolsWithNM() + my $nm_symbols = {}; + MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols); + if (defined($sep_address)) { + # Only add " -i" to addr2line if the binary supports it. + # addr2line --help returns 0, but not if it sees an unknown flag first. + if (system("$cmd -i --help >$dev_null 2>&1") == 0) { + $cmd .= " -i"; + } else { + $sep_address = undef; # no need for sep_address if we don't support -i + } + } + + # Make file with all PC values with intervening 'sep_address' so + # that we can reliably detect the end of inlined function list + open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n"); + if ($debug) { print("---- $image ---\n"); } + for (my $i = 0; $i <= $#{$pclist}; $i++) { + # addr2line always reads hex addresses, and does not need '0x' prefix. + if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); } + printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset)); + if (defined($sep_address)) { + printf ADDRESSES ("%s\n", $sep_address); + } + } + close(ADDRESSES); + if ($debug) { + print("----\n"); + system("cat", $main::tmpfile_sym); + print("----\n"); + system("$cmd < " . ShellEscape($main::tmpfile_sym)); + print("----\n"); + } + + open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |") + || error("$cmd: $!\n"); + my $count = 0; # Index in pclist + while () { + # Read fullfunction and filelineinfo from next pair of lines + s/\r?\n$//g; + my $fullfunction = $_; + $_ = ; + s/\r?\n$//g; + my $filelinenum = $_; + + if (defined($sep_address) && $fullfunction eq $sep_symbol) { + # Terminating marker for data for this address + $count++; + next; + } + + $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths + + my $pcstr = $pclist->[$count]; + my $function = ShortFunctionName($fullfunction); + my $nms = $nm_symbols->{$pcstr}; + if (defined($nms)) { + if ($fullfunction eq '??') { + # nm found a symbol for us. 
+ $function = $nms->[0]; + $fullfunction = $nms->[2]; + } else { + # MapSymbolsWithNM tags each routine with its starting address, + # useful in case the image has multiple occurrences of this + # routine. (It uses a syntax that resembles template paramters, + # that are automatically stripped out by ShortFunctionName().) + # addr2line does not provide the same information. So we check + # if nm disambiguated our symbol, and if so take the annotated + # (nm) version of the routine-name. TODO(csilvers): this won't + # catch overloaded, inlined symbols, which nm doesn't see. + # Better would be to do a check similar to nm's, in this fn. + if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn + $function = $nms->[0]; + $fullfunction = $nms->[2]; + } + } + } + + # Prepend to accumulated symbols for pcstr + # (so that caller comes before callee) + my $sym = $symbols->{$pcstr}; + if (!defined($sym)) { + $sym = []; + $symbols->{$pcstr} = $sym; + } + unshift(@{$sym}, $function, $filelinenum, $fullfunction); + if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); } + if (!defined($sep_address)) { + # Inlining is off, so this entry ends immediately + $count++; + } + } + close(SYMBOLS); +} + +# Use nm to map the list of referenced PCs to symbols. Return true iff we +# are able to read procedure information via nm. +sub MapSymbolsWithNM { + my $image = shift; + my $offset = shift; + my $pclist = shift; + my $symbols = shift; + + # Get nm output sorted by increasing address + my $symbol_table = GetProcedureBoundaries($image, "."); + if (!%{$symbol_table}) { + return 0; + } + # Start addresses are already the right length (8 or 16 hex digits). + my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] } + keys(%{$symbol_table}); + + if ($#names < 0) { + # No symbols: just use addresses + foreach my $pc (@{$pclist}) { + my $pcstr = "0x" . $pc; + $symbols->{$pc} = [$pcstr, "?", $pcstr]; + } + return 0; + } + + # Sort addresses so we can do a join against nm output + my $index = 0; + my $fullname = $names[0]; + my $name = ShortFunctionName($fullname); + foreach my $pc (sort { $a cmp $b } @{$pclist}) { + # Adjust for mapped offset + my $mpc = AddressSub($pc, $offset); + while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){ + $index++; + $fullname = $names[$index]; + $name = ShortFunctionName($fullname); + } + if ($mpc lt $symbol_table->{$fullname}->[1]) { + $symbols->{$pc} = [$name, "?", $fullname]; + } else { + my $pcstr = "0x" . 
$pc; + $symbols->{$pc} = [$pcstr, "?", $pcstr]; + } + } + return 1; +} + +sub ShortFunctionName { + my $function = shift; + while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types + while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments + $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type + return $function; +} + +# Trim overly long symbols found in disassembler output +sub CleanDisassembly { + my $d = shift; + while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax) + while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments + return $d; +} + +# Clean file name for display +sub CleanFileName { + my ($f) = @_; + $f =~ s|^/proc/self/cwd/||; + $f =~ s|^\./||; + return $f; +} + +# Make address relative to section and clean up for display +sub UnparseAddress { + my ($offset, $address) = @_; + $address = AddressSub($address, $offset); + $address =~ s/^0x//; + $address =~ s/^0*//; + return $address; +} + +##### Miscellaneous ##### + +# Find the right versions of the above object tools to use. The +# argument is the program file being analyzed, and should be an ELF +# 32-bit or ELF 64-bit executable file. The location of the tools +# is determined by considering the following options in this order: +# 1) --tools option, if set +# 2) JEPROF_TOOLS environment variable, if set +# 3) the environment +sub ConfigureObjTools { + my $prog_file = shift; + + # Check for the existence of $prog_file because /usr/bin/file does not + # predictably return error status in prod. + (-e $prog_file) || error("$prog_file does not exist.\n"); + + my $file_type = undef; + if (-e "/usr/bin/file") { + # Follow symlinks (at least for systems where "file" supports that). + my $escaped_prog_file = ShellEscape($prog_file); + $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null || + /usr/bin/file $escaped_prog_file`; + } elsif ($^O == "MSWin32") { + $file_type = "MS Windows"; + } else { + print STDERR "WARNING: Can't determine the file type of $prog_file"; + } + + if ($file_type =~ /64-bit/) { + # Change $address_length to 16 if the program file is ELF 64-bit. + # We can't detect this from many (most?) heap or lock contention + # profiles, since the actual addresses referenced are generally in low + # memory even for 64-bit programs. + $address_length = 16; + } + + if ($file_type =~ /MS Windows/) { + # For windows, we provide a version of nm and addr2line as part of + # the opensource release, which is capable of parsing + # Windows-style PDB executables. It should live in the path, or + # in the same directory as jeprof. + $obj_tool_map{"nm_pdb"} = "nm-pdb"; + $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb"; + } + + if ($file_type =~ /Mach-O/) { + # OS X uses otool to examine Mach-O files, rather than objdump. + $obj_tool_map{"otool"} = "otool"; + $obj_tool_map{"addr2line"} = "false"; # no addr2line + $obj_tool_map{"objdump"} = "false"; # no objdump + } + + # Go fill in %obj_tool_map with the pathnames to use: + foreach my $tool (keys %obj_tool_map) { + $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool}); + } +} + +# Returns the path of a caller-specified object tool. If --tools or +# JEPROF_TOOLS are specified, then returns the full path to the tool +# with that prefix. Otherwise, returns the path unmodified (which +# means we will look for it on PATH). +sub ConfigureTool { + my $tool = shift; + my $path; + + # --tools (or $JEPROF_TOOLS) is a comma separated list, where each + # item is either a) a pathname prefix, or b) a map of the form + # :. 
First we look for an entry of type (b) for our + # tool. If one is found, we use it. Otherwise, we consider all the + # pathname prefixes in turn, until one yields an existing file. If + # none does, we use a default path. + my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || ""; + if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) { + $path = $2; + # TODO(csilvers): sanity-check that $path exists? Hard if it's relative. + } elsif ($tools ne '') { + foreach my $prefix (split(',', $tools)) { + next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list + if (-x $prefix . $tool) { + $path = $prefix . $tool; + last; + } + } + if (!$path) { + error("No '$tool' found with prefix specified by " . + "--tools (or \$JEPROF_TOOLS) '$tools'\n"); + } + } else { + # ... otherwise use the version that exists in the same directory as + # jeprof. If there's nothing there, use $PATH. + $0 =~ m,[^/]*$,; # this is everything after the last slash + my $dirname = $`; # this is everything up to and including the last slash + if (-x "$dirname$tool") { + $path = "$dirname$tool"; + } else { + $path = $tool; + } + } + if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; } + return $path; +} + +sub ShellEscape { + my @escaped_words = (); + foreach my $word (@_) { + my $escaped_word = $word; + if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist + $escaped_word =~ s/'/'\\''/; + $escaped_word = "'$escaped_word'"; + } + push(@escaped_words, $escaped_word); + } + return join(" ", @escaped_words); +} + +sub cleanup { + unlink($main::tmpfile_sym); + unlink(keys %main::tempnames); + + # We leave any collected profiles in $HOME/jeprof in case the user wants + # to look at them later. We print a message informing them of this. + if ((scalar(@main::profile_files) > 0) && + defined($main::collected_profile)) { + if (scalar(@main::profile_files) == 1) { + print STDERR "Dynamically gathered profile is in $main::collected_profile\n"; + } + print STDERR "If you want to investigate this profile further, you can do:\n"; + print STDERR "\n"; + print STDERR " jeprof \\\n"; + print STDERR " $main::prog \\\n"; + print STDERR " $main::collected_profile\n"; + print STDERR "\n"; + } +} + +sub sighandler { + cleanup(); + exit(1); +} + +sub error { + my $msg = shift; + print STDERR $msg; + cleanup(); + exit(1); +} + + +# Run $nm_command and get all the resulting procedure boundaries whose +# names match "$regexp" and returns them in a hashtable mapping from +# procedure name to a two-element vector of [start address, end address] +sub GetProcedureBoundariesViaNm { + my $escaped_nm_command = shift; # shell-escaped + my $regexp = shift; + + my $symbol_table = {}; + open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n"); + my $last_start = "0"; + my $routine = ""; + while () { + s/\r//g; # turn windows-looking lines into unix-looking lines + if (m/^\s*([0-9a-f]+) (.) (..*)/) { + my $start_val = $1; + my $type = $2; + my $this_routine = $3; + + # It's possible for two symbols to share the same address, if + # one is a zero-length variable (like __start_google_malloc) or + # one symbol is a weak alias to another (like __libc_malloc). + # In such cases, we want to ignore all values except for the + # actual symbol, which in nm-speak has type "T". The logic + # below does this, though it's a bit tricky: what happens when + # we have a series of lines with the same address, is the first + # one gets queued up to be processed. 
However, it won't + # *actually* be processed until later, when we read a line with + # a different address. That means that as long as we're reading + # lines with the same address, we have a chance to replace that + # item in the queue, which we do whenever we see a 'T' entry -- + # that is, a line with type 'T'. If we never see a 'T' entry, + # we'll just go ahead and process the first entry (which never + # got touched in the queue), and ignore the others. + if ($start_val eq $last_start && $type =~ /t/i) { + # We are the 'T' symbol at this address, replace previous symbol. + $routine = $this_routine; + next; + } elsif ($start_val eq $last_start) { + # We're not the 'T' symbol at this address, so ignore us. + next; + } + + if ($this_routine eq $sep_symbol) { + $sep_address = HexExtend($start_val); + } + + # Tag this routine with the starting address in case the image + # has multiple occurrences of this routine. We use a syntax + # that resembles template parameters that are automatically + # stripped out by ShortFunctionName() + $this_routine .= "<$start_val>"; + + if (defined($routine) && $routine =~ m/$regexp/) { + $symbol_table->{$routine} = [HexExtend($last_start), + HexExtend($start_val)]; + } + $last_start = $start_val; + $routine = $this_routine; + } elsif (m/^Loaded image name: (.+)/) { + # The win32 nm workalike emits information about the binary it is using. + if ($main::opt_debug) { print STDERR "Using Image $1\n"; } + } elsif (m/^PDB file name: (.+)/) { + # The win32 nm workalike emits information about the pdb it is using. + if ($main::opt_debug) { print STDERR "Using PDB $1\n"; } + } + } + close(NM); + # Handle the last line in the nm output. Unfortunately, we don't know + # how big this last symbol is, because we don't know how big the file + # is. For now, we just give it a size of 0. + # TODO(csilvers): do better here. + if (defined($routine) && $routine =~ m/$regexp/) { + $symbol_table->{$routine} = [HexExtend($last_start), + HexExtend($last_start)]; + } + return $symbol_table; +} + +# Gets the procedure boundaries for all routines in "$image" whose names +# match "$regexp" and returns them in a hashtable mapping from procedure +# name to a two-element vector of [start address, end address]. +# Will return an empty map if nm is not installed or not working properly. +sub GetProcedureBoundaries { + my $image = shift; + my $regexp = shift; + + # If $image doesn't start with /, then put ./ in front of it. This works + # around an obnoxious bug in our probing of nm -f behavior. + # "nm -f $image" is supposed to fail on GNU nm, but if: + # + # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND + # b. you have a.out in your current directory (a not uncommon occurence) + # + # then "nm -f $image" succeeds because -f only looks at the first letter of + # the argument, which looks valid because it's [BbSsPp], and then since + # there's no image provided, it looks for a.out and finds it. + # + # This regex makes sure that $image starts with . or /, forcing the -f + # parsing to fail since . and / are not valid formats. + $image =~ s#^[^/]#./$&#; + + # For libc libraries, the copy in /usr/lib/debug contains debugging symbols + my $debugging = DebuggingLibrary($image); + if ($debugging) { + $image = $debugging; + } + + my $nm = $obj_tool_map{"nm"}; + my $cppfilt = $obj_tool_map{"c++filt"}; + + # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm + # binary doesn't support --demangle. 
In addition, for OS X we need + # to use the -f flag to get 'flat' nm output (otherwise we don't sort + # properly and get incorrect results). Unfortunately, GNU nm uses -f + # in an incompatible way. So first we test whether our nm supports + # --demangle and -f. + my $demangle_flag = ""; + my $cppfilt_flag = ""; + my $to_devnull = ">$dev_null 2>&1"; + if (system(ShellEscape($nm, "--demangle", "image") . $to_devnull) == 0) { + # In this mode, we do "nm --demangle " + $demangle_flag = "--demangle"; + $cppfilt_flag = ""; + } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) { + # In this mode, we do "nm | c++filt" + $cppfilt_flag = " | " . ShellEscape($cppfilt); + }; + my $flatten_flag = ""; + if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) { + $flatten_flag = "-f"; + } + + # Finally, in the case $imagie isn't a debug library, we try again with + # -D to at least get *exported* symbols. If we can't use --demangle, + # we use c++filt instead, if it exists on this system. + my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag, + $image) . " 2>$dev_null $cppfilt_flag", + ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag, + $image) . " 2>$dev_null $cppfilt_flag", + # 6nm is for Go binaries + ShellEscape("6nm", "$image") . " 2>$dev_null | sort", + ); + + # If the executable is an MS Windows PDB-format executable, we'll + # have set up obj_tool_map("nm_pdb"). In this case, we actually + # want to use both unix nm and windows-specific nm_pdb, since + # PDB-format executables can apparently include dwarf .o files. + if (exists $obj_tool_map{"nm_pdb"}) { + push(@nm_commands, + ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image) + . " 2>$dev_null"); + } + + foreach my $nm_command (@nm_commands) { + my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp); + return $symbol_table if (%{$symbol_table}); + } + my $symbol_table = {}; + return $symbol_table; +} + + +# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings. +# To make them more readable, we add underscores at interesting places. +# This routine removes the underscores, producing the canonical representation +# used by jeprof to represent addresses, particularly in the tested routines. +sub CanonicalHex { + my $arg = shift; + return join '', (split '_',$arg); +} + + +# Unit test for AddressAdd: +sub AddressAddUnitTest { + my $test_data_8 = shift; + my $test_data_16 = shift; + my $error_count = 0; + my $fail_count = 0; + my $pass_count = 0; + # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n"; + + # First a few 8-nibble addresses. Note that this implementation uses + # plain old arithmetic, so a quick sanity check along with verifying what + # happens to overflow (we want it to wrap): + $address_length = 8; + foreach my $row (@{$test_data_8}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressAdd ($row->[0], $row->[1]); + if ($sum ne $row->[2]) { + printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, + $row->[0], $row->[1], $row->[2]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count = $fail_count; + $fail_count = 0; + $pass_count = 0; + + # Now 16-nibble addresses. 
+ $address_length = 16; + foreach my $row (@{$test_data_16}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1])); + my $expected = join '', (split '_',$row->[2]); + if ($sum ne CanonicalHex($row->[2])) { + printf STDERR "ERROR: %s != %s + %s = %s\n", $sum, + $row->[0], $row->[1], $row->[2]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count += $fail_count; + + return $error_count; +} + + +# Unit test for AddressSub: +sub AddressSubUnitTest { + my $test_data_8 = shift; + my $test_data_16 = shift; + my $error_count = 0; + my $fail_count = 0; + my $pass_count = 0; + # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n"; + + # First a few 8-nibble addresses. Note that this implementation uses + # plain old arithmetic, so a quick sanity check along with verifying what + # happens to overflow (we want it to wrap): + $address_length = 8; + foreach my $row (@{$test_data_8}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressSub ($row->[0], $row->[1]); + if ($sum ne $row->[3]) { + printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, + $row->[0], $row->[1], $row->[3]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count = $fail_count; + $fail_count = 0; + $pass_count = 0; + + # Now 16-nibble addresses. + $address_length = 16; + foreach my $row (@{$test_data_16}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1])); + if ($sum ne CanonicalHex($row->[3])) { + printf STDERR "ERROR: %s != %s - %s = %s\n", $sum, + $row->[0], $row->[1], $row->[3]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count += $fail_count; + + return $error_count; +} + + +# Unit test for AddressInc: +sub AddressIncUnitTest { + my $test_data_8 = shift; + my $test_data_16 = shift; + my $error_count = 0; + my $fail_count = 0; + my $pass_count = 0; + # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n"; + + # First a few 8-nibble addresses. Note that this implementation uses + # plain old arithmetic, so a quick sanity check along with verifying what + # happens to overflow (we want it to wrap): + $address_length = 8; + foreach my $row (@{$test_data_8}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressInc ($row->[0]); + if ($sum ne $row->[4]) { + printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, + $row->[0], $row->[4]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count = $fail_count; + $fail_count = 0; + $pass_count = 0; + + # Now 16-nibble addresses. 
+ $address_length = 16; + foreach my $row (@{$test_data_16}) { + if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; } + my $sum = AddressInc (CanonicalHex($row->[0])); + if ($sum ne CanonicalHex($row->[4])) { + printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum, + $row->[0], $row->[4]; + ++$fail_count; + } else { + ++$pass_count; + } + } + printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n", + $pass_count, $fail_count; + $error_count += $fail_count; + + return $error_count; +} + + +# Driver for unit tests. +# Currently just the address add/subtract/increment routines for 64-bit. +sub RunUnitTests { + my $error_count = 0; + + # This is a list of tuples [a, b, a+b, a-b, a+1] + my $unit_test_data_8 = [ + [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)], + [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)], + [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)], + [qw(00000001 ffffffff 00000000 00000002 00000002)], + [qw(00000001 fffffff0 fffffff1 00000011 00000002)], + ]; + my $unit_test_data_16 = [ + # The implementation handles data in 7-nibble chunks, so those are the + # interesting boundaries. + [qw(aaaaaaaa 50505050 + 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)], + [qw(50505050 aaaaaaaa + 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)], + [qw(ffffffff aaaaaaaa + 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)], + [qw(00000001 ffffffff + 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)], + [qw(00000001 fffffff0 + 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)], + + [qw(00_a00000a_aaaaaaa 50505050 + 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)], + [qw(0f_fff0005_0505050 aaaaaaaa + 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)], + [qw(00_000000f_fffffff 01_800000a_aaaaaaa + 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)], + [qw(00_0000000_0000001 ff_fffffff_fffffff + 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)], + [qw(00_0000000_0000001 ff_fffffff_ffffff0 + ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)], + ]; + + $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16); + $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16); + $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16); + if ($error_count > 0) { + print STDERR $error_count, " errors: FAILED\n"; + } else { + print STDERR "PASS\n"; + } + exit ($error_count); +} diff --git a/redis.submodule/jemalloc/config.guess b/redis.submodule/jemalloc/config.guess new file mode 100755 index 0000000..1f5c50c --- /dev/null +++ b/redis.submodule/jemalloc/config.guess @@ -0,0 +1,1420 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2014 Free Software Foundation, Inc. + +timestamp='2014-03-23' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# +# Please send patches with a ChangeLog entry to config-patches@gnu.org. + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2014 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. 
+# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
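+	# Reviewer example (illustrative, not part of upstream config.guess):
+	# on an ELF i386 NetBSD 9.0 machine the variables above would be
+	# machine=i386-unknown, os=netbsdelf and release=9.0, so the line
+	# below would print "i386-unknown-netbsdelf9.0".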
+ echo "${machine}-${os}${release}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH="i386" + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH="x86_64" + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. 
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code.
GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + 
FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) + echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC="gnulibc1" ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. 
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; +esac + +cat >&2 < in order to provide the needed +information to handle your system. + +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/redis.submodule/jemalloc/config.stamp.in b/redis.submodule/jemalloc/config.stamp.in new file mode 100644 index 0000000..e69de29 diff --git a/redis.submodule/jemalloc/config.sub b/redis.submodule/jemalloc/config.sub new file mode 100755 index 0000000..0ccff77 --- /dev/null +++ b/redis.submodule/jemalloc/config.sub @@ -0,0 +1,1797 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2014 Free Software Foundation, Inc. + +timestamp='2014-05-01' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches with a ChangeLog entry to config-patches@gnu.org. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2014 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. 
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | \ + kopensolaris*-gnu* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | epiphany \ + | fido | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pyramid \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pyramid-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 
+ ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + 
basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + 
basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + # Apple iOS + -ios*) + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/redis.submodule/jemalloc/configure.ac b/redis.submodule/jemalloc/configure.ac new file mode 100644 index 0000000..7a1290e --- /dev/null +++ b/redis.submodule/jemalloc/configure.ac @@ -0,0 +1,1745 @@ +dnl Process this file with autoconf to produce a configure script. +AC_INIT([Makefile.in]) + +dnl ============================================================================ +dnl Custom macro definitions. + +dnl JE_CFLAGS_APPEND(cflag) +AC_DEFUN([JE_CFLAGS_APPEND], +[ +AC_MSG_CHECKING([whether compiler supports $1]) +TCFLAGS="${CFLAGS}" +if test "x${CFLAGS}" = "x" ; then + CFLAGS="$1" +else + CFLAGS="${CFLAGS} $1" +fi +AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[ +]], [[ + return 0; +]])], + [je_cv_cflags_appended=$1] + AC_MSG_RESULT([yes]), + [je_cv_cflags_appended=] + AC_MSG_RESULT([no]) + [CFLAGS="${TCFLAGS}"] +) +]) + +dnl JE_COMPILABLE(label, hcode, mcode, rvar) +dnl +dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors +dnl cause failure. +AC_DEFUN([JE_COMPILABLE], +[ +AC_CACHE_CHECK([whether $1 is compilable], + [$4], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2], + [$3])], + [$4=yes], + [$4=no])]) +]) + +dnl ============================================================================ + +CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'` +AC_SUBST([CONFIG]) + +dnl Library revision. +rev=2 +AC_SUBST([rev]) + +srcroot=$srcdir +if test "x${srcroot}" = "x." ; then + srcroot="" +else + srcroot="${srcroot}/" +fi +AC_SUBST([srcroot]) +abs_srcroot="`cd \"${srcdir}\"; pwd`/" +AC_SUBST([abs_srcroot]) + +objroot="" +AC_SUBST([objroot]) +abs_objroot="`pwd`/" +AC_SUBST([abs_objroot]) + +dnl Munge install path variables. +if test "x$prefix" = "xNONE" ; then + prefix="/usr/local" +fi +if test "x$exec_prefix" = "xNONE" ; then + exec_prefix=$prefix +fi +PREFIX=$prefix +AC_SUBST([PREFIX]) +BINDIR=`eval echo $bindir` +BINDIR=`eval echo $BINDIR` +AC_SUBST([BINDIR]) +INCLUDEDIR=`eval echo $includedir` +INCLUDEDIR=`eval echo $INCLUDEDIR` +AC_SUBST([INCLUDEDIR]) +LIBDIR=`eval echo $libdir` +LIBDIR=`eval echo $LIBDIR` +AC_SUBST([LIBDIR]) +DATADIR=`eval echo $datadir` +DATADIR=`eval echo $DATADIR` +AC_SUBST([DATADIR]) +MANDIR=`eval echo $mandir` +MANDIR=`eval echo $MANDIR` +AC_SUBST([MANDIR]) + +dnl Support for building documentation. 
+AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH]) +if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then + DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl" +elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then + DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets" +else + dnl Documentation building will fail if this default gets used. + DEFAULT_XSLROOT="" +fi +AC_ARG_WITH([xslroot], + [AS_HELP_STRING([--with-xslroot=], [XSL stylesheet root path])], [ +if test "x$with_xslroot" = "xno" ; then + XSLROOT="${DEFAULT_XSLROOT}" +else + XSLROOT="${with_xslroot}" +fi +], + XSLROOT="${DEFAULT_XSLROOT}" +) +AC_SUBST([XSLROOT]) + +dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise, +dnl just prevent autoconf from molesting CFLAGS. +CFLAGS=$CFLAGS +AC_PROG_CC +if test "x$GCC" != "xyes" ; then + AC_CACHE_CHECK([whether compiler is MSVC], + [je_cv_msvc], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#ifndef _MSC_VER + int fail[-1]; +#endif +])], + [je_cv_msvc=yes], + [je_cv_msvc=no])]) +fi + +if test "x$CFLAGS" = "x" ; then + no_CFLAGS="yes" + if test "x$GCC" = "xyes" ; then + JE_CFLAGS_APPEND([-std=gnu99]) + if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) + fi + JE_CFLAGS_APPEND([-Wall]) + JE_CFLAGS_APPEND([-Werror=declaration-after-statement]) + JE_CFLAGS_APPEND([-pipe]) + JE_CFLAGS_APPEND([-g3]) + elif test "x$je_cv_msvc" = "xyes" ; then + CC="$CC -nologo" + JE_CFLAGS_APPEND([-Zi]) + JE_CFLAGS_APPEND([-MT]) + JE_CFLAGS_APPEND([-W3]) + JE_CFLAGS_APPEND([-FS]) + CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat" + fi +fi +dnl Append EXTRA_CFLAGS to CFLAGS, if defined. +if test "x$EXTRA_CFLAGS" != "x" ; then + JE_CFLAGS_APPEND([$EXTRA_CFLAGS]) +fi +AC_PROG_CPP + +AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0]) +if test "x${ac_cv_big_endian}" = "x1" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ]) +fi + +if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then + CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99" +fi + +AC_CHECK_SIZEOF([void *]) +if test "x${ac_cv_sizeof_void_p}" = "x8" ; then + LG_SIZEOF_PTR=3 +elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then + LG_SIZEOF_PTR=2 +else + AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR]) + +AC_CHECK_SIZEOF([int]) +if test "x${ac_cv_sizeof_int}" = "x8" ; then + LG_SIZEOF_INT=3 +elif test "x${ac_cv_sizeof_int}" = "x4" ; then + LG_SIZEOF_INT=2 +else + AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT]) + +AC_CHECK_SIZEOF([long]) +if test "x${ac_cv_sizeof_long}" = "x8" ; then + LG_SIZEOF_LONG=3 +elif test "x${ac_cv_sizeof_long}" = "x4" ; then + LG_SIZEOF_LONG=2 +else + AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG]) + +AC_CHECK_SIZEOF([intmax_t]) +if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then + LG_SIZEOF_INTMAX_T=4 +elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then + LG_SIZEOF_INTMAX_T=3 +elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then + LG_SIZEOF_INTMAX_T=2 +else + AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T]) + +AC_CANONICAL_HOST +dnl CPU-specific settings. 
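The LG_SIZEOF_* values defined above are base-2 logarithms of the corresponding type sizes; a minimal illustrative sketch of the mapping those checks implement (not part of the patch):

    # Illustrative only: byte size of a type -> LG_SIZEOF_* value fed to AC_DEFINE_UNQUOTED.
    for size in 4 8 16; do
      case "$size" in
        4)  lg=2 ;;   # 32-bit pointers/ints
        8)  lg=3 ;;   # 64-bit pointers, long on LP64
        16) lg=4 ;;   # 128-bit intmax_t on some platforms
      esac
      echo "sizeof=$size -> LG_SIZEOF=$lg (2^$lg = $size)"
    done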
+CPU_SPINWAIT="" +case "${host_cpu}" in + i686|x86_64) + AC_CACHE_VAL([je_cv_pause], + [JE_COMPILABLE([pause instruction], [], + [[__asm__ volatile("pause"); return 0;]], + [je_cv_pause])]) + if test "x${je_cv_pause}" = "xyes" ; then + CPU_SPINWAIT='__asm__ volatile("pause")' + fi + ;; + powerpc) + AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ]) + ;; + *) + ;; +esac +AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) + +LD_PRELOAD_VAR="LD_PRELOAD" +so="so" +importlib="${so}" +o="$ac_objext" +a="a" +exe="$ac_exeext" +libprefix="lib" +DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' +RPATH='-Wl,-rpath,$(1)' +SOREV="${so}.${rev}" +PIC_CFLAGS='-fPIC -DPIC' +CTARGET='-o $@' +LDTARGET='-o $@' +EXTRA_LDFLAGS= +ARFLAGS='crus' +AROUT=' $@' +CC_MM=1 + +AN_MAKEVAR([AR], [AC_PROG_AR]) +AN_PROGRAM([ar], [AC_PROG_AR]) +AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)]) +AC_PROG_AR + +dnl Platform-specific settings. abi and RPATH can probably be determined +dnl programmatically, but doing so is error-prone, which makes it generally +dnl not worth the trouble. +dnl +dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the +dnl definitions need to be seen before any headers are included, which is a pain +dnl to make happen otherwise. +default_munmap="1" +maps_coalesce="1" +case "${host}" in + *-*-darwin* | *-*-ios*) + CFLAGS="$CFLAGS" + abi="macho" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + RPATH="" + LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" + so="dylib" + importlib="${so}" + force_tls="0" + DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' + SOREV="${rev}.${so}" + sbrk_deprecated="1" + ;; + *-*-freebsd*) + CFLAGS="$CFLAGS" + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + force_lazy_lock="1" + ;; + *-*-dragonfly*) + CFLAGS="$CFLAGS" + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + ;; + *-*-openbsd*) + CFLAGS="$CFLAGS" + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + force_tls="0" + ;; + *-*-bitrig*) + CFLAGS="$CFLAGS" + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + ;; + *-*-linux*) + CFLAGS="$CFLAGS" + CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" + abi="elf" + AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) + AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) + AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) + default_munmap="0" + ;; + *-*-netbsd*) + AC_MSG_CHECKING([ABI]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[#ifdef __ELF__ +/* ELF */ +#else +#error aout +#endif +]])], + [CFLAGS="$CFLAGS"; abi="elf"], + [abi="aout"]) + AC_MSG_RESULT([$abi]) + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + ;; + *-*-solaris2*) + CFLAGS="$CFLAGS" + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + RPATH='-Wl,-R,$(1)' + dnl Solaris needs this for sigwait(). 
+ CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" + LIBS="$LIBS -lposix4 -lsocket -lnsl" + ;; + *-ibm-aix*) + if "$LG_SIZEOF_PTR" = "8"; then + dnl 64bit AIX + LD_PRELOAD_VAR="LDR_PRELOAD64" + else + dnl 32bit AIX + LD_PRELOAD_VAR="LDR_PRELOAD" + fi + abi="xcoff" + ;; + *-*-mingw* | *-*-cygwin*) + abi="pecoff" + force_tls="0" + force_lazy_lock="1" + maps_coalesce="0" + RPATH="" + so="dll" + if test "x$je_cv_msvc" = "xyes" ; then + importlib="lib" + DSO_LDFLAGS="-LD" + EXTRA_LDFLAGS="-link -DEBUG" + CTARGET='-Fo$@' + LDTARGET='-Fe$@' + AR='lib' + ARFLAGS='-nologo -out:' + AROUT='$@' + CC_MM= + else + importlib="${so}" + DSO_LDFLAGS="-shared" + fi + a="lib" + libprefix="" + SOREV="${so}" + PIC_CFLAGS="" + ;; + *) + AC_MSG_RESULT([Unsupported operating system: ${host}]) + abi="elf" + ;; +esac + +JEMALLOC_USABLE_SIZE_CONST=const +AC_CHECK_HEADERS([malloc.h], [ + AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM( + [#include + #include + size_t malloc_usable_size(const void *ptr); + ], + [])],[ + AC_MSG_RESULT([yes]) + ],[ + JEMALLOC_USABLE_SIZE_CONST= + AC_MSG_RESULT([no]) + ]) +]) +AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST]) +AC_SUBST([abi]) +AC_SUBST([RPATH]) +AC_SUBST([LD_PRELOAD_VAR]) +AC_SUBST([so]) +AC_SUBST([importlib]) +AC_SUBST([o]) +AC_SUBST([a]) +AC_SUBST([exe]) +AC_SUBST([libprefix]) +AC_SUBST([DSO_LDFLAGS]) +AC_SUBST([EXTRA_LDFLAGS]) +AC_SUBST([SOREV]) +AC_SUBST([PIC_CFLAGS]) +AC_SUBST([CTARGET]) +AC_SUBST([LDTARGET]) +AC_SUBST([MKLIB]) +AC_SUBST([ARFLAGS]) +AC_SUBST([AROUT]) +AC_SUBST([CC_MM]) + +JE_COMPILABLE([__attribute__ syntax], + [static __attribute__((unused)) void foo(void){}], + [], + [je_cv_attribute]) +if test "x${je_cv_attribute}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) + if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then + JE_CFLAGS_APPEND([-fvisibility=hidden]) + fi +fi +dnl Check for tls_model attribute support (clang 3.0 still lacks support). +SAVED_CFLAGS="${CFLAGS}" +JE_CFLAGS_APPEND([-Werror]) +JE_COMPILABLE([tls_model attribute], [], + [static __thread int + __attribute__((tls_model("initial-exec"), unused)) foo; + foo = 0;], + [je_cv_tls_model]) +CFLAGS="${SAVED_CFLAGS}" +if test "x${je_cv_tls_model}" = "xyes" ; then + AC_DEFINE([JEMALLOC_TLS_MODEL], + [__attribute__((tls_model("initial-exec")))]) +else + AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) +fi +dnl Check for alloc_size attribute support. +SAVED_CFLAGS="${CFLAGS}" +JE_CFLAGS_APPEND([-Werror]) +JE_COMPILABLE([alloc_size attribute], [#include ], + [void *foo(size_t size) __attribute__((alloc_size(1)));], + [je_cv_alloc_size]) +CFLAGS="${SAVED_CFLAGS}" +if test "x${je_cv_alloc_size}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ]) +fi +dnl Check for format(gnu_printf, ...) attribute support. +SAVED_CFLAGS="${CFLAGS}" +JE_CFLAGS_APPEND([-Werror]) +JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include ], + [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));], + [je_cv_format_gnu_printf]) +CFLAGS="${SAVED_CFLAGS}" +if test "x${je_cv_format_gnu_printf}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ]) +fi +dnl Check for format(printf, ...) attribute support. +SAVED_CFLAGS="${CFLAGS}" +JE_CFLAGS_APPEND([-Werror]) +JE_COMPILABLE([format(printf, ...) attribute], [#include ], + [void *foo(const char *format, ...) 
__attribute__((format(printf, 1, 2)));], + [je_cv_format_printf]) +CFLAGS="${SAVED_CFLAGS}" +if test "x${je_cv_format_printf}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ]) +fi + +dnl Support optional additions to rpath. +AC_ARG_WITH([rpath], + [AS_HELP_STRING([--with-rpath=], [Colon-separated rpath (ELF systems only)])], +if test "x$with_rpath" = "xno" ; then + RPATH_EXTRA= +else + RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`" +fi, + RPATH_EXTRA= +) +AC_SUBST([RPATH_EXTRA]) + +dnl Disable rules that do automatic regeneration of configure output by default. +AC_ARG_ENABLE([autogen], + [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])], +if test "x$enable_autogen" = "xno" ; then + enable_autogen="0" +else + enable_autogen="1" +fi +, +enable_autogen="0" +) +AC_SUBST([enable_autogen]) + +AC_PROG_INSTALL +AC_PROG_RANLIB +AC_PATH_PROG([LD], [ld], [false], [$PATH]) +AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) + +public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size" + +dnl Check for allocator-related functions that should be wrapped. +AC_CHECK_FUNC([memalign], + [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) + public_syms="${public_syms} memalign"]) +AC_CHECK_FUNC([valloc], + [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) + public_syms="${public_syms} valloc"]) + +dnl Do not compute test code coverage by default. +GCOV_FLAGS= +AC_ARG_ENABLE([code-coverage], + [AS_HELP_STRING([--enable-code-coverage], + [Enable code coverage])], +[if test "x$enable_code_coverage" = "xno" ; then + enable_code_coverage="0" +else + enable_code_coverage="1" +fi +], +[enable_code_coverage="0"] +) +if test "x$enable_code_coverage" = "x1" ; then + deoptimize="no" + echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes" + if test "x${deoptimize}" = "xyes" ; then + JE_CFLAGS_APPEND([-O0]) + fi + JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage]) + EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage" + AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ]) +fi +AC_SUBST([enable_code_coverage]) + +dnl Perform no name mangling by default. +AC_ARG_WITH([mangling], + [AS_HELP_STRING([--with-mangling=], [Mangle symbols in ])], + [mangling_map="$with_mangling"], [mangling_map=""]) + +dnl Do not prefix public APIs by default. +AC_ARG_WITH([jemalloc_prefix], + [AS_HELP_STRING([--with-jemalloc-prefix=], [Prefix to prepend to all public APIs])], + [JEMALLOC_PREFIX="$with_jemalloc_prefix"], + [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then + JEMALLOC_PREFIX="" +else + JEMALLOC_PREFIX="je_" +fi] +) +if test "x$JEMALLOC_PREFIX" != "x" ; then + JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` + AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) + AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) +fi +AC_SUBST([JEMALLOC_CPREFIX]) + +AC_ARG_WITH([export], + [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])], + [if test "x$with_export" = "xno"; then + AC_DEFINE([JEMALLOC_EXPORT],[]) +fi] +) + +dnl Mangle library-private APIs. 
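A hedged sketch of how the prefix machinery above is exercised; the build commands and paths are illustrative, not part of the patch. With a non-empty prefix, every name in $public_syms is exposed with that prefix (for example malloc becomes je_malloc), so the library no longer interposes on the libc allocator; on Darwin and Windows (macho/pecoff ABIs) je_ is already the default, as set above.

    ./configure --with-jemalloc-prefix=je_
    make
    # On ELF platforms the shared library carries the .${rev} suffix (rev=2 above).
    nm -D lib/libjemalloc.so.2 | grep ' je_malloc$'   # expect the prefixed symbol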
+AC_ARG_WITH([private_namespace], + [AS_HELP_STRING([--with-private-namespace=], [Prefix to prepend to all library-private APIs])], + [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"], + [JEMALLOC_PRIVATE_NAMESPACE="je_"] +) +AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE]) +private_namespace="$JEMALLOC_PRIVATE_NAMESPACE" +AC_SUBST([private_namespace]) + +dnl Do not add suffix to installed files by default. +AC_ARG_WITH([install_suffix], + [AS_HELP_STRING([--with-install-suffix=], [Suffix to append to all installed files])], + [INSTALL_SUFFIX="$with_install_suffix"], + [INSTALL_SUFFIX=] +) +install_suffix="$INSTALL_SUFFIX" +AC_SUBST([install_suffix]) + +dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of +dnl jemalloc_protos_jet.h easy. +je_="je_" +AC_SUBST([je_]) + +cfgoutputs_in="Makefile.in" +cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in" +cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in" +cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in" +cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in" +cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" +cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" + +cfgoutputs_out="Makefile" +cfgoutputs_out="${cfgoutputs_out} jemalloc.pc" +cfgoutputs_out="${cfgoutputs_out} doc/html.xsl" +cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl" +cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" +cfgoutputs_out="${cfgoutputs_out} test/test.sh" +cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" + +cfgoutputs_tup="Makefile" +cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in" +cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" +cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" +cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" + +cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" 
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh" +cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh" +cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" + +cfghdrs_out="include/jemalloc/jemalloc_defs.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/size_classes.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h" +cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h" + +cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" + +dnl Silence irrelevant compiler warnings by default. +AC_ARG_ENABLE([cc-silence], + [AS_HELP_STRING([--disable-cc-silence], + [Do not silence irrelevant compiler warnings])], +[if test "x$enable_cc_silence" = "xno" ; then + enable_cc_silence="0" +else + enable_cc_silence="1" +fi +], +[enable_cc_silence="1"] +) +if test "x$enable_cc_silence" = "x1" ; then + AC_DEFINE([JEMALLOC_CC_SILENCE], [ ]) +fi + +dnl Do not compile with debugging by default. +AC_ARG_ENABLE([debug], + [AS_HELP_STRING([--enable-debug], + [Build debugging code (implies --enable-ivsalloc)])], +[if test "x$enable_debug" = "xno" ; then + enable_debug="0" +else + enable_debug="1" +fi +], +[enable_debug="0"] +) +if test "x$enable_debug" = "x1" ; then + AC_DEFINE([JEMALLOC_DEBUG], [ ]) +fi +if test "x$enable_debug" = "x1" ; then + AC_DEFINE([JEMALLOC_DEBUG], [ ]) + enable_ivsalloc="1" +fi +AC_SUBST([enable_debug]) + +dnl Do not validate pointers by default. +AC_ARG_ENABLE([ivsalloc], + [AS_HELP_STRING([--enable-ivsalloc], + [Validate pointers passed through the public API])], +[if test "x$enable_ivsalloc" = "xno" ; then + enable_ivsalloc="0" +else + enable_ivsalloc="1" +fi +], +[enable_ivsalloc="0"] +) +if test "x$enable_ivsalloc" = "x1" ; then + AC_DEFINE([JEMALLOC_IVSALLOC], [ ]) +fi + +dnl Only optimize if not debugging. +if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then + dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS. + optimize="no" + echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes" + if test "x${optimize}" = "xyes" ; then + if test "x$GCC" = "xyes" ; then + JE_CFLAGS_APPEND([-O3]) + JE_CFLAGS_APPEND([-funroll-loops]) + elif test "x$je_cv_msvc" = "xyes" ; then + JE_CFLAGS_APPEND([-O2]) + else + JE_CFLAGS_APPEND([-O]) + fi + fi +fi + +dnl Enable statistics calculation by default. 
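Illustrative configure invocations for the debug and optimization logic above (not part of the patch):

    ./configure --enable-debug    # defines JEMALLOC_DEBUG, turns on ivsalloc,
                                  # and skips the -O3/-funroll-loops block above
    ./configure                   # with no CFLAGS given, no_CFLAGS=yes, so gcc
                                  # builds get -O3 -funroll-loops and MSVC gets -O2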
+AC_ARG_ENABLE([stats], + [AS_HELP_STRING([--disable-stats], + [Disable statistics calculation/reporting])], +[if test "x$enable_stats" = "xno" ; then + enable_stats="0" +else + enable_stats="1" +fi +], +[enable_stats="1"] +) +if test "x$enable_stats" = "x1" ; then + AC_DEFINE([JEMALLOC_STATS], [ ]) +fi +AC_SUBST([enable_stats]) + +dnl Do not enable profiling by default. +AC_ARG_ENABLE([prof], + [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])], +[if test "x$enable_prof" = "xno" ; then + enable_prof="0" +else + enable_prof="1" +fi +], +[enable_prof="0"] +) +if test "x$enable_prof" = "x1" ; then + backtrace_method="" +else + backtrace_method="N/A" +fi + +AC_ARG_ENABLE([prof-libunwind], + [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])], +[if test "x$enable_prof_libunwind" = "xno" ; then + enable_prof_libunwind="0" +else + enable_prof_libunwind="1" +fi +], +[enable_prof_libunwind="0"] +) +AC_ARG_WITH([static_libunwind], + [AS_HELP_STRING([--with-static-libunwind=], + [Path to static libunwind library; use rather than dynamically linking])], +if test "x$with_static_libunwind" = "xno" ; then + LUNWIND="-lunwind" +else + if test ! -f "$with_static_libunwind" ; then + AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind]) + fi + LUNWIND="$with_static_libunwind" +fi, + LUNWIND="-lunwind" +) +if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then + AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) + if test "x$LUNWIND" = "x-lunwind" ; then + AC_CHECK_LIB([unwind], [unw_backtrace], [LIBS="$LIBS $LUNWIND"], + [enable_prof_libunwind="0"]) + else + LIBS="$LIBS $LUNWIND" + fi + if test "x${enable_prof_libunwind}" = "x1" ; then + backtrace_method="libunwind" + AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ]) + fi +fi + +AC_ARG_ENABLE([prof-libgcc], + [AS_HELP_STRING([--disable-prof-libgcc], + [Do not use libgcc for backtracing])], +[if test "x$enable_prof_libgcc" = "xno" ; then + enable_prof_libgcc="0" +else + enable_prof_libgcc="1" +fi +], +[enable_prof_libgcc="1"] +) +if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ + -a "x$GCC" = "xyes" ; then + AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) + AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"]) + if test "x${enable_prof_libgcc}" = "x1" ; then + backtrace_method="libgcc" + AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) + fi +else + enable_prof_libgcc="0" +fi + +AC_ARG_ENABLE([prof-gcc], + [AS_HELP_STRING([--disable-prof-gcc], + [Do not use gcc intrinsics for backtracing])], +[if test "x$enable_prof_gcc" = "xno" ; then + enable_prof_gcc="0" +else + enable_prof_gcc="1" +fi +], +[enable_prof_gcc="1"] +) +if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ + -a "x$GCC" = "xyes" ; then + JE_CFLAGS_APPEND([-fno-omit-frame-pointer]) + backtrace_method="gcc intrinsics" + AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) +else + enable_prof_gcc="0" +fi + +if test "x$backtrace_method" = "x" ; then + backtrace_method="none (disabling profiling)" + enable_prof="0" +fi +AC_MSG_CHECKING([configured backtracing method]) +AC_MSG_RESULT([$backtrace_method]) +if test "x$enable_prof" = "x1" ; then + if test "x$abi" != "xpecoff"; then + dnl Heap profiling uses the log(3) function. + LIBS="$LIBS -lm" + fi + + AC_DEFINE([JEMALLOC_PROF], [ ]) +fi +AC_SUBST([enable_prof]) + +dnl Enable thread-specific caching by default. 
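The profiling checks above select a backtrace backend in the order libunwind, then libgcc, then gcc intrinsics; a few illustrative invocations (the static libunwind path is hypothetical):

    ./configure --enable-prof --enable-prof-libunwind     # prefer libunwind
    ./configure --enable-prof --disable-prof-libgcc       # fall through to gcc intrinsics
    ./configure --enable-prof --enable-prof-libunwind \
        --with-static-libunwind=/opt/libunwind/lib/libunwind.a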
+AC_ARG_ENABLE([tcache], + [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])], +[if test "x$enable_tcache" = "xno" ; then + enable_tcache="0" +else + enable_tcache="1" +fi +], +[enable_tcache="1"] +) +if test "x$enable_tcache" = "x1" ; then + AC_DEFINE([JEMALLOC_TCACHE], [ ]) +fi +AC_SUBST([enable_tcache]) + +dnl Indicate whether adjacent virtual memory mappings automatically coalesce +dnl (and fragment on demand). +if test "x${maps_coalesce}" = "x1" ; then + AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ]) +fi + +dnl Enable VM deallocation via munmap() by default. +AC_ARG_ENABLE([munmap], + [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])], +[if test "x$enable_munmap" = "xno" ; then + enable_munmap="0" +else + enable_munmap="1" +fi +], +[enable_munmap="${default_munmap}"] +) +if test "x$enable_munmap" = "x1" ; then + AC_DEFINE([JEMALLOC_MUNMAP], [ ]) +fi +AC_SUBST([enable_munmap]) + +dnl Enable allocation from DSS if supported by the OS. +have_dss="1" +dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support. +AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"]) +if test "x$have_sbrk" = "x1" ; then + if test "x$sbrk_deprecated" = "x1" ; then + AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated]) + have_dss="0" + fi +else + have_dss="0" +fi + +if test "x$have_dss" = "x1" ; then + AC_DEFINE([JEMALLOC_DSS], [ ]) +fi + +dnl Support the junk/zero filling option by default. +AC_ARG_ENABLE([fill], + [AS_HELP_STRING([--disable-fill], + [Disable support for junk/zero filling, quarantine, and redzones])], +[if test "x$enable_fill" = "xno" ; then + enable_fill="0" +else + enable_fill="1" +fi +], +[enable_fill="1"] +) +if test "x$enable_fill" = "x1" ; then + AC_DEFINE([JEMALLOC_FILL], [ ]) +fi +AC_SUBST([enable_fill]) + +dnl Disable utrace(2)-based tracing by default. +AC_ARG_ENABLE([utrace], + [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])], +[if test "x$enable_utrace" = "xno" ; then + enable_utrace="0" +else + enable_utrace="1" +fi +], +[enable_utrace="0"] +) +JE_COMPILABLE([utrace(2)], [ +#include +#include +#include +#include +#include +], [ + utrace((void *)0, 0); +], [je_cv_utrace]) +if test "x${je_cv_utrace}" = "xno" ; then + enable_utrace="0" +fi +if test "x$enable_utrace" = "x1" ; then + AC_DEFINE([JEMALLOC_UTRACE], [ ]) +fi +AC_SUBST([enable_utrace]) + +dnl Support Valgrind by default. +AC_ARG_ENABLE([valgrind], + [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])], +[if test "x$enable_valgrind" = "xno" ; then + enable_valgrind="0" +else + enable_valgrind="1" +fi +], +[enable_valgrind="1"] +) +if test "x$enable_valgrind" = "x1" ; then + JE_COMPILABLE([valgrind], [ +#include +#include + +#if !defined(VALGRIND_RESIZEINPLACE_BLOCK) +# error "Incompatible Valgrind version" +#endif +], [], [je_cv_valgrind]) + if test "x${je_cv_valgrind}" = "xno" ; then + enable_valgrind="0" + fi + if test "x$enable_valgrind" = "x1" ; then + AC_DEFINE([JEMALLOC_VALGRIND], [ ]) + fi +fi +AC_SUBST([enable_valgrind]) + +dnl Do not support the xmalloc option by default. +AC_ARG_ENABLE([xmalloc], + [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])], +[if test "x$enable_xmalloc" = "xno" ; then + enable_xmalloc="0" +else + enable_xmalloc="1" +fi +], +[enable_xmalloc="0"] +) +if test "x$enable_xmalloc" = "x1" ; then + AC_DEFINE([JEMALLOC_XMALLOC], [ ]) +fi +AC_SUBST([enable_xmalloc]) + +dnl Support cache-oblivious allocation alignment by default. 
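Illustrative use of the feature toggles defined above (not part of the patch; --enable-utrace is only honored where the utrace(2) probe above compiles):

    ./configure --disable-fill --disable-valgrind --disable-munmap
    ./configure --enable-xmalloc --enable-utrace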
+AC_ARG_ENABLE([cache-oblivious], + [AS_HELP_STRING([--disable-cache-oblivious], + [Disable support for cache-oblivious allocation alignment])], +[if test "x$enable_cache_oblivious" = "xno" ; then + enable_cache_oblivious="0" +else + enable_cache_oblivious="1" +fi +], +[enable_cache_oblivious="1"] +) +if test "x$enable_cache_oblivious" = "x1" ; then + AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ]) +fi +AC_SUBST([enable_cache_oblivious]) + +dnl ============================================================================ +dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found. +dnl One of those two functions should (theoretically) exist on all platforms +dnl that jemalloc currently has a chance of functioning on without modification. +dnl We additionally assume ffs() or __builtin_ffs() are defined if +dnl ffsl() or __builtin_ffsl() are defined, respectively. +JE_COMPILABLE([a program using __builtin_ffsl], [ +#include +#include +#include +], [ + { + int rv = __builtin_ffsl(0x08); + printf("%d\n", rv); + } +], [je_cv_gcc_builtin_ffsl]) +if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl]) + AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs]) +else + JE_COMPILABLE([a program using ffsl], [ + #include + #include + #include + ], [ + { + int rv = ffsl(0x08); + printf("%d\n", rv); + } + ], [je_cv_function_ffsl]) + if test "x${je_cv_function_ffsl}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl]) + AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs]) + else + AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()]) + fi +fi + +AC_ARG_WITH([lg_tiny_min], + [AS_HELP_STRING([--with-lg-tiny-min=], + [Base 2 log of minimum tiny size class to support])], + [LG_TINY_MIN="$with_lg_tiny_min"], + [LG_TINY_MIN="3"]) +AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN]) + +AC_ARG_WITH([lg_quantum], + [AS_HELP_STRING([--with-lg-quantum=], + [Base 2 log of minimum allocation alignment])], + [LG_QUANTA="$with_lg_quantum"], + [LG_QUANTA="3 4"]) +if test "x$with_lg_quantum" != "x" ; then + AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum]) +fi + +AC_ARG_WITH([lg_page], + [AS_HELP_STRING([--with-lg-page=], [Base 2 log of system page size])], + [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"]) +if test "x$LG_PAGE" = "xdetect"; then + AC_CACHE_CHECK([LG_PAGE], + [je_cv_lg_page], + AC_RUN_IFELSE([AC_LANG_PROGRAM( +[[ +#include +#ifdef _WIN32 +#include +#else +#include +#endif +#include +]], +[[ + int result; + FILE *f; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwPageSize; +#else + result = sysconf(_SC_PAGESIZE); +#endif + if (result == -1) { + return 1; + } + result = JEMALLOC_INTERNAL_FFSL(result) - 1; + + f = fopen("conftest.out", "w"); + if (f == NULL) { + return 1; + } + fprintf(f, "%d\n", result); + fclose(f); + + return 0; +]])], + [je_cv_lg_page=`cat conftest.out`], + [je_cv_lg_page=undefined], + [je_cv_lg_page=12])) +fi +if test "x${je_cv_lg_page}" != "x" ; then + LG_PAGE="${je_cv_lg_page}" +fi +if test "x${LG_PAGE}" != "xundefined" ; then + AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE]) +else + AC_MSG_ERROR([cannot determine value for LG_PAGE]) +fi + +AC_ARG_WITH([lg_page_sizes], + [AS_HELP_STRING([--with-lg-page-sizes=], + [Base 2 logs of system page sizes to support])], + [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"]) + +AC_ARG_WITH([lg_size_class_group], + [AS_HELP_STRING([--with-lg-size-class-group=], + [Base 2 log of size classes per doubling])], + 
[LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"], + [LG_SIZE_CLASS_GROUP="2"]) + +dnl ============================================================================ +dnl jemalloc configuration. +dnl + +dnl Set VERSION if source directory is inside a git repository. +if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then + dnl Pattern globs aren't powerful enough to match both single- and + dnl double-digit version numbers, so iterate over patterns to support up to + dnl version 99.99.99 without any accidental matches. + rm -f "${objroot}VERSION" + for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ + '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do + if test ! -e "${objroot}VERSION" ; then + (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null + if test $? -eq 0 ; then + mv "${objroot}VERSION.tmp" "${objroot}VERSION" + break + fi + fi + done +fi +rm -f "${objroot}VERSION.tmp" +if test ! -e "${objroot}VERSION" ; then + if test ! -e "${srcroot}VERSION" ; then + AC_MSG_RESULT( + [Missing VERSION file, and unable to generate it; creating bogus VERSION]) + echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION" + else + cp ${srcroot}VERSION ${objroot}VERSION + fi +fi +jemalloc_version=`cat "${objroot}VERSION"` +jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'` +jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'` +jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'` +jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'` +jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'` +AC_SUBST([jemalloc_version]) +AC_SUBST([jemalloc_version_major]) +AC_SUBST([jemalloc_version_minor]) +AC_SUBST([jemalloc_version_bugfix]) +AC_SUBST([jemalloc_version_nrev]) +AC_SUBST([jemalloc_version_gid]) + +dnl ============================================================================ +dnl Configure pthreads. + +if test "x$abi" != "xpecoff" ; then + AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])]) + dnl Some systems may embed pthreads functionality in libc; check for libpthread + dnl first, but try libc too before failing. + AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"], + [AC_SEARCH_LIBS([pthread_create], , , + AC_MSG_ERROR([libpthread is missing]))]) +fi + +CPPFLAGS="$CPPFLAGS -D_REENTRANT" + +dnl Check whether clock_gettime(2) is in libc or librt. This function is only +dnl used in test code, so save the result to TESTLIBS to avoid poluting LIBS. +SAVED_LIBS="${LIBS}" +LIBS= +AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"]) +AC_SUBST([TESTLIBS]) +LIBS="${SAVED_LIBS}" + +dnl Check if the GNU-specific secure_getenv function exists. +AC_CHECK_FUNC([secure_getenv], + [have_secure_getenv="1"], + [have_secure_getenv="0"] + ) +if test "x$have_secure_getenv" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ]) +fi + +dnl Check if the Solaris/BSD issetugid function exists. 
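Concretely, the tr/awk pipeline in the version-parsing block above (the [$]1 forms are m4-quoted $1, $2, ...) splits a VERSION string into its five components; a sketch with a hypothetical version string, not part of the patch:

    jemalloc_version="4.0.3-0-g65d92fe9c54b8c0d30e0b84b52d1e00e0e73d605"   # hypothetical value
    echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'   # major  -> 4
    echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'   # minor  -> 0
    echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'   # bugfix -> 3
    echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'   # nrev   -> 0
    echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'   # gid    -> the 40-character hash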
+AC_CHECK_FUNC([issetugid], + [have_issetugid="1"], + [have_issetugid="0"] + ) +if test "x$have_issetugid" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ]) +fi + +dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use +dnl it rather than pthreads TSD cleanup functions to support cleanup during +dnl thread exit, in order to avoid pthreads library recursion during +dnl bootstrapping. +AC_CHECK_FUNC([_malloc_thread_cleanup], + [have__malloc_thread_cleanup="1"], + [have__malloc_thread_cleanup="0"] + ) +if test "x$have__malloc_thread_cleanup" = "x1" ; then + AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) + force_tls="1" +fi + +dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If +dnl so, mutex initialization causes allocation, and we need to implement this +dnl callback function in order to prevent recursive allocation. +AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], + [have__pthread_mutex_init_calloc_cb="1"], + [have__pthread_mutex_init_calloc_cb="0"] + ) +if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then + AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) +fi + +dnl Disable lazy locking by default. +AC_ARG_ENABLE([lazy_lock], + [AS_HELP_STRING([--enable-lazy-lock], + [Enable lazy locking (only lock when multi-threaded)])], +[if test "x$enable_lazy_lock" = "xno" ; then + enable_lazy_lock="0" +else + enable_lazy_lock="1" +fi +], +[enable_lazy_lock=""] +) +if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then + AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) + enable_lazy_lock="1" +fi +if test "x$enable_lazy_lock" = "x1" ; then + if test "x$abi" != "xpecoff" ; then + AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])]) + AC_CHECK_FUNC([dlsym], [], + [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], + [AC_MSG_ERROR([libdl is missing])]) + ]) + fi + AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) +else + enable_lazy_lock="0" +fi +AC_SUBST([enable_lazy_lock]) + +AC_ARG_ENABLE([tls], + [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])], +if test "x$enable_tls" = "xno" ; then + enable_tls="0" +else + enable_tls="1" +fi +, +enable_tls="" +) +if test "x${enable_tls}" = "x" ; then + if test "x${force_tls}" = "x1" ; then + AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues]) + enable_tls="1" + elif test "x${force_tls}" = "x0" ; then + AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues]) + enable_tls="0" + else + enable_tls="1" + fi +fi +if test "x${enable_tls}" = "x1" ; then +AC_MSG_CHECKING([for TLS]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[ + __thread int x; +]], [[ + x = 42; + + return 0; +]])], + AC_MSG_RESULT([yes]), + AC_MSG_RESULT([no]) + enable_tls="0") +else + enable_tls="0" +fi +AC_SUBST([enable_tls]) +if test "x${enable_tls}" = "x1" ; then + if test "x${force_tls}" = "x0" ; then + AC_MSG_WARN([TLS enabled despite being marked unusable on this platform]) + fi + AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) +elif test "x${force_tls}" = "x1" ; then + AC_MSG_WARN([TLS disabled despite being marked critical on this platform]) +fi + +dnl ============================================================================ +dnl Check for C11 atomics. 
+ +JE_COMPILABLE([C11 atomics], [ +#include +#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) +#include +#else +#error Atomics not available +#endif +], [ + uint64_t *p = (uint64_t *)0; + uint64_t x = 1; + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + uint64_t r = atomic_fetch_add(a, x) + x; + return (r == 0); +], [je_cv_c11atomics]) +if test "x${je_cv_c11atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_C11ATOMICS]) +fi + +dnl ============================================================================ +dnl Check for atomic(9) operations as provided on FreeBSD. + +JE_COMPILABLE([atomic(9)], [ +#include +#include +#include +], [ + { + uint32_t x32 = 0; + volatile uint32_t *x32p = &x32; + atomic_fetchadd_32(x32p, 1); + } + { + unsigned long xlong = 0; + volatile unsigned long *xlongp = &xlong; + atomic_fetchadd_long(xlongp, 1); + } +], [je_cv_atomic9]) +if test "x${je_cv_atomic9}" = "xyes" ; then + AC_DEFINE([JEMALLOC_ATOMIC9]) +fi + +dnl ============================================================================ +dnl Check for atomic(3) operations as provided on Darwin. + +JE_COMPILABLE([Darwin OSAtomic*()], [ +#include +#include +], [ + { + int32_t x32 = 0; + volatile int32_t *x32p = &x32; + OSAtomicAdd32(1, x32p); + } + { + int64_t x64 = 0; + volatile int64_t *x64p = &x64; + OSAtomicAdd64(1, x64p); + } +], [je_cv_osatomic]) +if test "x${je_cv_osatomic}" = "xyes" ; then + AC_DEFINE([JEMALLOC_OSATOMIC], [ ]) +fi + +dnl ============================================================================ +dnl Check for madvise(2). + +JE_COMPILABLE([madvise(2)], [ +#include +], [ + { + madvise((void *)0, 0, 0); + } +], [je_cv_madvise]) +if test "x${je_cv_madvise}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ]) +fi + +dnl ============================================================================ +dnl Check whether __sync_{add,sub}_and_fetch() are available despite +dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined. + +AC_DEFUN([JE_SYNC_COMPARE_AND_SWAP_CHECK],[ + AC_CACHE_CHECK([whether to force $1-bit __sync_{add,sub}_and_fetch()], + [je_cv_sync_compare_and_swap_$2], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([ + #include + ], + [ + #ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 + { + uint$1_t x$1 = 0; + __sync_add_and_fetch(&x$1, 42); + __sync_sub_and_fetch(&x$1, 1); + } + #else + #error __GCC_HAVE_SYNC_COMPARE_AND_SWAP_$2 is defined, no need to force + #endif + ])], + [je_cv_sync_compare_and_swap_$2=yes], + [je_cv_sync_compare_and_swap_$2=no])]) + + if test "x${je_cv_sync_compare_and_swap_$2}" = "xyes" ; then + AC_DEFINE([JE_FORCE_SYNC_COMPARE_AND_SWAP_$2], [ ]) + fi +]) + +if test "x${je_cv_atomic9}" != "xyes" -a "x${je_cv_osatomic}" != "xyes" ; then + JE_SYNC_COMPARE_AND_SWAP_CHECK(32, 4) + JE_SYNC_COMPARE_AND_SWAP_CHECK(64, 8) +fi + +dnl ============================================================================ +dnl Check for __builtin_clz() and __builtin_clzl(). + +AC_CACHE_CHECK([for __builtin_clz], + [je_cv_builtin_clz], + [AC_LINK_IFELSE([AC_LANG_PROGRAM([], + [ + { + unsigned x = 0; + int y = __builtin_clz(x); + } + { + unsigned long x = 0; + int y = __builtin_clzl(x); + } + ])], + [je_cv_builtin_clz=yes], + [je_cv_builtin_clz=no])]) + +if test "x${je_cv_builtin_clz}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ]) +fi + +dnl ============================================================================ +dnl Check for spinlock(3) operations as provided on Darwin. 
+ +JE_COMPILABLE([Darwin OSSpin*()], [ +#include +#include +], [ + OSSpinLock lock = 0; + OSSpinLockLock(&lock); + OSSpinLockUnlock(&lock); +], [je_cv_osspin]) +if test "x${je_cv_osspin}" = "xyes" ; then + AC_DEFINE([JEMALLOC_OSSPIN], [ ]) +fi + +dnl ============================================================================ +dnl Darwin-related configuration. + +AC_ARG_ENABLE([zone-allocator], + [AS_HELP_STRING([--disable-zone-allocator], + [Disable zone allocator for Darwin])], +[if test "x$enable_zone_allocator" = "xno" ; then + enable_zone_allocator="0" +else + enable_zone_allocator="1" +fi +], +[if test "x${abi}" = "xmacho"; then + enable_zone_allocator="1" +fi +] +) +AC_SUBST([enable_zone_allocator]) + +if test "x${enable_zone_allocator}" = "x1" ; then + if test "x${abi}" != "xmacho"; then + AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) + fi + AC_DEFINE([JEMALLOC_ZONE], [ ]) + + dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6 + dnl releases. malloc_zone_t and malloc_introspection_t have new fields in + dnl 10.6, which is the only source-level indication of the change. + AC_MSG_CHECKING([malloc zone version]) + AC_DEFUN([JE_ZONE_PROGRAM], + [AC_LANG_PROGRAM( + [#include ], + [static int foo[[sizeof($1) $2 sizeof(void *) * $3 ? 1 : -1]]] + )]) + + AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[ + AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,15)],[JEMALLOC_ZONE_VERSION=5],[ + AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,16)],[ + AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,9)],[JEMALLOC_ZONE_VERSION=6],[ + AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,13)],[JEMALLOC_ZONE_VERSION=7],[JEMALLOC_ZONE_VERSION=] + )])],[ + AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,17)],[JEMALLOC_ZONE_VERSION=8],[ + AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,>,17)],[JEMALLOC_ZONE_VERSION=9],[JEMALLOC_ZONE_VERSION=] + )])])])]) + if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then + AC_MSG_RESULT([unsupported]) + AC_MSG_ERROR([Unsupported malloc zone version]) + fi + if test "${JEMALLOC_ZONE_VERSION}" = 9; then + JEMALLOC_ZONE_VERSION=8 + AC_MSG_RESULT([> 8]) + else + AC_MSG_RESULT([$JEMALLOC_ZONE_VERSION]) + fi + AC_DEFINE_UNQUOTED(JEMALLOC_ZONE_VERSION, [$JEMALLOC_ZONE_VERSION]) +fi + +dnl ============================================================================ +dnl Check for glibc malloc hooks + +JE_COMPILABLE([glibc malloc hook], [ +#include + +extern void (* __free_hook)(void *ptr); +extern void *(* __malloc_hook)(size_t size); +extern void *(* __realloc_hook)(void *ptr, size_t size); +], [ + void *ptr = 0L; + if (__malloc_hook) ptr = __malloc_hook(1); + if (__realloc_hook) ptr = __realloc_hook(ptr, 2); + if (__free_hook && ptr) __free_hook(ptr); +], [je_cv_glibc_malloc_hook]) +if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ]) +fi + +JE_COMPILABLE([glibc memalign hook], [ +#include + +extern void *(* __memalign_hook)(size_t alignment, size_t size); +], [ + void *ptr = 0L; + if (__memalign_hook) ptr = __memalign_hook(16, 7); +], [je_cv_glibc_memalign_hook]) +if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ]) +fi + +JE_COMPILABLE([pthreads adaptive mutexes], [ +#include +], [ + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP); + pthread_mutexattr_destroy(&attr); +], 
[je_cv_pthread_mutex_adaptive_np]) +if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ]) +fi + +dnl ============================================================================ +dnl Check for typedefs, structures, and compiler characteristics. +AC_HEADER_STDBOOL + +dnl ============================================================================ +dnl Define commands that generate output files. + +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ + f="${objroot}include/jemalloc/internal/public_symbols.txt" + mkdir -p "${objroot}include/jemalloc/internal" + cp /dev/null "${f}" + for nm in `echo ${mangling_map} |tr ',' ' '` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'` + m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'` + echo "${n}:${m}" >> "${f}" + dnl Remove name from public_syms so that it isn't redefined later. + public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '` + done + for sym in ${public_syms} ; do + n="${sym}" + m="${JEMALLOC_PREFIX}${sym}" + echo "${n}:${m}" >> "${f}" + done +], [ + srcdir="${srcdir}" + objroot="${objroot}" + mangling_map="${mangling_map}" + public_syms="${public_syms}" + JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [ + mkdir -p "${objroot}include/jemalloc/internal" + "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h" +], [ + SHELL="${SHELL}" + srcdir="${srcdir}" + objroot="${objroot}" + LG_QUANTA="${LG_QUANTA}" + LG_TINY_MIN=${LG_TINY_MIN} + LG_PAGE_SIZES="${LG_PAGE_SIZES}" + LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP} +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ + mkdir -p "${objroot}include/jemalloc" + cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) 
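With the defaults established earlier (LG_QUANTA="3 4", LG_TINY_MIN=3, LG_PAGE_SIZES defaulting to the detected LG_PAGE, LG_SIZE_CLASS_GROUP=2), the size_classes.h config command above expands to roughly the following; the page size shown is the common 4 KiB case (LG_PAGE=12) and is illustrative, not part of the patch:

    sh include/jemalloc/internal/size_classes.sh "3 4" 3 "12" 2 \
        > include/jemalloc/internal/size_classes.h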
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [ + mkdir -p "${objroot}include/jemalloc" + "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h" +], [ + srcdir="${srcdir}" + objroot="${objroot}" + install_suffix="${install_suffix}" +]) + +dnl Process .in files. +AC_SUBST([cfghdrs_in]) +AC_SUBST([cfghdrs_out]) +AC_CONFIG_HEADERS([$cfghdrs_tup]) + +dnl ============================================================================ +dnl Generate outputs. + +AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof]) +AC_SUBST([cfgoutputs_in]) +AC_SUBST([cfgoutputs_out]) +AC_OUTPUT + +dnl ============================================================================ +dnl Print out the results of configuration. +AC_MSG_RESULT([===============================================================================]) +AC_MSG_RESULT([jemalloc version : ${jemalloc_version}]) +AC_MSG_RESULT([library revision : ${rev}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([CONFIG : ${CONFIG}]) +AC_MSG_RESULT([CC : ${CC}]) +AC_MSG_RESULT([CFLAGS : ${CFLAGS}]) +AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) +AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) +AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) +AC_MSG_RESULT([LIBS : ${LIBS}]) +AC_MSG_RESULT([TESTLIBS : ${TESTLIBS}]) +AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) +AC_MSG_RESULT([XSLROOT : ${XSLROOT}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([PREFIX : ${PREFIX}]) +AC_MSG_RESULT([BINDIR : ${BINDIR}]) +AC_MSG_RESULT([DATADIR : ${DATADIR}]) +AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}]) +AC_MSG_RESULT([LIBDIR : ${LIBDIR}]) +AC_MSG_RESULT([MANDIR : ${MANDIR}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([srcroot : ${srcroot}]) +AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}]) +AC_MSG_RESULT([objroot : ${objroot}]) +AC_MSG_RESULT([abs_objroot : ${abs_objroot}]) +AC_MSG_RESULT([]) +AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) +AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) +AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) +AC_MSG_RESULT([install_suffix : ${install_suffix}]) +AC_MSG_RESULT([autogen : ${enable_autogen}]) +AC_MSG_RESULT([cc-silence : ${enable_cc_silence}]) +AC_MSG_RESULT([debug : ${enable_debug}]) +AC_MSG_RESULT([code-coverage : ${enable_code_coverage}]) +AC_MSG_RESULT([stats : ${enable_stats}]) +AC_MSG_RESULT([prof : ${enable_prof}]) +AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) +AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) +AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) 
+AC_MSG_RESULT([tcache : ${enable_tcache}]) +AC_MSG_RESULT([fill : ${enable_fill}]) +AC_MSG_RESULT([utrace : ${enable_utrace}]) +AC_MSG_RESULT([valgrind : ${enable_valgrind}]) +AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) +AC_MSG_RESULT([munmap : ${enable_munmap}]) +AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) +AC_MSG_RESULT([tls : ${enable_tls}]) +AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}]) +AC_MSG_RESULT([===============================================================================]) diff --git a/redis.submodule/jemalloc/coverage.sh b/redis.submodule/jemalloc/coverage.sh new file mode 100755 index 0000000..6d1362a --- /dev/null +++ b/redis.submodule/jemalloc/coverage.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +set -e + +objdir=$1 +suffix=$2 +shift 2 +objs=$@ + +gcov -b -p -f -o "${objdir}" ${objs} + +# Move gcov outputs so that subsequent gcov invocations won't clobber results +# for the same sources with different compilation flags. +for f in `find . -maxdepth 1 -type f -name '*.gcov'` ; do + mv "${f}" "${f}.${suffix}" +done diff --git a/redis.submodule/jemalloc/doc/html.xsl.in b/redis.submodule/jemalloc/doc/html.xsl.in new file mode 100644 index 0000000..a91d974 --- /dev/null +++ b/redis.submodule/jemalloc/doc/html.xsl.in @@ -0,0 +1,4 @@ + + + + diff --git a/redis.submodule/jemalloc/doc/jemalloc.xml.in b/redis.submodule/jemalloc/doc/jemalloc.xml.in new file mode 100644 index 0000000..26a5e14 --- /dev/null +++ b/redis.submodule/jemalloc/doc/jemalloc.xml.in @@ -0,0 +1,2762 @@ + + + + + + + User Manual + jemalloc + @jemalloc_version@ + + + Jason + Evans + Author + + + + + JEMALLOC + 3 + + + jemalloc + jemalloc + + general purpose memory allocation functions + + + LIBRARY + This manual describes jemalloc @jemalloc_version@. More information + can be found at the jemalloc website. + + + SYNOPSIS + + #include <jemalloc/jemalloc.h> + + Standard API + + void *malloc + size_t size + + + void *calloc + size_t number + size_t size + + + int posix_memalign + void **ptr + size_t alignment + size_t size + + + void *aligned_alloc + size_t alignment + size_t size + + + void *realloc + void *ptr + size_t size + + + void free + void *ptr + + + + Non-standard API + + void *mallocx + size_t size + int flags + + + void *rallocx + void *ptr + size_t size + int flags + + + size_t xallocx + void *ptr + size_t size + size_t extra + int flags + + + size_t sallocx + void *ptr + int flags + + + void dallocx + void *ptr + int flags + + + void sdallocx + void *ptr + size_t size + int flags + + + size_t nallocx + size_t size + int flags + + + int mallctl + const char *name + void *oldp + size_t *oldlenp + void *newp + size_t newlen + + + int mallctlnametomib + const char *name + size_t *mibp + size_t *miblenp + + + int mallctlbymib + const size_t *mib + size_t miblen + void *oldp + size_t *oldlenp + void *newp + size_t newlen + + + void malloc_stats_print + void (*write_cb) + void *, const char * + + void *cbopaque + const char *opts + + + size_t malloc_usable_size + const void *ptr + + + void (*malloc_message) + void *cbopaque + const char *s + + const char *malloc_conf; + + + + + DESCRIPTION + + Standard API + + The malloc function allocates + size bytes of uninitialized memory. The allocated + space is suitably aligned (after possible pointer coercion) for storage + of any type of object. + + The calloc function allocates + space for number objects, each + size bytes in length. 
The result is identical to + calling malloc with an argument of + number * size, with the + exception that the allocated memory is explicitly initialized to zero + bytes. + + The posix_memalign function + allocates size bytes of memory such that the + allocation's base address is a multiple of + alignment, and returns the allocation in the value + pointed to by ptr. The requested + alignment must be a power of 2 at least as large as + sizeof(void *). + + The aligned_alloc function + allocates size bytes of memory such that the + allocation's base address is a multiple of + alignment. The requested + alignment must be a power of 2. Behavior is + undefined if size is not an integral multiple of + alignment. + + The realloc function changes the + size of the previously allocated memory referenced by + ptr to size bytes. The + contents of the memory are unchanged up to the lesser of the new and old + sizes. If the new size is larger, the contents of the newly allocated + portion of the memory are undefined. Upon success, the memory referenced + by ptr is freed and a pointer to the newly + allocated memory is returned. Note that + realloc may move the memory allocation, + resulting in a different return value than ptr. + If ptr is NULL, the + realloc function behaves identically to + malloc for the specified size. + + The free function causes the + allocated memory referenced by ptr to be made + available for future allocations. If ptr is + NULL, no action occurs. + + + Non-standard API + The mallocx, + rallocx, + xallocx, + sallocx, + dallocx, + sdallocx, and + nallocx functions all have a + flags argument that can be used to specify + options. The functions only check the options that are contextually + relevant. Use bitwise or (|) operations to + specify one or more of the following: + + + MALLOCX_LG_ALIGN(la) + + + Align the memory allocation to start at an address + that is a multiple of (1 << + la). This macro does not validate + that la is within the valid + range. + + + MALLOCX_ALIGN(a) + + + Align the memory allocation to start at an address + that is a multiple of a, where + a is a power of two. This macro does not + validate that a is a power of 2. + + + + MALLOCX_ZERO + + Initialize newly allocated memory to contain zero + bytes. In the growing reallocation case, the real size prior to + reallocation defines the boundary between untouched bytes and those + that are initialized to contain zero bytes. If this macro is + absent, newly allocated memory is uninitialized. + + + MALLOCX_TCACHE(tc) + + + Use the thread-specific cache (tcache) specified by + the identifier tc, which must have been + acquired via the tcache.create + mallctl. This macro does not validate that + tc specifies a valid + identifier. + + + MALLOCX_TCACHE_NONE + + Do not use a thread-specific cache (tcache). Unless + MALLOCX_TCACHE(tc) or + MALLOCX_TCACHE_NONE is specified, an + automatically managed tcache will be used under many circumstances. + This macro cannot be used in the same flags + argument as + MALLOCX_TCACHE(tc). + + + MALLOCX_ARENA(a) + + + Use the arena specified by the index + a. This macro has no effect for regions that + were allocated via an arena other than the one specified. This + macro does not validate that a specifies an + arena index in the valid range. + + + + + The mallocx function allocates at + least size bytes of memory, and returns a pointer + to the base address of the allocation. 
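As a hedged aside (not part of the original manual text): the flag macros described above compose with a single bitwise or, as in this minimal sketch. It uses only functions and macros documented in this section; the sizes chosen are arbitrary.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Sketch: ask for 1000 bytes, 64-byte aligned and zero-filled. */
	void *p = mallocx(1000, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	if (p == NULL)
		return 1;
	/* sallocx() reports the real (rounded-up) size of the allocation. */
	printf("usable size: %zu\n", sallocx(p, 0));
	dallocx(p, 0);
	return 0;
}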
Behavior is undefined if + size is 0, or if request size + overflows due to size class and/or alignment constraints. + + The rallocx function resizes the + allocation at ptr to be at least + size bytes, and returns a pointer to the base + address of the resulting allocation, which may or may not have moved from + its original location. Behavior is undefined if + size is 0, or if request size + overflows due to size class and/or alignment constraints. + + The xallocx function resizes the + allocation at ptr in place to be at least + size bytes, and returns the real size of the + allocation. If extra is non-zero, an attempt is + made to resize the allocation to be at least (size + + extra) bytes, though inability to allocate + the extra byte(s) will not by itself result in failure to resize. + Behavior is undefined if size is + 0, or if (size + extra + > SIZE_T_MAX). + + The sallocx function returns the + real size of the allocation at ptr. + + The dallocx function causes the + memory referenced by ptr to be made available for + future allocations. + + The sdallocx function is an + extension of dallocx with a + size parameter to allow the caller to pass in the + allocation size as an optimization. The minimum valid input size is the + original requested size of the allocation, and the maximum valid input + size is the corresponding value returned by + nallocx or + sallocx. + + The nallocx function allocates no + memory, but it performs the same size computation as the + mallocx function, and returns the real + size of the allocation that would result from the equivalent + mallocx function call. Behavior is + undefined if size is 0, or if + request size overflows due to size class and/or alignment + constraints. + + The mallctl function provides a + general interface for introspecting the memory allocator, as well as + setting modifiable parameters and triggering actions. The + period-separated name argument specifies a + location in a tree-structured namespace; see the section for + documentation on the tree contents. To read a value, pass a pointer via + oldp to adequate space to contain the value, and a + pointer to its length via oldlenp; otherwise pass + NULL and NULL. Similarly, to + write a value, pass a pointer to the value via + newp, and its length via + newlen; otherwise pass NULL + and 0. + + The mallctlnametomib function + provides a way to avoid repeated name lookups for applications that + repeatedly query the same portion of the namespace, by translating a name + to a “Management Information Base” (MIB) that can be passed + repeatedly to mallctlbymib. Upon + successful return from mallctlnametomib, + mibp contains an array of + *miblenp integers, where + *miblenp is the lesser of the number of components + in name and the input value of + *miblenp. Thus it is possible to pass a + *miblenp that is smaller than the number of + period-separated name components, which results in a partial MIB that can + be used as the basis for constructing a complete MIB. For name + components that are integers (e.g. the 2 in + arenas.bin.2.size), + the corresponding MIB component will always be that integer. Therefore, + it is legitimate to construct code like the following: + + The malloc_stats_print function + writes human-readable summary statistics via the + write_cb callback function pointer and + cbopaque data passed to + write_cb, or + malloc_message if + write_cb is NULL. This + function can be called repeatedly. 
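The code listing that the mallctlnametomib paragraph above points to ("code like the following") is missing from this copy of the manual. A minimal sketch of that pattern, iterating over the bin size classes through a partially constructed MIB, might look like this (an illustrative reconstruction, not the verbatim listing):

	unsigned nbins, i;
	size_t mib[4];
	size_t len, miblen;

	len = sizeof(nbins);
	mallctl("arenas.nbins", &nbins, &len, NULL, 0);

	miblen = 4;
	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
	for (i = 0; i < nbins; i++) {
		size_t bin_size;

		mib[2] = i; /* Overwrite the integer name component. */
		len = sizeof(bin_size);
		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
		/* Do something with bin_size... */
	}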
General information that never + changes during execution can be omitted by specifying "g" as a character + within the opts string. Note that + malloc_message uses the + mallctl* functions internally, so + inconsistent statistics can be reported if multiple threads use these + functions simultaneously. If is + specified during configuration, “m” and “a” can + be specified to omit merged arena and per arena statistics, respectively; + “b”, “l”, and “h” can be specified to + omit per size class statistics for bins, large objects, and huge objects, + respectively. Unrecognized characters are silently ignored. Note that + thread caching may prevent some statistics from being completely up to + date, since extra locking would be required to merge counters that track + thread cache operations. + + + The malloc_usable_size function + returns the usable size of the allocation pointed to by + ptr. The return value may be larger than the size + that was requested during allocation. The + malloc_usable_size function is not a + mechanism for in-place realloc; rather + it is provided solely as a tool for introspection purposes. Any + discrepancy between the requested allocation size and the size reported + by malloc_usable_size should not be + depended on, since such behavior is entirely implementation-dependent. + + + + + TUNING + Once, when the first call is made to one of the memory allocation + routines, the allocator initializes its internals based in part on various + options that can be specified at compile- or run-time. + + The string pointed to by the global variable + malloc_conf, the “name” of the file + referenced by the symbolic link named /etc/malloc.conf, and the value of the + environment variable MALLOC_CONF, will be interpreted, in + that order, from left to right as options. Note that + malloc_conf may be read before + main is entered, so the declaration of + malloc_conf should specify an initializer that contains + the final value to be read by jemalloc. malloc_conf is + a compile-time setting, whereas /etc/malloc.conf and MALLOC_CONF + can be safely set any time prior to program invocation. + + An options string is a comma-separated list of option:value pairs. + There is one key corresponding to each opt.* mallctl (see the section for options + documentation). For example, abort:true,narenas:1 sets + the opt.abort and opt.narenas options. Some + options have boolean values (true/false), others have integer values (base + 8, 10, or 16, depending on prefix), and yet others have raw string + values. + + + IMPLEMENTATION NOTES + Traditionally, allocators have used + sbrk + 2 to obtain memory, which is + suboptimal for several reasons, including race conditions, increased + fragmentation, and artificial limitations on maximum usable memory. If + sbrk + 2 is supported by the operating + system, this allocator uses both + mmap + 2 and + sbrk + 2, in that order of preference; + otherwise only mmap + 2 is used. + + This allocator uses multiple arenas in order to reduce lock + contention for threaded programs on multi-processor systems. This works + well with regard to threading scalability, but incurs some costs. There is + a small fixed per-arena overhead, and additionally, arenas manage memory + completely independently of each other, which means a small fixed increase + in overall memory fragmentation. These overheads are not generally an + issue, given the number of arenas normally used. 
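A short sketch tying the tuning mechanism above to the arena discussion: the same option:value syntax works whether it is compiled in via malloc_conf or exported through MALLOC_CONF at run time. The option values below are arbitrary examples, not recommendations.

	/* Sketch: compile-time defaults, read by jemalloc before main() is entered.
	 * Run-time equivalent: MALLOC_CONF="narenas:4,tcache:false" ./a.out */
	const char *malloc_conf = "narenas:4,tcache:false";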
Note that using + substantially more arenas than the default is not likely to improve + performance, mainly due to reduced cache performance. However, it may make + sense to reduce the number of arenas if an application does not make much + use of the allocation functions. + + In addition to multiple arenas, unless + is specified during configuration, this + allocator supports thread-specific caching for small and large objects, in + order to make it possible to completely avoid synchronization for most + allocation requests. Such caching allows very fast allocation in the + common case, but it increases memory usage and fragmentation, since a + bounded number of objects can remain allocated in each thread cache. + + Memory is conceptually broken into equal-sized chunks, where the + chunk size is a power of two that is greater than the page size. Chunks + are always aligned to multiples of the chunk size. This alignment makes it + possible to find metadata for user objects very quickly. + + User objects are broken into three categories according to size: + small, large, and huge. Small and large objects are managed entirely by + arenas; huge objects are additionally aggregated in a single data structure + that is shared by all threads. Huge objects are typically used by + applications infrequently enough that this single data structure is not a + scalability issue. + + Each chunk that is managed by an arena tracks its contents as runs of + contiguous pages (unused, backing a set of small objects, or backing one + large object). The combination of chunk alignment and chunk page maps + makes it possible to determine all metadata regarding small and large + allocations in constant time. + + Small objects are managed in groups by page runs. Each run maintains + a bitmap to track which regions are in use. Allocation requests that are no + more than half the quantum (8 or 16, depending on architecture) are rounded + up to the nearest power of two that is at least sizeof(double). All other object size + classes are multiples of the quantum, spaced such that there are four size + classes for each doubling in size, which limits internal fragmentation to + approximately 20% for all but the smallest size classes. Small size classes + are smaller than four times the page size, large size classes are smaller + than the chunk size (see the opt.lg_chunk option), and + huge size classes extend from the chunk size up to one size class less than + the full address space size. + + Allocations are packed tightly together, which can be an issue for + multi-threaded applications. If you need to assure that allocations do not + suffer from cacheline sharing, round your allocation requests up to the + nearest multiple of the cacheline size, or specify cacheline alignment when + allocating. + + The realloc, + rallocx, and + xallocx functions may resize allocations + without moving them under limited circumstances. Unlike the + *allocx API, the standard API does not + officially round up the usable size of an allocation to the nearest size + class, so technically it is necessary to call + realloc to grow e.g. a 9-byte allocation to + 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage + trivially succeeds in place as long as the pre-size and post-size both round + up to the same size class. 
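As a hedged sketch of the in-place-first idiom this paragraph suggests (not taken from the manual; sizes are arbitrary): try xallocx first, and fall back to rallocx, which may move the data, only when in-place growth does not reach the target.

	/* Sketch: grow a 4 KiB allocation to 8 KiB, preferring in-place resizing. */
	void *p = mallocx(4096, 0);
	if (p != NULL) {
		if (xallocx(p, 8192, 0, 0) < 8192) {
			void *q = rallocx(p, 8192, 0); /* may move the allocation */
			if (q != NULL)
				p = q;
		}
		dallocx(p, 0);
	}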
No other API guarantees are made regarding + in-place resizing, but the current implementation also tries to resize large + and huge allocations in place, as long as the pre-size and post-size are + both large or both huge. In such cases shrinkage always succeeds for large + size classes, but for huge size classes the chunk allocator must support + splitting (see arena.<i>.chunk_hooks). + Growth only succeeds if the trailing memory is currently available, and + additionally for huge size classes the chunk allocator must support + merging. + + Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a + 64-bit system, the size classes in each category are as shown in . + + + Size classes + + + + + + + Category + Spacing + Size + + + + + Small + lg + [8] + + + 16 + [16, 32, 48, 64, 80, 96, 112, 128] + + + 32 + [160, 192, 224, 256] + + + 64 + [320, 384, 448, 512] + + + 128 + [640, 768, 896, 1024] + + + 256 + [1280, 1536, 1792, 2048] + + + 512 + [2560, 3072, 3584, 4096] + + + 1 KiB + [5 KiB, 6 KiB, 7 KiB, 8 KiB] + + + 2 KiB + [10 KiB, 12 KiB, 14 KiB] + + + Large + 2 KiB + [16 KiB] + + + 4 KiB + [20 KiB, 24 KiB, 28 KiB, 32 KiB] + + + 8 KiB + [40 KiB, 48 KiB, 54 KiB, 64 KiB] + + + 16 KiB + [80 KiB, 96 KiB, 112 KiB, 128 KiB] + + + 32 KiB + [160 KiB, 192 KiB, 224 KiB, 256 KiB] + + + 64 KiB + [320 KiB, 384 KiB, 448 KiB, 512 KiB] + + + 128 KiB + [640 KiB, 768 KiB, 896 KiB, 1 MiB] + + + 256 KiB + [1280 KiB, 1536 KiB, 1792 KiB] + + + Huge + 256 KiB + [2 MiB] + + + 512 KiB + [2560 KiB, 3 MiB, 3584 KiB, 4 MiB] + + + 1 MiB + [5 MiB, 6 MiB, 7 MiB, 8 MiB] + + + 2 MiB + [10 MiB, 12 MiB, 14 MiB, 16 MiB] + + + 4 MiB + [20 MiB, 24 MiB, 28 MiB, 32 MiB] + + + 8 MiB + [40 MiB, 48 MiB, 56 MiB, 64 MiB] + + + ... + ... + + + +
+
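The rounding summarized in the table above can be observed directly with nallocx, which performs the size computation without allocating. A small sketch (the printed values assume the 64-bit configuration tabulated above; other builds may differ):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Sketch: on the configuration above, 9 -> 16, 100 -> 112, 5000 -> 5120. */
	size_t reqs[] = {9, 100, 5000};
	size_t i;
	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		printf("%zu -> %zu\n", reqs[i], nallocx(reqs[i], 0));
	return 0;
}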
+ + MALLCTL NAMESPACE + The following names are defined in the namespace accessible via the + mallctl* functions. Value types are + specified in parentheses, their readable/writable statuses are encoded as + rw, r-, -w, or + --, and required build configuration flags follow, if + any. A name element encoded as <i> or + <j> indicates an integer component, where the + integer varies from 0 to some upper value that must be determined via + introspection. In the case of stats.arenas.<i>.*, + <i> equal to arenas.narenas can be + used to access the summation of statistics from all arenas. Take special + note of the epoch mallctl, + which controls refreshing of cached dynamic statistics. + + + + + version + (const char *) + r- + + Return the jemalloc version string. + + + + + epoch + (uint64_t) + rw + + If a value is passed in, refresh the data from which + the mallctl* functions report values, + and increment the epoch. Return the current epoch. This is useful for + detecting whether another thread caused a refresh. + + + + + config.cache_oblivious + (bool) + r- + + was specified + during build configuration. + + + + + config.debug + (bool) + r- + + was specified during + build configuration. + + + + + config.fill + (bool) + r- + + was specified during + build configuration. + + + + + config.lazy_lock + (bool) + r- + + was specified + during build configuration. + + + + + config.munmap + (bool) + r- + + was specified during + build configuration. + + + + + config.prof + (bool) + r- + + was specified during + build configuration. + + + + + config.prof_libgcc + (bool) + r- + + was not + specified during build configuration. + + + + + config.prof_libunwind + (bool) + r- + + was specified + during build configuration. + + + + + config.stats + (bool) + r- + + was specified during + build configuration. + + + + + config.tcache + (bool) + r- + + was not specified + during build configuration. + + + + + config.tls + (bool) + r- + + was not specified during + build configuration. + + + + + config.utrace + (bool) + r- + + was specified during + build configuration. + + + + + config.valgrind + (bool) + r- + + was specified during + build configuration. + + + + + config.xmalloc + (bool) + r- + + was specified during + build configuration. + + + + + opt.abort + (bool) + r- + + Abort-on-warning enabled/disabled. If true, most + warnings are fatal. The process will call + abort + 3 in these cases. This option is + disabled by default unless is + specified during configuration, in which case it is enabled by default. + + + + + + opt.dss + (const char *) + r- + + dss (sbrk + 2) allocation precedence as + related to mmap + 2 allocation. The following + settings are supported if + sbrk + 2 is supported by the operating + system: “disabled”, “primary”, and + “secondary”; otherwise only “disabled” is + supported. The default is “secondary” if + sbrk + 2 is supported by the operating + system; “disabled” otherwise. + + + + + + opt.lg_chunk + (size_t) + r- + + Virtual memory chunk size (log base 2). If a chunk + size outside the supported size range is specified, the size is + silently clipped to the minimum/maximum supported size. The default + chunk size is 2 MiB (2^21). + + + + + + opt.narenas + (size_t) + r- + + Maximum number of arenas to use for automatic + multiplexing of threads and arenas. The default is four times the + number of CPUs, or one if there is a single CPU. + + + + + opt.lg_dirty_mult + (ssize_t) + r- + + Per-arena minimum ratio (log base 2) of active to dirty + pages. 
Some dirty unused pages may be allowed to accumulate, within + the limit set by the ratio (or one chunk worth of dirty pages, + whichever is greater), before informing the kernel about some of those + pages via madvise + 2 or a similar system call. This + provides the kernel with sufficient information to recycle dirty pages + if physical memory becomes scarce and the pages remain unused. The + default minimum ratio is 8:1 (2^3:1); an option value of -1 will + disable dirty page purging. See arenas.lg_dirty_mult + and arena.<i>.lg_dirty_mult + for related dynamic control options. + + + + + opt.stats_print + (bool) + r- + + Enable/disable statistics printing at exit. If + enabled, the malloc_stats_print + function is called at program exit via an + atexit + 3 function. If + is specified during configuration, this + has the potential to cause deadlock for a multi-threaded process that + exits while one or more threads are executing in the memory allocation + functions. Furthermore, atexit may + allocate memory during application initialization and then deadlock + internally when jemalloc in turn calls + atexit, so this option is not + univerally usable (though the application can register its own + atexit function with equivalent + functionality). Therefore, this option should only be used with care; + it is primarily intended as a performance tuning aid during application + development. This option is disabled by default. + + + + + opt.junk + (const char *) + r- + [] + + Junk filling. If set to "alloc", each byte of + uninitialized allocated memory will be initialized to + 0xa5. If set to "free", all deallocated memory will + be initialized to 0x5a. If set to "true", both + allocated and deallocated memory will be initialized, and if set to + "false", junk filling be disabled entirely. This is intended for + debugging and will impact performance negatively. This option is + "false" by default unless is specified + during configuration, in which case it is "true" by default unless + running inside Valgrind. + + + + + opt.quarantine + (size_t) + r- + [] + + Per thread quarantine size in bytes. If non-zero, each + thread maintains a FIFO object quarantine that stores up to the + specified number of bytes of memory. The quarantined memory is not + freed until it is released from quarantine, though it is immediately + junk-filled if the opt.junk option is + enabled. This feature is of particular use in combination with Valgrind, which can detect attempts + to access quarantined objects. This is intended for debugging and will + impact performance negatively. The default quarantine size is 0 unless + running inside Valgrind, in which case the default is 16 + MiB. + + + + + opt.redzone + (bool) + r- + [] + + Redzones enabled/disabled. If enabled, small + allocations have redzones before and after them. Furthermore, if the + opt.junk option is + enabled, the redzones are checked for corruption during deallocation. + However, the primary intended purpose of this feature is to be used in + combination with Valgrind, + which needs redzones in order to do effective buffer overflow/underflow + detection. This option is intended for debugging and will impact + performance negatively. This option is disabled by + default unless running inside Valgrind. + + + + + opt.zero + (bool) + r- + [] + + Zero filling enabled/disabled. If enabled, each byte + of uninitialized allocated memory will be initialized to 0. 
Note that + this initialization only happens once for each byte, so + realloc and + rallocx calls do not zero memory that + was previously allocated. This is intended for debugging and will + impact performance negatively. This option is disabled by default. + + + + + + opt.utrace + (bool) + r- + [] + + Allocation tracing based on + utrace + 2 enabled/disabled. This option + is disabled by default. + + + + + opt.xmalloc + (bool) + r- + [] + + Abort-on-out-of-memory enabled/disabled. If enabled, + rather than returning failure for any allocation function, display a + diagnostic message on STDERR_FILENO and cause the + program to drop core (using + abort + 3). If an application is + designed to depend on this behavior, set the option at compile time by + including the following in the source code: + + This option is disabled by default. + + + + + opt.tcache + (bool) + r- + [] + + Thread-specific caching (tcache) enabled/disabled. When + there are multiple threads, each thread uses a tcache for objects up to + a certain size. Thread-specific caching allows many allocations to be + satisfied without performing any thread synchronization, at the cost of + increased memory use. See the opt.lg_tcache_max + option for related tuning information. This option is enabled by + default unless running inside Valgrind, in which case it is + forcefully disabled. + + + + + opt.lg_tcache_max + (size_t) + r- + [] + + Maximum size class (log base 2) to cache in the + thread-specific cache (tcache). At a minimum, all small size classes + are cached, and at a maximum all large size classes are cached. The + default maximum is 32 KiB (2^15). + + + + + opt.prof + (bool) + r- + [] + + Memory profiling enabled/disabled. If enabled, profile + memory allocation activity. See the opt.prof_active + option for on-the-fly activation/deactivation. See the opt.lg_prof_sample + option for probabilistic sampling control. See the opt.prof_accum + option for control of cumulative sample reporting. See the opt.lg_prof_interval + option for information on interval-triggered profile dumping, the opt.prof_gdump + option for information on high-water-triggered profile dumping, and the + opt.prof_final + option for final profile dumping. Profile output is compatible with + the jeprof command, which is based on the + pprof that is developed as part of the gperftools + package. + + + + + opt.prof_prefix + (const char *) + r- + [] + + Filename prefix for profile dumps. If the prefix is + set to the empty string, no automatic dumps will occur; this is + primarily useful for disabling the automatic final heap dump (which + also disables leak reporting, if enabled). The default prefix is + jeprof. + + + + + opt.prof_active + (bool) + r- + [] + + Profiling activated/deactivated. This is a secondary + control mechanism that makes it possible to start the application with + profiling enabled (see the opt.prof option) but + inactive, then toggle profiling at any time during program execution + with the prof.active mallctl. + This option is enabled by default. + + + + + opt.prof_thread_active_init + (bool) + r- + [] + + Initial setting for thread.prof.active + in newly created threads. The initial setting for newly created threads + can also be changed during execution via the prof.thread_active_init + mallctl. This option is enabled by default. + + + + + opt.lg_prof_sample + (size_t) + r- + [] + + Average interval (log base 2) between allocation + samples, as measured in bytes of allocation activity. 
Increasing the + sampling interval decreases profile fidelity, but also decreases the + computational overhead. The default sample interval is 512 KiB (2^19 + B). + + + + + opt.prof_accum + (bool) + r- + [] + + Reporting of cumulative object/byte counts in profile + dumps enabled/disabled. If this option is enabled, every unique + backtrace must be stored for the duration of execution. Depending on + the application, this can impose a large memory overhead, and the + cumulative counts are not always of interest. This option is disabled + by default. + + + + + opt.lg_prof_interval + (ssize_t) + r- + [] + + Average interval (log base 2) between memory profile + dumps, as measured in bytes of allocation activity. The actual + interval between dumps may be sporadic because decentralized allocation + counters are used to avoid synchronization bottlenecks. Profiles are + dumped to files named according to the pattern + <prefix>.<pid>.<seq>.i<iseq>.heap, + where <prefix> is controlled by the + opt.prof_prefix + option. By default, interval-triggered profile dumping is disabled + (encoded as -1). + + + + + + opt.prof_gdump + (bool) + r- + [] + + Set the initial state of prof.gdump, which when + enabled triggers a memory profile dump every time the total virtual + memory exceeds the previous maximum. This option is disabled by + default. + + + + + opt.prof_final + (bool) + r- + [] + + Use an + atexit + 3 function to dump final memory + usage to a file named according to the pattern + <prefix>.<pid>.<seq>.f.heap, + where <prefix> is controlled by the opt.prof_prefix + option. Note that atexit may allocate + memory during application initialization and then deadlock internally + when jemalloc in turn calls atexit, so + this option is not univerally usable (though the application can + register its own atexit function with + equivalent functionality). This option is disabled by + default. + + + + + opt.prof_leak + (bool) + r- + [] + + Leak reporting enabled/disabled. If enabled, use an + atexit + 3 function to report memory leaks + detected by allocation sampling. See the + opt.prof option for + information on analyzing heap profile output. This option is disabled + by default. + + + + + thread.arena + (unsigned) + rw + + Get or set the arena associated with the calling + thread. If the specified arena was not initialized beforehand (see the + arenas.initialized + mallctl), it will be automatically initialized as a side effect of + calling this interface. + + + + + thread.allocated + (uint64_t) + r- + [] + + Get the total number of bytes ever allocated by the + calling thread. This counter has the potential to wrap around; it is + up to the application to appropriately interpret the counter in such + cases. + + + + + thread.allocatedp + (uint64_t *) + r- + [] + + Get a pointer to the the value that is returned by the + thread.allocated + mallctl. This is useful for avoiding the overhead of repeated + mallctl* calls. + + + + + thread.deallocated + (uint64_t) + r- + [] + + Get the total number of bytes ever deallocated by the + calling thread. This counter has the potential to wrap around; it is + up to the application to appropriately interpret the counter in such + cases. + + + + + thread.deallocatedp + (uint64_t *) + r- + [] + + Get a pointer to the the value that is returned by the + thread.deallocated + mallctl. This is useful for avoiding the overhead of repeated + mallctl* calls. + + + + + thread.tcache.enabled + (bool) + rw + [] + + Enable/disable calling thread's tcache. 
The tcache is + implicitly flushed as a side effect of becoming + disabled (see thread.tcache.flush). + + + + + + thread.tcache.flush + (void) + -- + [] + + Flush calling thread's thread-specific cache (tcache). + This interface releases all cached objects and internal data structures + associated with the calling thread's tcache. Ordinarily, this interface + need not be called, since automatic periodic incremental garbage + collection occurs, and the thread cache is automatically discarded when + a thread exits. However, garbage collection is triggered by allocation + activity, so it is possible for a thread that stops + allocating/deallocating to retain its cache indefinitely, in which case + the developer may find manual flushing useful. + + + + + thread.prof.name + (const char *) + r- or + -w + [] + + Get/set the descriptive name associated with the calling + thread in memory profile dumps. An internal copy of the name string is + created, so the input string need not be maintained after this interface + completes execution. The output string of this interface should be + copied for non-ephemeral uses, because multiple implementation details + can cause asynchronous string deallocation. Furthermore, each + invocation of this interface can only read or write; simultaneous + read/write is not supported due to string lifetime limitations. The + name string must be nil-terminated and comprised only of characters in + the sets recognized + by isgraph + 3 and + isblank + 3. + + + + + thread.prof.active + (bool) + rw + [] + + Control whether sampling is currently active for the + calling thread. This is an activation mechanism in addition to prof.active; both must + be active for the calling thread to sample. This flag is enabled by + default. + + + + + tcache.create + (unsigned) + r- + [] + + Create an explicit thread-specific cache (tcache) and + return an identifier that can be passed to the MALLOCX_TCACHE(tc) + macro to explicitly use the specified cache rather than the + automatically managed one that is used by default. Each explicit cache + can be used by only one thread at a time; the application must assure + that this constraint holds. + + + + + + tcache.flush + (unsigned) + -w + [] + + Flush the specified thread-specific cache (tcache). The + same considerations apply to this interface as to thread.tcache.flush, + except that the tcache will never be automatically be discarded. + + + + + + tcache.destroy + (unsigned) + -w + [] + + Flush the specified thread-specific cache (tcache) and + make the identifier available for use during a future tcache creation. + + + + + + arena.<i>.purge + (void) + -- + + Purge unused dirty pages for arena <i>, or for + all arenas if <i> equals arenas.narenas. + + + + + + arena.<i>.dss + (const char *) + rw + + Set the precedence of dss allocation as related to mmap + allocation for arena <i>, or for all arenas if <i> equals + arenas.narenas. See + opt.dss for supported + settings. + + + + + arena.<i>.lg_dirty_mult + (ssize_t) + rw + + Current per-arena minimum ratio (log base 2) of active + to dirty pages for arena <i>. Each time this interface is set and + the ratio is increased, pages are synchronously purged as necessary to + impose the new ratio. See opt.lg_dirty_mult + for additional information. + + + + + arena.<i>.chunk_hooks + (chunk_hooks_t) + rw + + Get or set the chunk management hook functions for arena + <i>. 
The functions must be capable of operating on all extant + chunks associated with arena <i>, usually by passing unknown + chunks to the replaced functions. In practice, it is feasible to + control allocation for arenas created via arenas.extend such + that all chunks originate from an application-supplied chunk allocator + (by setting custom chunk hook functions just after arena creation), but + the automatically created arenas may have already created chunks prior + to the application having an opportunity to take over chunk + allocation. + + + The chunk_hooks_t structure comprises function + pointers which are described individually below. jemalloc uses these + functions to manage chunk lifetime, which starts off with allocation of + mapped committed memory, in the simplest case followed by deallocation. + However, there are performance and platform reasons to retain chunks for + later reuse. Cleanup attempts cascade from deallocation to decommit to + purging, which gives the chunk management functions opportunities to + reject the most permanent cleanup operations in favor of less permanent + (and often less costly) operations. The chunk splitting and merging + operations can also be opted out of, but this is mainly intended to + support platforms on which virtual memory mappings provided by the + operating system kernel do not automatically coalesce and split, e.g. + Windows. + + + typedef void *(chunk_alloc_t) + void *chunk + size_t size + size_t alignment + bool *zero + bool *commit + unsigned arena_ind + + + A chunk allocation function conforms to the + chunk_alloc_t type and upon success returns a pointer to + size bytes of mapped memory on behalf of arena + arena_ind such that the chunk's base address is a + multiple of alignment, as well as setting + *zero to indicate whether the chunk is zeroed and + *commit to indicate whether the chunk is + committed. Upon error the function returns NULL + and leaves *zero and + *commit unmodified. The + size parameter is always a multiple of the chunk + size. The alignment parameter is always a power + of two at least as large as the chunk size. Zeroing is mandatory if + *zero is true upon function entry. Committing is + mandatory if *commit is true upon function entry. + If chunk is not NULL, the + returned pointer must be chunk on success or + NULL on error. Committed memory may be committed + in absolute terms as on a system that does not overcommit, or in + implicit terms as on a system that overcommits and satisfies physical + memory needs on demand via soft page faults. Note that replacing the + default chunk allocation function makes the arena's arena.<i>.dss + setting irrelevant. + + + typedef bool (chunk_dalloc_t) + void *chunk + size_t size + bool committed + unsigned arena_ind + + + + A chunk deallocation function conforms to the + chunk_dalloc_t type and deallocates a + chunk of given size with + committed/decommited memory as indicated, on + behalf of arena arena_ind, returning false upon + success. If the function returns true, this indicates opt-out from + deallocation; the virtual memory mapping associated with the chunk + remains mapped, in the same commit state, and available for future use, + in which case it will be automatically retained for later reuse. 
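A hedged usage sketch alongside the hook descriptions: the whole hook table for an arena can be read or replaced through the mallctl interface documented above. The chunk_hooks_t field layout is not reproduced here; only the mallctl calls are shown, with arena index 0 as an arbitrary example.

	/* Sketch: fetch the current hook table for arena 0, then (optionally)
	 * install a modified copy. Assumes <jemalloc/jemalloc.h> is included. */
	chunk_hooks_t hooks;
	size_t sz = sizeof(hooks);
	mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0);
	/* ... replace selected function pointers in hooks ... */
	mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));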
+ + + typedef bool (chunk_commit_t) + void *chunk + size_t size + size_t offset + size_t length + unsigned arena_ind + + + A chunk commit function conforms to the + chunk_commit_t type and commits zeroed physical memory to + back pages within a chunk of given + size at offset bytes, + extending for length on behalf of arena + arena_ind, returning false upon success. + Committed memory may be committed in absolute terms as on a system that + does not overcommit, or in implicit terms as on a system that + overcommits and satisfies physical memory needs on demand via soft page + faults. If the function returns true, this indicates insufficient + physical memory to satisfy the request. + + + typedef bool (chunk_decommit_t) + void *chunk + size_t size + size_t offset + size_t length + unsigned arena_ind + + + A chunk decommit function conforms to the + chunk_decommit_t type and decommits any physical memory + that is backing pages within a chunk of given + size at offset bytes, + extending for length on behalf of arena + arena_ind, returning false upon success, in which + case the pages will be committed via the chunk commit function before + being reused. If the function returns true, this indicates opt-out from + decommit; the memory remains committed and available for future use, in + which case it will be automatically retained for later reuse. + + + typedef bool (chunk_purge_t) + void *chunk + size_tsize + size_t offset + size_t length + unsigned arena_ind + + + A chunk purge function conforms to the chunk_purge_t + type and optionally discards physical pages within the virtual memory + mapping associated with chunk of given + size at offset bytes, + extending for length on behalf of arena + arena_ind, returning false if pages within the + purged virtual memory range will be zero-filled the next time they are + accessed. + + + typedef bool (chunk_split_t) + void *chunk + size_t size + size_t size_a + size_t size_b + bool committed + unsigned arena_ind + + + A chunk split function conforms to the chunk_split_t + type and optionally splits chunk of given + size into two adjacent chunks, the first of + size_a bytes, and the second of + size_b bytes, operating on + committed/decommitted memory as indicated, on + behalf of arena arena_ind, returning false upon + success. If the function returns true, this indicates that the chunk + remains unsplit and therefore should continue to be operated on as a + whole. + + + typedef bool (chunk_merge_t) + void *chunk_a + size_t size_a + void *chunk_b + size_t size_b + bool committed + unsigned arena_ind + + + A chunk merge function conforms to the chunk_merge_t + type and optionally merges adjacent chunks, + chunk_a of given size_a + and chunk_b of given + size_b into one contiguous chunk, operating on + committed/decommitted memory as indicated, on + behalf of arena arena_ind, returning false upon + success. If the function returns true, this indicates that the chunks + remain distinct mappings and therefore should continue to be operated on + independently. + + + + + + arenas.narenas + (unsigned) + r- + + Current limit on number of arenas. + + + + + arenas.initialized + (bool *) + r- + + An array of arenas.narenas + booleans. Each boolean indicates whether the corresponding arena is + initialized. + + + + + arenas.lg_dirty_mult + (ssize_t) + rw + + Current default per-arena minimum ratio (log base 2) of + active to dirty pages, used to initialize arena.<i>.lg_dirty_mult + during arena creation. See opt.lg_dirty_mult + for additional information. 
+ + + + + arenas.quantum + (size_t) + r- + + Quantum size. + + + + + arenas.page + (size_t) + r- + + Page size. + + + + + arenas.tcache_max + (size_t) + r- + [] + + Maximum thread-cached size class. + + + + + arenas.nbins + (unsigned) + r- + + Number of bin size classes. + + + + + arenas.nhbins + (unsigned) + r- + [] + + Total number of thread cache bin size + classes. + + + + + arenas.bin.<i>.size + (size_t) + r- + + Maximum size supported by size class. + + + + + arenas.bin.<i>.nregs + (uint32_t) + r- + + Number of regions per page run. + + + + + arenas.bin.<i>.run_size + (size_t) + r- + + Number of bytes per page run. + + + + + arenas.nlruns + (unsigned) + r- + + Total number of large size classes. + + + + + arenas.lrun.<i>.size + (size_t) + r- + + Maximum size supported by this large size + class. + + + + + arenas.nhchunks + (unsigned) + r- + + Total number of huge size classes. + + + + + arenas.hchunk.<i>.size + (size_t) + r- + + Maximum size supported by this huge size + class. + + + + + arenas.extend + (unsigned) + r- + + Extend the array of arenas by appending a new arena, + and returning the new arena index. + + + + + prof.thread_active_init + (bool) + rw + [] + + Control the initial setting for thread.prof.active + in newly created threads. See the opt.prof_thread_active_init + option for additional information. + + + + + prof.active + (bool) + rw + [] + + Control whether sampling is currently active. See the + opt.prof_active + option for additional information, as well as the interrelated thread.prof.active + mallctl. + + + + + prof.dump + (const char *) + -w + [] + + Dump a memory profile to the specified file, or if NULL + is specified, to a file according to the pattern + <prefix>.<pid>.<seq>.m<mseq>.heap, + where <prefix> is controlled by the + opt.prof_prefix + option. + + + + + prof.gdump + (bool) + rw + [] + + When enabled, trigger a memory profile dump every time + the total virtual memory exceeds the previous maximum. Profiles are + dumped to files named according to the pattern + <prefix>.<pid>.<seq>.u<useq>.heap, + where <prefix> is controlled by the opt.prof_prefix + option. + + + + + prof.reset + (size_t) + -w + [] + + Reset all memory profile statistics, and optionally + update the sample rate (see opt.lg_prof_sample + and prof.lg_sample). + + + + + + prof.lg_sample + (size_t) + r- + [] + + Get the current sample rate (see opt.lg_prof_sample). + + + + + + prof.interval + (uint64_t) + r- + [] + + Average number of bytes allocated between + inverval-based profile dumps. See the + opt.lg_prof_interval + option for additional information. + + + + + stats.cactive + (size_t *) + r- + [] + + Pointer to a counter that contains an approximate count + of the current number of bytes in active pages. The estimate may be + high, but never low, because each arena rounds up when computing its + contribution to the counter. Note that the epoch mallctl has no bearing + on this counter. Furthermore, counter consistency is maintained via + atomic operations, so it is necessary to use an atomic operation in + order to guarantee a consistent read when dereferencing the pointer. + + + + + + stats.allocated + (size_t) + r- + [] + + Total number of bytes allocated by the + application. + + + + + stats.active + (size_t) + r- + [] + + Total number of bytes in active pages allocated by the + application. This is a multiple of the page size, and greater than or + equal to stats.allocated. + This does not include + stats.arenas.<i>.pdirty, nor pages + entirely devoted to allocator metadata. 
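A hedged sketch of reading the statistics documented in this section: refresh cached values via the epoch mallctl described earlier, then read the totals (assumes statistics support was configured in).

	/* Sketch: force a refresh of cached statistics, then read two totals. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	size_t allocated, active;
	sz = sizeof(size_t);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	sz = sizeof(size_t);
	mallctl("stats.active", &active, &sz, NULL, 0);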
+ + + + + stats.metadata + (size_t) + r- + [] + + Total number of bytes dedicated to metadata, which + comprise base allocations used for bootstrap-sensitive internal + allocator data structures, arena chunk headers (see stats.arenas.<i>.metadata.mapped), + and internal allocations (see stats.arenas.<i>.metadata.allocated). + + + + + stats.resident + (size_t) + r- + [] + + Maximum number of bytes in physically resident data + pages mapped by the allocator, comprising all pages dedicated to + allocator metadata, pages backing active allocations, and unused dirty + pages. This is a maximum rather than precise because pages may not + actually be physically resident if they correspond to demand-zeroed + virtual memory that has not yet been touched. This is a multiple of the + page size, and is larger than stats.active. + + + + + stats.mapped + (size_t) + r- + [] + + Total number of bytes in active chunks mapped by the + allocator. This is a multiple of the chunk size, and is larger than + stats.active. + This does not include inactive chunks, even those that contain unused + dirty pages, which means that there is no strict ordering between this + and stats.resident. + + + + + stats.arenas.<i>.dss + (const char *) + r- + + dss (sbrk + 2) allocation precedence as + related to mmap + 2 allocation. See opt.dss for details. + + + + + + stats.arenas.<i>.lg_dirty_mult + (ssize_t) + r- + + Minimum ratio (log base 2) of active to dirty pages. + See opt.lg_dirty_mult + for details. + + + + + stats.arenas.<i>.nthreads + (unsigned) + r- + + Number of threads currently assigned to + arena. + + + + + stats.arenas.<i>.pactive + (size_t) + r- + + Number of pages in active runs. + + + + + stats.arenas.<i>.pdirty + (size_t) + r- + + Number of pages within unused runs that are potentially + dirty, and for which madvise... + MADV_DONTNEED or + similar has not been called. + + + + + stats.arenas.<i>.mapped + (size_t) + r- + [] + + Number of mapped bytes. + + + + + stats.arenas.<i>.metadata.mapped + (size_t) + r- + [] + + Number of mapped bytes in arena chunk headers, which + track the states of the non-metadata pages. + + + + + stats.arenas.<i>.metadata.allocated + (size_t) + r- + [] + + Number of bytes dedicated to internal allocations. + Internal allocations differ from application-originated allocations in + that they are for internal use, and that they are omitted from heap + profiles. This statistic is reported separately from stats.metadata and + stats.arenas.<i>.metadata.mapped + because it overlaps with e.g. the stats.allocated and + stats.active + statistics, whereas the other metadata statistics do + not. + + + + + stats.arenas.<i>.npurge + (uint64_t) + r- + [] + + Number of dirty page purge sweeps performed. + + + + + + stats.arenas.<i>.nmadvise + (uint64_t) + r- + [] + + Number of madvise... + MADV_DONTNEED or + similar calls made to purge dirty pages. + + + + + stats.arenas.<i>.purged + (uint64_t) + r- + [] + + Number of pages purged. + + + + + stats.arenas.<i>.small.allocated + (size_t) + r- + [] + + Number of bytes currently allocated by small objects. + + + + + + stats.arenas.<i>.small.nmalloc + (uint64_t) + r- + [] + + Cumulative number of allocation requests served by + small bins. + + + + + stats.arenas.<i>.small.ndalloc + (uint64_t) + r- + [] + + Cumulative number of small objects returned to bins. + + + + + + stats.arenas.<i>.small.nrequests + (uint64_t) + r- + [] + + Cumulative number of small allocation requests. 
+ + + + + + stats.arenas.<i>.large.allocated + (size_t) + r- + [] + + Number of bytes currently allocated by large objects. + + + + + + stats.arenas.<i>.large.nmalloc + (uint64_t) + r- + [] + + Cumulative number of large allocation requests served + directly by the arena. + + + + + stats.arenas.<i>.large.ndalloc + (uint64_t) + r- + [] + + Cumulative number of large deallocation requests served + directly by the arena. + + + + + stats.arenas.<i>.large.nrequests + (uint64_t) + r- + [] + + Cumulative number of large allocation requests. + + + + + + stats.arenas.<i>.huge.allocated + (size_t) + r- + [] + + Number of bytes currently allocated by huge objects. + + + + + + stats.arenas.<i>.huge.nmalloc + (uint64_t) + r- + [] + + Cumulative number of huge allocation requests served + directly by the arena. + + + + + stats.arenas.<i>.huge.ndalloc + (uint64_t) + r- + [] + + Cumulative number of huge deallocation requests served + directly by the arena. + + + + + stats.arenas.<i>.huge.nrequests + (uint64_t) + r- + [] + + Cumulative number of huge allocation requests. + + + + + + stats.arenas.<i>.bins.<j>.nmalloc + (uint64_t) + r- + [] + + Cumulative number of allocations served by bin. + + + + + + stats.arenas.<i>.bins.<j>.ndalloc + (uint64_t) + r- + [] + + Cumulative number of allocations returned to bin. + + + + + + stats.arenas.<i>.bins.<j>.nrequests + (uint64_t) + r- + [] + + Cumulative number of allocation + requests. + + + + + stats.arenas.<i>.bins.<j>.curregs + (size_t) + r- + [] + + Current number of regions for this size + class. + + + + + stats.arenas.<i>.bins.<j>.nfills + (uint64_t) + r- + [ ] + + Cumulative number of tcache fills. + + + + + stats.arenas.<i>.bins.<j>.nflushes + (uint64_t) + r- + [ ] + + Cumulative number of tcache flushes. + + + + + stats.arenas.<i>.bins.<j>.nruns + (uint64_t) + r- + [] + + Cumulative number of runs created. + + + + + stats.arenas.<i>.bins.<j>.nreruns + (uint64_t) + r- + [] + + Cumulative number of times the current run from which + to allocate changed. + + + + + stats.arenas.<i>.bins.<j>.curruns + (size_t) + r- + [] + + Current number of runs. + + + + + stats.arenas.<i>.lruns.<j>.nmalloc + (uint64_t) + r- + [] + + Cumulative number of allocation requests for this size + class served directly by the arena. + + + + + stats.arenas.<i>.lruns.<j>.ndalloc + (uint64_t) + r- + [] + + Cumulative number of deallocation requests for this + size class served directly by the arena. + + + + + stats.arenas.<i>.lruns.<j>.nrequests + (uint64_t) + r- + [] + + Cumulative number of allocation requests for this size + class. + + + + + stats.arenas.<i>.lruns.<j>.curruns + (size_t) + r- + [] + + Current number of runs for this size class. + + + + + + stats.arenas.<i>.hchunks.<j>.nmalloc + (uint64_t) + r- + [] + + Cumulative number of allocation requests for this size + class served directly by the arena. + + + + + stats.arenas.<i>.hchunks.<j>.ndalloc + (uint64_t) + r- + [] + + Cumulative number of deallocation requests for this + size class served directly by the arena. + + + + + stats.arenas.<i>.hchunks.<j>.nrequests + (uint64_t) + r- + [] + + Cumulative number of allocation requests for this size + class. + + + + + stats.arenas.<i>.hchunks.<j>.curhchunks + (size_t) + r- + [] + + Current number of huge allocations for this size class. + + + + + + DEBUGGING MALLOC PROBLEMS + When debugging, it is a good idea to configure/build jemalloc with + the and + options, and recompile the program with suitable options and symbols for + debugger support. 
When so configured, jemalloc incorporates a wide variety + of run-time assertions that catch application errors such as double-free, + write-after-free, etc. + + Programs often accidentally depend on “uninitialized” + memory actually being filled with zero bytes. Junk filling + (see the opt.junk + option) tends to expose such bugs in the form of obviously incorrect + results and/or coredumps. Conversely, zero + filling (see the opt.zero option) eliminates + the symptoms of such bugs. Between these two options, it is usually + possible to quickly detect, diagnose, and eliminate such bugs. + + This implementation does not provide much detail about the problems + it detects, because the performance impact for storing such information + would be prohibitive. However, jemalloc does integrate with the most + excellent Valgrind tool if the + configuration option is enabled. + + + DIAGNOSTIC MESSAGES + If any of the memory allocation/deallocation functions detect an + error or warning condition, a message will be printed to file descriptor + STDERR_FILENO. Errors will result in the process + dumping core. If the opt.abort option is set, most + warnings are treated as errors. + + The malloc_message variable allows the programmer + to override the function which emits the text strings forming the errors + and warnings if for some reason the STDERR_FILENO file + descriptor is not suitable for this. + malloc_message takes the + cbopaque pointer argument that is + NULL unless overridden by the arguments in a call to + malloc_stats_print, followed by a string + pointer. Please note that doing anything which tries to allocate memory in + this function is likely to result in a crash or deadlock. + + All messages are prefixed by + “<jemalloc>: ”. + + + RETURN VALUES + + Standard API + The malloc and + calloc functions return a pointer to the + allocated memory if successful; otherwise a NULL + pointer is returned and errno is set to + ENOMEM. + + The posix_memalign function + returns the value 0 if successful; otherwise it returns an error value. + The posix_memalign function will fail + if: + + + EINVAL + + The alignment parameter is + not a power of 2 at least as large as + sizeof(void *). + + + + ENOMEM + + Memory allocation error. + + + + + The aligned_alloc function returns + a pointer to the allocated memory if successful; otherwise a + NULL pointer is returned and + errno is set. The + aligned_alloc function will fail if: + + + EINVAL + + The alignment parameter is + not a power of 2. + + + + ENOMEM + + Memory allocation error. + + + + + The realloc function returns a + pointer, possibly identical to ptr, to the + allocated memory if successful; otherwise a NULL + pointer is returned, and errno is set to + ENOMEM if the error was the result of an + allocation failure. The realloc + function always leaves the original buffer intact when an error occurs. + + + The free function returns no + value. + + + Non-standard API + The mallocx and + rallocx functions return a pointer to + the allocated memory if successful; otherwise a NULL + pointer is returned to indicate insufficient contiguous memory was + available to service the allocation request. + + The xallocx function returns the + real size of the resulting resized allocation pointed to by + ptr, which is a value less than + size if the allocation could not be adequately + grown in place. + + The sallocx function returns the + real size of the allocation pointed to by ptr. 
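 The following fragment is an editorial sketch, not part of the original
 manual or of this patch; it assumes a jemalloc build whose non-standard
 API is exposed through <jemalloc/jemalloc.h> without a symbol prefix,
 and simply shows how the return values described above are usually
 checked:

     #include <stdio.h>
     #include <jemalloc/jemalloc.h>

     int
     main(void)
     {
             /* mallocx() returns NULL rather than aborting on failure. */
             void *p = mallocx(100, 0);
             if (p == NULL)
                     return (1);

             /* sallocx() reports the real (usable) size of the allocation. */
             printf("real size: %zu\n", sallocx(p, 0));

             /* nallocx() computes that same size without allocating anything. */
             printf("size a 100-byte request would get: %zu\n", nallocx(100, 0));

             /*
              * xallocx() returns the resulting real size; a value smaller
              * than the requested size means the allocation could not be
              * grown in place.
              */
             if (xallocx(p, 4096, 0, 0) < 4096)
                     printf("could not grow in place\n");

             dallocx(p, 0);
             return (0);
     }

 nallocx is typically used to size buffers up front so that a later
 mallocx call for the computed size wastes no space.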
+ + + The nallocx returns the real size + that would result from a successful equivalent + mallocx function call, or zero if + insufficient memory is available to perform the size computation. + + The mallctl, + mallctlnametomib, and + mallctlbymib functions return 0 on + success; otherwise they return an error value. The functions will fail + if: + + + EINVAL + + newp is not + NULL, and newlen is too + large or too small. Alternatively, *oldlenp + is too large or too small; in this case as much data as possible + are read despite the error. + + + ENOENT + + name or + mib specifies an unknown/invalid + value. + + + EPERM + + Attempt to read or write void value, or attempt to + write read-only value. + + + EAGAIN + + A memory allocation failure + occurred. + + + EFAULT + + An interface with side effects failed in some way + not directly related to mallctl* + read/write processing. + + + + + The malloc_usable_size function + returns the usable size of the allocation pointed to by + ptr. + + + + ENVIRONMENT + The following environment variable affects the execution of the + allocation functions: + + + MALLOC_CONF + + If the environment variable + MALLOC_CONF is set, the characters it contains + will be interpreted as options. + + + + + + EXAMPLES + To dump core whenever a problem occurs: + ln -s 'abort:true' /etc/malloc.conf + + To specify in the source a chunk size that is 16 MiB: + + + + SEE ALSO + madvise + 2, + mmap + 2, + sbrk + 2, + utrace + 2, + alloca + 3, + atexit + 3, + getpagesize + 3 + + + STANDARDS + The malloc, + calloc, + realloc, and + free functions conform to ISO/IEC + 9899:1990 (“ISO C90”). + + The posix_memalign function conforms + to IEEE Std 1003.1-2001 (“POSIX.1”). + + diff --git a/redis.submodule/jemalloc/doc/manpages.xsl.in b/redis.submodule/jemalloc/doc/manpages.xsl.in new file mode 100644 index 0000000..88b2626 --- /dev/null +++ b/redis.submodule/jemalloc/doc/manpages.xsl.in @@ -0,0 +1,4 @@ + + + + diff --git a/redis.submodule/jemalloc/doc/stylesheet.xsl b/redis.submodule/jemalloc/doc/stylesheet.xsl new file mode 100644 index 0000000..4e334a8 --- /dev/null +++ b/redis.submodule/jemalloc/doc/stylesheet.xsl @@ -0,0 +1,7 @@ + + ansi + + + "" + + diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/arena.h b/redis.submodule/jemalloc/include/jemalloc/internal/arena.h new file mode 100644 index 0000000..12c6179 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/arena.h @@ -0,0 +1,1347 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) + +/* Maximum number of regions in one run. */ +#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN) +#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) + +/* + * Minimum redzone size. Redzones may be larger than this if necessary to + * preserve region alignment. + */ +#define REDZONE_MINSIZE 16 + +/* + * The minimum ratio of active:dirty pages per arena is computed as: + * + * (nactive >> lg_dirty_mult) >= ndirty + * + * So, supposing that lg_dirty_mult is 3, there can be no less than 8 times as + * many active pages as dirty pages. 
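+ * For example, with lg_dirty_mult == 3, an arena with 4096 active pages may
+ * retain at most 4096 >> 3 == 512 dirty pages; beyond that bound, purging is
+ * triggered.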
+ */ +#define LG_DIRTY_MULT_DEFAULT 3 + +typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t; +typedef struct arena_run_s arena_run_t; +typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t; +typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t; +typedef struct arena_chunk_s arena_chunk_t; +typedef struct arena_bin_info_s arena_bin_info_t; +typedef struct arena_bin_s arena_bin_t; +typedef struct arena_s arena_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#ifdef JEMALLOC_ARENA_STRUCTS_A +struct arena_run_s { + /* Index of bin this run is associated with. */ + szind_t binind; + + /* Number of free regions in run. */ + unsigned nfree; + + /* Per region allocated/deallocated bitmap. */ + bitmap_t bitmap[BITMAP_GROUPS_MAX]; +}; + +/* Each element of the chunk map corresponds to one page within the chunk. */ +struct arena_chunk_map_bits_s { + /* + * Run address (or size) and various flags are stored together. The bit + * layout looks like (assuming 32-bit system): + * + * ???????? ???????? ???nnnnn nnndumla + * + * ? : Unallocated: Run address for first/last pages, unset for internal + * pages. + * Small: Run page offset. + * Large: Run page count for first page, unset for trailing pages. + * n : binind for small size class, BININD_INVALID for large size class. + * d : dirty? + * u : unzeroed? + * m : decommitted? + * l : large? + * a : allocated? + * + * Following are example bit patterns for the three types of runs. + * + * p : run page offset + * s : run size + * n : binind for size class; large objects set these to BININD_INVALID + * x : don't care + * - : 0 + * + : 1 + * [DUMLA] : bit set + * [dumla] : bit unset + * + * Unallocated (clean): + * ssssssss ssssssss sss+++++ +++dum-a + * xxxxxxxx xxxxxxxx xxxxxxxx xxx-Uxxx + * ssssssss ssssssss sss+++++ +++dUm-a + * + * Unallocated (dirty): + * ssssssss ssssssss sss+++++ +++D-m-a + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * ssssssss ssssssss sss+++++ +++D-m-a + * + * Small: + * pppppppp pppppppp pppnnnnn nnnd---A + * pppppppp pppppppp pppnnnnn nnn----A + * pppppppp pppppppp pppnnnnn nnnd---A + * + * Large: + * ssssssss ssssssss sss+++++ +++D--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + * + * Large (sampled, size <= LARGE_MINCLASS): + * ssssssss ssssssss sssnnnnn nnnD--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + * + * Large (not sampled, size == LARGE_MINCLASS): + * ssssssss ssssssss sss+++++ +++D--LA + * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + * -------- -------- ---+++++ +++D--LA + */ + size_t bits; +#define CHUNK_MAP_ALLOCATED ((size_t)0x01U) +#define CHUNK_MAP_LARGE ((size_t)0x02U) +#define CHUNK_MAP_STATE_MASK ((size_t)0x3U) + +#define CHUNK_MAP_DECOMMITTED ((size_t)0x04U) +#define CHUNK_MAP_UNZEROED ((size_t)0x08U) +#define CHUNK_MAP_DIRTY ((size_t)0x10U) +#define CHUNK_MAP_FLAGS_MASK ((size_t)0x1cU) + +#define CHUNK_MAP_BININD_SHIFT 5 +#define BININD_INVALID ((size_t)0xffU) +#define CHUNK_MAP_BININD_MASK (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) +#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK + +#define CHUNK_MAP_RUNIND_SHIFT (CHUNK_MAP_BININD_SHIFT + 8) +#define CHUNK_MAP_SIZE_SHIFT (CHUNK_MAP_RUNIND_SHIFT - LG_PAGE) +#define CHUNK_MAP_SIZE_MASK \ + (~(CHUNK_MAP_BININD_MASK | CHUNK_MAP_FLAGS_MASK | CHUNK_MAP_STATE_MASK)) +}; + +struct arena_runs_dirty_link_s { + qr(arena_runs_dirty_link_t) rd_link; +}; + +/* + * Each 
arena_chunk_map_misc_t corresponds to one page within the chunk, just + * like arena_chunk_map_bits_t. Two separate arrays are stored within each + * chunk header in order to improve cache locality. + */ +struct arena_chunk_map_misc_s { + /* + * Linkage for run trees. There are two disjoint uses: + * + * 1) arena_t's runs_avail tree. + * 2) arena_run_t conceptually uses this linkage for in-use non-full + * runs, rather than directly embedding linkage. + */ + rb_node(arena_chunk_map_misc_t) rb_link; + + union { + /* Linkage for list of dirty runs. */ + arena_runs_dirty_link_t rd; + + /* Profile counters, used for large object runs. */ + union { + void *prof_tctx_pun; + prof_tctx_t *prof_tctx; + }; + + /* Small region run metadata. */ + arena_run_t run; + }; +}; +typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t; +typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t; +#endif /* JEMALLOC_ARENA_STRUCTS_A */ + +#ifdef JEMALLOC_ARENA_STRUCTS_B +/* Arena chunk header. */ +struct arena_chunk_s { + /* + * A pointer to the arena that owns the chunk is stored within the node. + * This field as a whole is used by chunks_rtree to support both + * ivsalloc() and core-based debugging. + */ + extent_node_t node; + + /* + * Map of pages within chunk that keeps track of free/large/small. The + * first map_bias entries are omitted, since the chunk header does not + * need to be tracked in the map. This omission saves a header page + * for common chunk sizes (e.g. 4 MiB). + */ + arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */ +}; + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each run has the following layout: + * + * /--------------------\ + * | pad? | + * |--------------------| + * | redzone | + * reg0_offset | region 0 | + * | redzone | + * |--------------------| \ + * | redzone | | + * | region 1 | > reg_interval + * | redzone | / + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | redzone | + * | region nregs-1 | + * | redzone | + * |--------------------| + * | alignment pad? | + * \--------------------/ + * + * reg_interval has at least the same minimum alignment as reg_size; this + * preserves the alignment constraint that sa2u() depends on. Alignment pad is + * either 0 or redzone_size; it is present only if needed to align reg0_offset. + */ +struct arena_bin_info_s { + /* Size of regions in a run for this bin's size class. */ + size_t reg_size; + + /* Redzone size. */ + size_t redzone_size; + + /* Interval between regions (reg_size + (redzone_size << 1)). */ + size_t reg_interval; + + /* Total size of a run for this bin's size class. */ + size_t run_size; + + /* Total number of regions in a run for this bin's size class. */ + uint32_t nregs; + + /* + * Metadata used to manipulate bitmaps for runs associated with this + * bin. + */ + bitmap_info_t bitmap_info; + + /* Offset of first region in a run for this bin's size class. */ + uint32_t reg0_offset; +}; + +struct arena_bin_s { + /* + * All operations on runcur, runs, and stats require that lock be + * locked. Run allocation/deallocation are protected by the arena lock, + * which may be acquired while holding one or more bin locks, but not + * vise versa. + */ + malloc_mutex_t lock; + + /* + * Current run being used to service allocations of this bin's size + * class. 
+ */ + arena_run_t *runcur; + + /* + * Tree of non-full runs. This tree is used when looking for an + * existing run when runcur is no longer usable. We choose the + * non-full run that is lowest in memory; this policy tends to keep + * objects packed well, and it can also help reduce the number of + * almost-empty chunks. + */ + arena_run_tree_t runs; + + /* Bin statistics. */ + malloc_bin_stats_t stats; +}; + +struct arena_s { + /* This arena's index within the arenas array. */ + unsigned ind; + + /* + * Number of threads currently assigned to this arena. This field is + * protected by arenas_lock. + */ + unsigned nthreads; + + /* + * There are three classes of arena operations from a locking + * perspective: + * 1) Thread assignment (modifies nthreads) is protected by arenas_lock. + * 2) Bin-related operations are protected by bin locks. + * 3) Chunk- and run-related operations are protected by this mutex. + */ + malloc_mutex_t lock; + + arena_stats_t stats; + /* + * List of tcaches for extant threads associated with this arena. + * Stats from these are merged incrementally, and at exit if + * opt_stats_print is enabled. + */ + ql_head(tcache_t) tcache_ql; + + uint64_t prof_accumbytes; + + /* + * PRNG state for cache index randomization of large allocation base + * pointers. + */ + uint64_t offset_state; + + dss_prec_t dss_prec; + + /* + * In order to avoid rapid chunk allocation/deallocation when an arena + * oscillates right on the cusp of needing a new chunk, cache the most + * recently freed chunk. The spare is left in the arena's chunk trees + * until it is deleted. + * + * There is one spare chunk per arena, rather than one spare total, in + * order to avoid interactions between multiple threads that could make + * a single spare inadequate. + */ + arena_chunk_t *spare; + + /* Minimum ratio (log base 2) of nactive:ndirty. */ + ssize_t lg_dirty_mult; + + /* True if a thread is currently executing arena_purge(). */ + bool purging; + + /* Number of pages in active runs and huge regions. */ + size_t nactive; + + /* + * Current count of pages within unused runs that are potentially + * dirty, and for which madvise(... MADV_DONTNEED) has not been called. + * By tracking this, we can institute a limit on how much dirty unused + * memory is mapped for each arena. + */ + size_t ndirty; + + /* + * Size/address-ordered tree of this arena's available runs. The tree + * is used for first-best-fit run allocation. + */ + arena_avail_tree_t runs_avail; + + /* + * Unused dirty memory this arena manages. Dirty memory is conceptually + * tracked as an arbitrarily interleaved LRU of dirty runs and cached + * chunks, but the list linkage is actually semi-duplicated in order to + * avoid extra arena_chunk_map_misc_t space overhead. + * + * LRU-----------------------------------------------------------MRU + * + * /-- arena ---\ + * | | + * | | + * |------------| /- chunk -\ + * ...->|chunks_cache|<--------------------------->| /----\ |<--... + * |------------| | |node| | + * | | | | | | + * | | /- run -\ /- run -\ | | | | + * | | | | | | | | | | + * | | | | | | | | | | + * |------------| |-------| |-------| | |----| | + * ...->|runs_dirty |<-->|rd |<-->|rd |<---->|rd |<----... + * |------------| |-------| |-------| | |----| | + * | | | | | | | | | | + * | | | | | | | \----/ | + * | | \-------/ \-------/ | | + * | | | | + * | | | | + * \------------/ \---------/ + */ + arena_runs_dirty_link_t runs_dirty; + extent_node_t chunks_cache; + + /* Extant huge allocations. 
*/ + ql_head(extent_node_t) huge; + /* Synchronizes all huge allocation/update/deallocation. */ + malloc_mutex_t huge_mtx; + + /* + * Trees of chunks that were previously allocated (trees differ only in + * node ordering). These are used when allocating chunks, in an attempt + * to re-use address space. Depending on function, different tree + * orderings are needed, which is why there are two trees with the same + * contents. + */ + extent_tree_t chunks_szad_cached; + extent_tree_t chunks_ad_cached; + extent_tree_t chunks_szad_retained; + extent_tree_t chunks_ad_retained; + + malloc_mutex_t chunks_mtx; + /* Cache of nodes that were allocated via base_alloc(). */ + ql_head(extent_node_t) node_cache; + malloc_mutex_t node_cache_mtx; + + /* User-configurable chunk hook functions. */ + chunk_hooks_t chunk_hooks; + + /* bins is used to store trees of free regions. */ + arena_bin_t bins[NBINS]; +}; +#endif /* JEMALLOC_ARENA_STRUCTS_B */ + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +static const size_t large_pad = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + PAGE +#else + 0 +#endif + ; + +extern ssize_t opt_lg_dirty_mult; + +extern arena_bin_info_t arena_bin_info[NBINS]; + +extern size_t map_bias; /* Number of arena chunk header pages. */ +extern size_t map_misc_offset; +extern size_t arena_maxrun; /* Max run size for arenas. */ +extern size_t large_maxclass; /* Max large size class. */ +extern unsigned nlclasses; /* Number of large size classes. */ +extern unsigned nhclasses; /* Number of huge size classes. */ + +void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, + bool cache); +void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, + bool cache); +extent_node_t *arena_node_alloc(arena_t *arena); +void arena_node_dalloc(arena_t *arena, extent_node_t *node); +void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, + bool *zero); +void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize); +void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, + size_t oldsize, size_t usize); +void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, + size_t oldsize, size_t usize); +bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, + size_t oldsize, size_t usize, bool *zero); +ssize_t arena_lg_dirty_mult_get(arena_t *arena); +bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult); +void arena_maybe_purge(arena_t *arena); +void arena_purge_all(arena_t *arena); +void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, + szind_t binind, uint64_t prof_accumbytes); +void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, + bool zero); +#ifdef JEMALLOC_JET +typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t, + uint8_t); +extern arena_redzone_corruption_t *arena_redzone_corruption; +typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *); +extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; +#else +void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); +#endif +void arena_quarantine_junk_small(void *ptr, size_t usize); +void *arena_malloc_small(arena_t *arena, size_t size, bool zero); +void *arena_malloc_large(arena_t *arena, size_t size, bool zero); +void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache); +void arena_prof_promoted(const void *ptr, size_t size); +void 
arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, + void *ptr, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind, arena_chunk_map_bits_t *bitselm); +void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind); +#ifdef JEMALLOC_JET +typedef void (arena_dalloc_junk_large_t)(void *, size_t); +extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; +#else +void arena_dalloc_junk_large(void *ptr, size_t usize); +#endif +void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, + void *ptr); +void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); +#ifdef JEMALLOC_JET +typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); +extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; +#endif +bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero); +void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache); +dss_prec_t arena_dss_prec_get(arena_t *arena); +bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); +ssize_t arena_lg_dirty_mult_default_get(void); +bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); +void arena_stats_merge(arena_t *arena, const char **dss, + ssize_t *lg_dirty_mult, size_t *nactive, size_t *ndirty, + arena_stats_t *astats, malloc_bin_stats_t *bstats, + malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats); +arena_t *arena_new(unsigned ind); +bool arena_boot(void); +void arena_prefork(arena_t *arena); +void arena_postfork_parent(arena_t *arena); +void arena_postfork_child(arena_t *arena); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk, + size_t pageind); +arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk, + size_t pageind); +size_t arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm); +void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm); +arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd); +arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run); +size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbitsp_read(size_t *mapbitsp); +size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_size_decode(size_t mapbits); +size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, + size_t pageind); +size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind); +szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind); +size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind); +void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); +size_t arena_mapbits_size_encode(size_t size); +void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t 
pageind, + size_t size); +void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, + size_t flags); +void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, + size_t size, size_t flags); +void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + szind_t binind); +void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, + size_t runind, szind_t binind, size_t flags); +void arena_metadata_allocated_add(arena_t *arena, size_t size); +void arena_metadata_allocated_sub(arena_t *arena, size_t size); +size_t arena_metadata_allocated_get(arena_t *arena); +bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); +szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); +szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); +unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, + const void *ptr); +prof_tctx_t *arena_prof_tctx_get(const void *ptr); +void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); +void arena_prof_tctx_reset(const void *ptr, size_t usize, + const void *old_ptr, prof_tctx_t *old_tctx); +void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, + tcache_t *tcache); +arena_t *arena_aalloc(const void *ptr); +size_t arena_salloc(const void *ptr, bool demote); +void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache); +void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) +# ifdef JEMALLOC_ARENA_INLINE_A +JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t * +arena_bitselm_get(arena_chunk_t *chunk, size_t pageind) +{ + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return (&chunk->map_bits[pageind-map_bias]); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_miscelm_get(arena_chunk_t *chunk, size_t pageind) +{ + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return ((arena_chunk_map_misc_t *)((uintptr_t)chunk + + (uintptr_t)map_misc_offset) + pageind-map_bias); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + size_t pageind = ((uintptr_t)miscelm - ((uintptr_t)chunk + + map_misc_offset)) / sizeof(arena_chunk_map_misc_t) + map_bias; + + assert(pageind >= map_bias); + assert(pageind < chunk_npages); + + return (pageind); +} + +JEMALLOC_ALWAYS_INLINE void * +arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + + return ((void *)((uintptr_t)chunk + (pageind << LG_PAGE))); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_rd_to_miscelm(arena_runs_dirty_link_t *rd) +{ + arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t + *)((uintptr_t)rd - offsetof(arena_chunk_map_misc_t, rd)); + + assert(arena_miscelm_to_pageind(miscelm) >= map_bias); + assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); + + return (miscelm); +} + +JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t * +arena_run_to_miscelm(arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = (arena_chunk_map_misc_t + *)((uintptr_t)run - offsetof(arena_chunk_map_misc_t, run)); + + assert(arena_miscelm_to_pageind(miscelm) >= map_bias); + 
assert(arena_miscelm_to_pageind(miscelm) < chunk_npages); + + return (miscelm); +} + +JEMALLOC_ALWAYS_INLINE size_t * +arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) +{ + + return (&arena_bitselm_get(chunk, pageind)->bits); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbitsp_read(size_t *mapbitsp) +{ + + return (*mapbitsp); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) +{ + + return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_decode(size_t mapbits) +{ + size_t size; + +#if CHUNK_MAP_SIZE_SHIFT > 0 + size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + size = mapbits & CHUNK_MAP_SIZE_MASK; +#else + size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT; +#endif + + return (size); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == + CHUNK_MAP_ALLOCATED); + return (mapbits >> CHUNK_MAP_RUNIND_SHIFT); +} + +JEMALLOC_ALWAYS_INLINE szind_t +arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + szind_t binind; + + mapbits = arena_mapbits_get(chunk, pageind); + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + assert(binind < NBINS || binind == BININD_INVALID); + return (binind); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_DIRTY); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_UNZEROED); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_DECOMMITTED) == 0 || (mapbits & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + return (mapbits & CHUNK_MAP_DECOMMITTED); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_LARGE); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) +{ + size_t mapbits; + + mapbits = arena_mapbits_get(chunk, pageind); + return (mapbits & CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_ALWAYS_INLINE void 
+arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) +{ + + *mapbitsp = mapbits; +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_mapbits_size_encode(size_t size) +{ + size_t mapbits; + +#if CHUNK_MAP_SIZE_SHIFT > 0 + mapbits = size << CHUNK_MAP_SIZE_SHIFT; +#elif CHUNK_MAP_SIZE_SHIFT == 0 + mapbits = size; +#else + mapbits = size >> -CHUNK_MAP_SIZE_SHIFT; +#endif + + assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0); + return (mapbits); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + + assert((size & PAGE_MASK) == 0); + assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); + assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + CHUNK_MAP_BININD_INVALID | flags); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, + size_t size) +{ + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); + + assert((size & PAGE_MASK) == 0); + assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + (mapbits & ~CHUNK_MAP_SIZE_MASK)); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + + assert((flags & CHUNK_MAP_UNZEROED) == flags); + arena_mapbitsp_write(mapbitsp, flags); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, + size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + + assert((size & PAGE_MASK) == 0); + assert((flags & CHUNK_MAP_FLAGS_MASK) == flags); + assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags & + (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0); + arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) | + CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, + szind_t binind) +{ + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + size_t mapbits = arena_mapbitsp_read(mapbitsp); + + assert(binind <= BININD_INVALID); + assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS + + large_pad); + arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | + (binind << CHUNK_MAP_BININD_SHIFT)); +} + +JEMALLOC_ALWAYS_INLINE void +arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, + szind_t binind, size_t flags) +{ + size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); + + assert(binind < BININD_INVALID); + assert(pageind - runind >= map_bias); + assert((flags & CHUNK_MAP_UNZEROED) == flags); + arena_mapbitsp_write(mapbitsp, (runind << CHUNK_MAP_RUNIND_SHIFT) | + (binind << CHUNK_MAP_BININD_SHIFT) | flags | CHUNK_MAP_ALLOCATED); +} + +JEMALLOC_INLINE void +arena_metadata_allocated_add(arena_t *arena, size_t size) +{ + + atomic_add_z(&arena->stats.metadata_allocated, size); +} + +JEMALLOC_INLINE void +arena_metadata_allocated_sub(arena_t *arena, size_t size) +{ + + atomic_sub_z(&arena->stats.metadata_allocated, size); +} + +JEMALLOC_INLINE size_t +arena_metadata_allocated_get(arena_t *arena) +{ + + return (atomic_read_z(&arena->stats.metadata_allocated)); +} + +JEMALLOC_INLINE bool 
+arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + assert(prof_interval != 0); + + arena->prof_accumbytes += accumbytes; + if (arena->prof_accumbytes >= prof_interval) { + arena->prof_accumbytes -= prof_interval; + return (true); + } + return (false); +} + +JEMALLOC_INLINE bool +arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + + if (likely(prof_interval == 0)) + return (false); + return (arena_prof_accum_impl(arena, accumbytes)); +} + +JEMALLOC_INLINE bool +arena_prof_accum(arena_t *arena, uint64_t accumbytes) +{ + + cassert(config_prof); + + if (likely(prof_interval == 0)) + return (false); + + { + bool ret; + + malloc_mutex_lock(&arena->lock); + ret = arena_prof_accum_impl(arena, accumbytes); + malloc_mutex_unlock(&arena->lock); + return (ret); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +arena_ptr_small_binind_get(const void *ptr, size_t mapbits) +{ + szind_t binind; + + binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; + + if (config_debug) { + arena_chunk_t *chunk; + arena_t *arena; + size_t pageind; + size_t actual_mapbits; + size_t rpages_ind; + arena_run_t *run; + arena_bin_t *bin; + szind_t run_binind, actual_binind; + arena_bin_info_t *bin_info; + arena_chunk_map_misc_t *miscelm; + void *rpages; + + assert(binind != BININD_INVALID); + assert(binind < NBINS); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = extent_node_arena_get(&chunk->node); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + actual_mapbits = arena_mapbits_get(chunk, pageind); + assert(mapbits == actual_mapbits); + assert(arena_mapbits_large_get(chunk, pageind) == 0); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, + pageind); + miscelm = arena_miscelm_get(chunk, rpages_ind); + run = &miscelm->run; + run_binind = run->binind; + bin = &arena->bins[run_binind]; + actual_binind = bin - arena->bins; + assert(run_binind == actual_binind); + bin_info = &arena_bin_info[actual_binind]; + rpages = arena_miscelm_to_rpages(miscelm); + assert(((uintptr_t)ptr - ((uintptr_t)rpages + + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval + == 0); + } + + return (binind); +} +# endif /* JEMALLOC_ARENA_INLINE_A */ + +# ifdef JEMALLOC_ARENA_INLINE_B +JEMALLOC_INLINE szind_t +arena_bin_index(arena_t *arena, arena_bin_t *bin) +{ + szind_t binind = bin - arena->bins; + assert(binind < NBINS); + return (binind); +} + +JEMALLOC_INLINE unsigned +arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) +{ + unsigned shift, diff, regind; + size_t interval; + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + void *rpages = arena_miscelm_to_rpages(miscelm); + + /* + * Freeing a pointer lower than region zero can cause assertion + * failure. + */ + assert((uintptr_t)ptr >= (uintptr_t)rpages + + (uintptr_t)bin_info->reg0_offset); + + /* + * Avoid doing division with a variable divisor if possible. Using + * actual division here can reduce allocator throughput by over 20%! + */ + diff = (unsigned)((uintptr_t)ptr - (uintptr_t)rpages - + bin_info->reg0_offset); + + /* Rescale (factor powers of 2 out of the numerator and denominator). */ + interval = bin_info->reg_interval; + shift = jemalloc_ffs(interval) - 1; + diff >>= shift; + interval >>= shift; + + if (interval == 1) { + /* The divisor was a power of 2. 
*/ + regind = diff; + } else { + /* + * To divide by a number D that is not a power of two we + * multiply by (2^21 / D) and then right shift by 21 positions. + * + * X / D + * + * becomes + * + * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT + * + * We can omit the first three elements, because we never + * divide by 0, and 1 and 2 are both powers of two, which are + * handled above. + */ +#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) +#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) + static const unsigned interval_invs[] = { + SIZE_INV(3), + SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), + SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), + SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), + SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), + SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), + SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), + SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) + }; + + if (likely(interval <= ((sizeof(interval_invs) / + sizeof(unsigned)) + 2))) { + regind = (diff * interval_invs[interval - 3]) >> + SIZE_INV_SHIFT; + } else + regind = diff / interval; +#undef SIZE_INV +#undef SIZE_INV_SHIFT + } + assert(diff == regind * interval); + assert(regind < bin_info->nregs); + + return (regind); +} + +JEMALLOC_INLINE prof_tctx_t * +arena_prof_tctx_get(const void *ptr) +{ + prof_tctx_t *ret; + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t mapbits = arena_mapbits_get(chunk, pageind); + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) + ret = (prof_tctx_t *)(uintptr_t)1U; + else { + arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk, + pageind); + ret = atomic_read_p(&elm->prof_tctx_pun); + } + } else + ret = huge_prof_tctx_get(ptr); + + return (ret); +} + +JEMALLOC_INLINE void +arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + + if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx > + (uintptr_t)1U)) { + arena_chunk_map_misc_t *elm; + + assert(arena_mapbits_large_get(chunk, pageind) != 0); + + elm = arena_miscelm_get(chunk, pageind); + atomic_write_p(&elm->prof_tctx_pun, tctx); + } else { + /* + * tctx must always be initialized for large runs. + * Assert that the surrounding conditional logic is + * equivalent to checking whether ptr refers to a large + * run. 
+ */ + assert(arena_mapbits_large_get(chunk, pageind) == 0); + } + } else + huge_prof_tctx_set(ptr, tctx); +} + +JEMALLOC_INLINE void +arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, + prof_tctx_t *old_tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr && + (uintptr_t)old_tctx > (uintptr_t)1U))) { + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + size_t pageind; + arena_chunk_map_misc_t *elm; + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> + LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != + 0); + assert(arena_mapbits_large_get(chunk, pageind) != 0); + + elm = arena_miscelm_get(chunk, pageind); + atomic_write_p(&elm->prof_tctx_pun, + (prof_tctx_t *)(uintptr_t)1U); + } else + huge_prof_tctx_reset(ptr); + } +} + +JEMALLOC_ALWAYS_INLINE void * +arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, + tcache_t *tcache) +{ + + assert(size != 0); + + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) + return (NULL); + + if (likely(size <= SMALL_MAXCLASS)) { + if (likely(tcache != NULL)) { + return (tcache_alloc_small(tsd, arena, tcache, size, + zero)); + } else + return (arena_malloc_small(arena, size, zero)); + } else if (likely(size <= large_maxclass)) { + /* + * Initialize tcache after checking size in order to avoid + * infinite recursion during tcache initialization. + */ + if (likely(tcache != NULL) && size <= tcache_maxclass) { + return (tcache_alloc_large(tsd, arena, tcache, size, + zero)); + } else + return (arena_malloc_large(arena, size, zero)); + } else + return (huge_malloc(tsd, arena, size, zero, tcache)); +} + +JEMALLOC_ALWAYS_INLINE arena_t * +arena_aalloc(const void *ptr) +{ + arena_chunk_t *chunk; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) + return (extent_node_arena_get(&chunk->node)); + else + return (huge_aalloc(ptr)); +} + +/* Return the size of the allocation pointed to by ptr. */ +JEMALLOC_ALWAYS_INLINE size_t +arena_salloc(const void *ptr, bool demote) +{ + size_t ret; + arena_chunk_t *chunk; + size_t pageind; + szind_t binind; + + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + binind = arena_mapbits_binind_get(chunk, pageind); + if (unlikely(binind == BININD_INVALID || (config_prof && !demote + && arena_mapbits_large_get(chunk, pageind) != 0))) { + /* + * Large allocation. In the common case (demote), and + * as this is an inline function, most callers will only + * end up looking at binind to determine that ptr is a + * small allocation. + */ + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + ret = arena_mapbits_large_size_get(chunk, pageind) - + large_pad; + assert(ret != 0); + assert(pageind + ((ret+large_pad)>>LG_PAGE) <= + chunk_npages); + assert(arena_mapbits_dirty_get(chunk, pageind) == + arena_mapbits_dirty_get(chunk, + pageind+((ret+large_pad)>>LG_PAGE)-1)); + } else { + /* + * Small allocation (possibly promoted to a large + * object). 
+ */ + assert(arena_mapbits_large_get(chunk, pageind) != 0 || + arena_ptr_small_binind_get(ptr, + arena_mapbits_get(chunk, pageind)) == binind); + ret = index2size(binind); + } + } else + ret = huge_salloc(ptr); + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) +{ + arena_chunk_t *chunk; + size_t pageind, mapbits; + + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapbits = arena_mapbits_get(chunk, pageind); + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { + /* Small allocation. */ + if (likely(tcache != NULL)) { + szind_t binind = arena_ptr_small_binind_get(ptr, + mapbits); + tcache_dalloc_small(tsd, tcache, ptr, binind); + } else { + arena_dalloc_small(extent_node_arena_get( + &chunk->node), chunk, ptr, pageind); + } + } else { + size_t size = arena_mapbits_large_size_get(chunk, + pageind); + + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + + if (likely(tcache != NULL) && size - large_pad <= + tcache_maxclass) { + tcache_dalloc_large(tsd, tcache, ptr, size - + large_pad); + } else { + arena_dalloc_large(extent_node_arena_get( + &chunk->node), chunk, ptr); + } + } + } else + huge_dalloc(tsd, ptr, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) +{ + arena_chunk_t *chunk; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (likely(chunk != ptr)) { + if (config_prof && opt_prof) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> + LG_PAGE; + assert(arena_mapbits_allocated_get(chunk, pageind) != 0); + if (arena_mapbits_large_get(chunk, pageind) != 0) { + /* + * Make sure to use promoted size, not request + * size. + */ + size = arena_mapbits_large_size_get(chunk, + pageind) - large_pad; + } + } + assert(s2u(size) == s2u(arena_salloc(ptr, false))); + + if (likely(size <= SMALL_MAXCLASS)) { + /* Small allocation. 
*/ + if (likely(tcache != NULL)) { + szind_t binind = size2index(size); + tcache_dalloc_small(tsd, tcache, ptr, binind); + } else { + size_t pageind = ((uintptr_t)ptr - + (uintptr_t)chunk) >> LG_PAGE; + arena_dalloc_small(extent_node_arena_get( + &chunk->node), chunk, ptr, pageind); + } + } else { + assert(config_cache_oblivious || ((uintptr_t)ptr & + PAGE_MASK) == 0); + + if (likely(tcache != NULL) && size <= tcache_maxclass) + tcache_dalloc_large(tsd, tcache, ptr, size); + else { + arena_dalloc_large(extent_node_arena_get( + &chunk->node), chunk, ptr); + } + } + } else + huge_dalloc(tsd, ptr, tcache); +} +# endif /* JEMALLOC_ARENA_INLINE_B */ +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/atomic.h b/redis.submodule/jemalloc/include/jemalloc/internal/atomic.h new file mode 100644 index 0000000..a9aad35 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/atomic.h @@ -0,0 +1,651 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#define atomic_read_uint64(p) atomic_add_uint64(p, 0) +#define atomic_read_uint32(p) atomic_add_uint32(p, 0) +#define atomic_read_p(p) atomic_add_p(p, NULL) +#define atomic_read_z(p) atomic_add_z(p, 0) +#define atomic_read_u(p) atomic_add_u(p, 0) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +/* + * All arithmetic functions return the arithmetic result of the atomic + * operation. Some atomic operation APIs return the value prior to mutation, in + * which case the following functions must redundantly compute the result so + * that it can be returned. These functions are normally inlined, so the extra + * operations can be optimized away if the return values aren't used by the + * callers. 
+ * + * atomic_read_( *p) { return (*p); } + * atomic_add_( *p, x) { return (*p + x); } + * atomic_sub_( *p, x) { return (*p - x); } + * bool atomic_cas_( *p, c, s) + * { + * if (*p != c) + * return (true); + * *p = s; + * return (false); + * } + * void atomic_write_( *p, x) { *p = x; } + */ + +#ifndef JEMALLOC_ENABLE_INLINE +uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); +uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); +bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s); +void atomic_write_uint64(uint64_t *p, uint64_t x); +uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); +uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); +bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s); +void atomic_write_uint32(uint32_t *p, uint32_t x); +void *atomic_add_p(void **p, void *x); +void *atomic_sub_p(void **p, void *x); +bool atomic_cas_p(void **p, void *c, void *s); +void atomic_write_p(void **p, const void *x); +size_t atomic_add_z(size_t *p, size_t x); +size_t atomic_sub_z(size_t *p, size_t x); +bool atomic_cas_z(size_t *p, size_t c, size_t s); +void atomic_write_z(size_t *p, size_t x); +unsigned atomic_add_u(unsigned *p, unsigned x); +unsigned atomic_sub_u(unsigned *p, unsigned x); +bool atomic_cas_u(unsigned *p, unsigned c, unsigned s); +void atomic_write_u(unsigned *p, unsigned x); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) +/******************************************************************************/ +/* 64-bit operations. */ +#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) +# if (defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + uint64_t t = x; + + asm volatile ( + "lock; xaddq %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + uint64_t t; + + x = (uint64_t)(-(int64_t)x); + t = x; + asm volatile ( + "lock; xaddq %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + uint8_t success; + + asm volatile ( + "lock; cmpxchgq %4, %0;" + "sete %1;" + : "=m" (*p), "=a" (success) /* Outputs. */ + : "m" (*p), "a" (c), "r" (s) /* Inputs. */ + : "memory" /* Clobbers. */ + ); + + return (!(bool)success); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + asm volatile ( + "xchgq %1, %0;" /* Lock is implied by xchgq. */ + : "=m" (*p), "+r" (x) /* Outputs. */ + : "m" (*p) /* Inputs. */ + : "memory" /* Clobbers. 
*/ + ); +} +# elif (defined(JEMALLOC_C11ATOMICS)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (atomic_fetch_add(a, x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (atomic_fetch_sub(a, x) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + return (!atomic_compare_exchange_strong(a, &c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; + atomic_store(a, x); +} +# elif (defined(JEMALLOC_ATOMIC9)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + /* + * atomic_fetchadd_64() doesn't exist, but we only ever use this + * function on LP64 systems, so atomic_fetchadd_long() will do. + */ + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (atomic_fetchadd_long(p, (unsigned long)x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + assert(sizeof(uint64_t) == sizeof(unsigned long)); + + atomic_store_rel_long(p, x); +} +# elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + uint64_t o; + + /*The documented OSAtomic*() API does not expose an atomic exchange. 
*/ + do { + o = atomic_read_uint64(p); + } while (atomic_cas_uint64(p, o, x)); +} +# elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, x) + x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + uint64_t o; + + o = InterlockedCompareExchange64(p, s, c); + return (o != c); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + InterlockedExchange64(p, x); +} +# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ + defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) +{ + + return (!__sync_bool_compare_and_swap(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint64(uint64_t *p, uint64_t x) +{ + + __sync_lock_test_and_set(p, x); +} +# else +# error "Missing implementation for 64-bit atomic operations" +# endif +#endif + +/******************************************************************************/ +/* 32-bit operations. */ +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + uint32_t t = x; + + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + uint32_t t; + + x = (uint32_t)(-(int32_t)x); + t = x; + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (t), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (t + x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + uint8_t success; + + asm volatile ( + "lock; cmpxchgl %4, %0;" + "sete %1;" + : "=m" (*p), "=a" (success) /* Outputs. */ + : "m" (*p), "a" (c), "r" (s) /* Inputs. */ + : "memory" + ); + + return (!(bool)success); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + asm volatile ( + "xchgl %1, %0;" /* Lock is implied by xchgl. */ + : "=m" (*p), "+r" (x) /* Outputs. */ + : "m" (*p) /* Inputs. */ + : "memory" /* Clobbers. 
*/ + ); +} +# elif (defined(JEMALLOC_C11ATOMICS)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (atomic_fetch_add(a, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (atomic_fetch_sub(a, x) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + return (!atomic_compare_exchange_strong(a, &c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; + atomic_store(a, x); +} +#elif (defined(JEMALLOC_ATOMIC9)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (atomic_fetchadd_32(p, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!atomic_cmpset_32(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + atomic_store_rel_32(p, x); +} +#elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + uint32_t o; + + /*The documented OSAtomic*() API does not expose an atomic exchange. */ + do { + o = atomic_read_uint32(p); + } while (atomic_cas_uint32(p, o, x)); +} +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, x) + x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + uint32_t o; + + o = InterlockedCompareExchange(p, s, c); + return (o != c); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + InterlockedExchange(p, x); +} +#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ + defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} + +JEMALLOC_INLINE bool +atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) +{ + + return (!__sync_bool_compare_and_swap(p, c, s)); +} + +JEMALLOC_INLINE void +atomic_write_uint32(uint32_t *p, uint32_t x) +{ + + __sync_lock_test_and_set(p, x); +} +#else +# error "Missing implementation for 32-bit atomic operations" +#endif + +/******************************************************************************/ +/* Pointer operations. 
*/ +JEMALLOC_INLINE void * +atomic_add_p(void **p, void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_PTR == 2) + return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE void * +atomic_sub_p(void **p, void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((void *)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_PTR == 2) + return ((void *)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_p(void **p, void *c, void *s) +{ + +#if (LG_SIZEOF_PTR == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_PTR == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_p(void **p, const void *x) +{ + +#if (LG_SIZEOF_PTR == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_PTR == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +/* size_t operations. */ +JEMALLOC_INLINE size_t +atomic_add_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE size_t +atomic_sub_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_z(size_t *p, size_t c, size_t s) +{ + +#if (LG_SIZEOF_PTR == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_PTR == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_PTR == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +/* unsigned operations. 
*/ +JEMALLOC_INLINE unsigned +atomic_add_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE unsigned +atomic_sub_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +JEMALLOC_INLINE bool +atomic_cas_u(unsigned *p, unsigned c, unsigned s) +{ + +#if (LG_SIZEOF_INT == 3) + return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); +#elif (LG_SIZEOF_INT == 2) + return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); +#endif +} + +JEMALLOC_INLINE void +atomic_write_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + atomic_write_uint64((uint64_t *)p, (uint64_t)x); +#elif (LG_SIZEOF_INT == 2) + atomic_write_uint32((uint32_t *)p, (uint32_t)x); +#endif +} + +/******************************************************************************/ +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/base.h b/redis.submodule/jemalloc/include/jemalloc/internal/base.h new file mode 100644 index 0000000..39e46ee --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/base.h @@ -0,0 +1,24 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *base_alloc(size_t size); +void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped); +bool base_boot(void); +void base_prefork(void); +void base_postfork_parent(void); +void base_postfork_child(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/bitmap.h b/redis.submodule/jemalloc/include/jemalloc/internal/bitmap.h new file mode 100644 index 0000000..fcc6005 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/bitmap.h @@ -0,0 +1,230 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) + +typedef struct bitmap_level_s bitmap_level_t; +typedef struct bitmap_info_s bitmap_info_t; +typedef unsigned long bitmap_t; +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Number of bits per group. */ +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* Number of groups required to store a given number of bits. 
*/ +#define BITMAP_BITS2GROUPS(nbits) \ + ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) + +/* + * Number of groups required at a particular level for a given number of bits. + */ +#define BITMAP_GROUPS_L0(nbits) \ + BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS((nbits))))) + +/* + * Assuming the number of levels, number of groups required for a given number + * of bits. + */ +#define BITMAP_GROUPS_1_LEVEL(nbits) \ + BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) + +/* + * Maximum number of groups required to support LG_BITMAP_MAXBITS. + */ +#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +#else +# error "Unsupported bitmap size" +#endif + +/* Maximum number of levels possible. */ +#define BITMAP_MAX_LEVELS \ + (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct bitmap_level_s { + /* Offset of this level's groups within the array of groups. */ + size_t group_offset; +}; + +struct bitmap_info_s { + /* Logical number of bits in bitmap (stored at bottom level). */ + size_t nbits; + + /* Number of levels necessary for nbits. */ + unsigned nlevels; + + /* + * Only the first (nlevels+1) elements are used, and levels are ordered + * bottom to top (e.g. the bottom level is stored in levels[0]). 
+ */ + bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +size_t bitmap_info_ngroups(const bitmap_info_t *binfo); +size_t bitmap_size(size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); +bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); +void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) +JEMALLOC_INLINE bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; + bitmap_t rg = bitmap[rgoff]; + /* The bitmap is full iff the root group is 0. */ + return (rg == 0); +} + +JEMALLOC_INLINE bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t g; + + assert(bit < binfo->nbits); + goff = bit >> LG_BITMAP_GROUP_NBITS; + g = bitmap[goff]; + return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); +} + +JEMALLOC_INLINE void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + + assert(bit < binfo->nbits); + assert(!bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit)); + /* Propagate group state transitions up the tree. */ + if (g == 0) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (g != 0) + break; + } + } +} + +/* sfu: set first unset. */ +JEMALLOC_INLINE size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t bit; + bitmap_t g; + unsigned i; + + assert(!bitmap_full(bitmap, binfo)); + + i = binfo->nlevels - 1; + g = bitmap[binfo->levels[i].group_offset]; + bit = jemalloc_ffsl(g) - 1; + while (i > 0) { + i--; + g = bitmap[binfo->levels[i].group_offset + bit]; + bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1); + } + + bitmap_set(bitmap, binfo, bit); + return (bit); +} + +JEMALLOC_INLINE void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + bool propagate; + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + propagate = (g == 0); + assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(!bitmap_get(bitmap, binfo, bit)); + /* Propagate group state transitions up the tree. 
*/ + if (propagate) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + propagate = (g == 0); + assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) + == 0); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (!propagate) + break; + } + } +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/chunk.h b/redis.submodule/jemalloc/include/jemalloc/internal/chunk.h new file mode 100644 index 0000000..5d19383 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/chunk.h @@ -0,0 +1,99 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Size and alignment of memory chunks that are allocated by the OS's virtual + * memory system. + */ +#define LG_CHUNK_DEFAULT 21 + +/* Return the chunk address for allocation address a. */ +#define CHUNK_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~chunksize_mask)) + +/* Return the chunk offset of address a. */ +#define CHUNK_ADDR2OFFSET(a) \ + ((size_t)((uintptr_t)(a) & chunksize_mask)) + +/* Return the smallest chunk multiple that is >= s. */ +#define CHUNK_CEILING(s) \ + (((s) + chunksize_mask) & ~chunksize_mask) + +#define CHUNK_HOOKS_INITIALIZER { \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL, \ + NULL \ +} + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern size_t opt_lg_chunk; +extern const char *opt_dss; + +extern rtree_t chunks_rtree; + +extern size_t chunksize; +extern size_t chunksize_mask; /* (chunksize - 1). 
*/ +extern size_t chunk_npages; + +extern const chunk_hooks_t chunk_hooks_default; + +chunk_hooks_t chunk_hooks_get(arena_t *arena); +chunk_hooks_t chunk_hooks_set(arena_t *arena, + const chunk_hooks_t *chunk_hooks); + +bool chunk_register(const void *chunk, const extent_node_t *node); +void chunk_deregister(const void *chunk, const extent_node_t *node); +void *chunk_alloc_base(size_t size); +void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, bool *zero, + bool dalloc_node); +void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); +void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, bool committed); +void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, bool zeroed, bool committed); +void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, bool committed); +bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, + size_t length); +bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t size, size_t offset, size_t length); +bool chunk_boot(void); +void chunk_prefork(void); +void chunk_postfork_parent(void); +void chunk_postfork_child(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +extent_node_t *chunk_lookup(const void *chunk, bool dependent); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) +JEMALLOC_INLINE extent_node_t * +chunk_lookup(const void *ptr, bool dependent) +{ + + return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + +#include "jemalloc/internal/chunk_dss.h" +#include "jemalloc/internal/chunk_mmap.h" diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/chunk_dss.h b/redis.submodule/jemalloc/include/jemalloc/internal/chunk_dss.h new file mode 100644 index 0000000..388f46b --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/chunk_dss.h @@ -0,0 +1,39 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef enum { + dss_prec_disabled = 0, + dss_prec_primary = 1, + dss_prec_secondary = 2, + + dss_prec_limit = 3 +} dss_prec_t; +#define DSS_PREC_DEFAULT dss_prec_secondary +#define DSS_DEFAULT "secondary" + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +extern const char *dss_prec_names[]; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +dss_prec_t chunk_dss_prec_get(void); +bool chunk_dss_prec_set(dss_prec_t dss_prec); +void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit); +bool chunk_in_dss(void *chunk); +bool chunk_dss_boot(void); +void chunk_dss_prefork(void); +void chunk_dss_postfork_parent(void); +void chunk_dss_postfork_child(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ 
+/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/chunk_mmap.h b/redis.submodule/jemalloc/include/jemalloc/internal/chunk_mmap.h new file mode 100644 index 0000000..7d8014c --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/chunk_mmap.h @@ -0,0 +1,21 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, + bool *commit); +bool chunk_dalloc_mmap(void *chunk, size_t size); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/ckh.h b/redis.submodule/jemalloc/include/jemalloc/internal/ckh.h new file mode 100644 index 0000000..75c1c97 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/ckh.h @@ -0,0 +1,88 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ckh_s ckh_t; +typedef struct ckhc_s ckhc_t; + +/* Typedefs to allow easy function pointer passing. */ +typedef void ckh_hash_t (const void *, size_t[2]); +typedef bool ckh_keycomp_t (const void *, const void *); + +/* Maintain counters used to get an idea of performance. */ +/* #define CKH_COUNT */ +/* Print counter values in ckh_delete() (requires CKH_COUNT). */ +/* #define CKH_VERBOSE */ + +/* + * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit + * one bucket per L1 cache line. + */ +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +/* Hash table cell. */ +struct ckhc_s { + const void *key; + const void *data; +}; + +struct ckh_s { +#ifdef CKH_COUNT + /* Counters used to get an idea of performance. */ + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; +#endif + + /* Used for pseudo-random number generation. */ +#define CKH_A 1103515241 +#define CKH_C 12347 + uint32_t prng_state; + + /* Total number of items. */ + size_t count; + + /* + * Minimum and current number of hash table buckets. There are + * 2^LG_CKH_BUCKET_CELLS cells per bucket. + */ + unsigned lg_minbuckets; + unsigned lg_curbuckets; + + /* Hash and comparison functions. */ + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; + + /* Hash table with 2^lg_curbuckets buckets. 
*/ + ckhc_t *tab; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp); +void ckh_delete(tsd_t *tsd, ckh_t *ckh); +size_t ckh_count(ckh_t *ckh); +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data); +bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); +void ckh_string_hash(const void *key, size_t r_hash[2]); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, size_t r_hash[2]); +bool ckh_pointer_keycomp(const void *k1, const void *k2); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/ctl.h b/redis.submodule/jemalloc/include/jemalloc/internal/ctl.h new file mode 100644 index 0000000..751c14b --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/ctl.h @@ -0,0 +1,111 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ctl_node_s ctl_node_t; +typedef struct ctl_named_node_s ctl_named_node_t; +typedef struct ctl_indexed_node_s ctl_indexed_node_t; +typedef struct ctl_arena_stats_s ctl_arena_stats_t; +typedef struct ctl_stats_s ctl_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct ctl_node_s { + bool named; +}; + +struct ctl_named_node_s { + struct ctl_node_s node; + const char *name; + /* If (nchildren == 0), this is a terminal node. */ + unsigned nchildren; + const ctl_node_t *children; + int (*ctl)(const size_t *, size_t, void *, size_t *, + void *, size_t); +}; + +struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); +}; + +struct ctl_arena_stats_s { + bool initialized; + unsigned nthreads; + const char *dss; + ssize_t lg_dirty_mult; + size_t pactive; + size_t pdirty; + arena_stats_t astats; + + /* Aggregate stats for small size classes, based on bin stats. */ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + + malloc_bin_stats_t bstats[NBINS]; + malloc_large_stats_t *lstats; /* nlclasses elements. */ + malloc_huge_stats_t *hstats; /* nhclasses elements. */ +}; + +struct ctl_stats_s { + size_t allocated; + size_t active; + size_t metadata; + size_t resident; + size_t mapped; + unsigned narenas; + ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
*/ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen); +int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); + +int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen); +bool ctl_boot(void); +void ctl_prefork(void); +void ctl_postfork_parent(void); +void ctl_postfork_child(void); + +#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_printf( \ + ": Failure in xmallctl(\"%s\", ...)\n", \ + name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlnametomib(name, mibp, miblenp) do { \ + if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ + malloc_printf(": Failure in " \ + "xmallctlnametomib(\"%s\", ...)\n", name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ + newlen) != 0) { \ + malloc_write( \ + ": Failure in xmallctlbymib()\n"); \ + abort(); \ + } \ +} while (0) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/extent.h b/redis.submodule/jemalloc/include/jemalloc/internal/extent.h new file mode 100644 index 0000000..386d50e --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/extent.h @@ -0,0 +1,239 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct extent_node_s extent_node_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +/* Tree of extents. Use accessor functions for en_* fields. */ +struct extent_node_s { + /* Arena from which this extent came, if any. */ + arena_t *en_arena; + + /* Pointer to the extent that this tree node is responsible for. */ + void *en_addr; + + /* Total region size. */ + size_t en_size; + + /* + * The zeroed flag is used by chunk recycling code to track whether + * memory is zero-filled. + */ + bool en_zeroed; + + /* + * True if physical memory is committed to the extent, whether + * explicitly or implicitly as on a system that overcommits and + * satisfies physical memory needs on demand via soft page faults. + */ + bool en_committed; + + /* + * The achunk flag is used to validate that huge allocation lookups + * don't return arena chunks. + */ + bool en_achunk; + + /* Profile counters, used for huge objects. */ + prof_tctx_t *en_prof_tctx; + + /* Linkage for arena's runs_dirty and chunks_cache rings. */ + arena_runs_dirty_link_t rd; + qr(extent_node_t) cc_link; + + union { + /* Linkage for the size/address-ordered tree. */ + rb_node(extent_node_t) szad_link; + + /* Linkage for arena's huge and node_cache lists. */ + ql_elm(extent_node_t) ql_link; + }; + + /* Linkage for the address-ordered tree. 
*/ + rb_node(extent_node_t) ad_link; +}; +typedef rb_tree(extent_node_t) extent_tree_t; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) + +rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +arena_t *extent_node_arena_get(const extent_node_t *node); +void *extent_node_addr_get(const extent_node_t *node); +size_t extent_node_size_get(const extent_node_t *node); +bool extent_node_zeroed_get(const extent_node_t *node); +bool extent_node_committed_get(const extent_node_t *node); +bool extent_node_achunk_get(const extent_node_t *node); +prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); +void extent_node_arena_set(extent_node_t *node, arena_t *arena); +void extent_node_addr_set(extent_node_t *node, void *addr); +void extent_node_size_set(extent_node_t *node, size_t size); +void extent_node_zeroed_set(extent_node_t *node, bool zeroed); +void extent_node_committed_set(extent_node_t *node, bool committed); +void extent_node_achunk_set(extent_node_t *node, bool achunk); +void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); +void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, + size_t size, bool zeroed, bool committed); +void extent_node_dirty_linkage_init(extent_node_t *node); +void extent_node_dirty_insert(extent_node_t *node, + arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); +void extent_node_dirty_remove(extent_node_t *node); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) +JEMALLOC_INLINE arena_t * +extent_node_arena_get(const extent_node_t *node) +{ + + return (node->en_arena); +} + +JEMALLOC_INLINE void * +extent_node_addr_get(const extent_node_t *node) +{ + + return (node->en_addr); +} + +JEMALLOC_INLINE size_t +extent_node_size_get(const extent_node_t *node) +{ + + return (node->en_size); +} + +JEMALLOC_INLINE bool +extent_node_zeroed_get(const extent_node_t *node) +{ + + return (node->en_zeroed); +} + +JEMALLOC_INLINE bool +extent_node_committed_get(const extent_node_t *node) +{ + + assert(!node->en_achunk); + return (node->en_committed); +} + +JEMALLOC_INLINE bool +extent_node_achunk_get(const extent_node_t *node) +{ + + return (node->en_achunk); +} + +JEMALLOC_INLINE prof_tctx_t * +extent_node_prof_tctx_get(const extent_node_t *node) +{ + + return (node->en_prof_tctx); +} + +JEMALLOC_INLINE void +extent_node_arena_set(extent_node_t *node, arena_t *arena) +{ + + node->en_arena = arena; +} + +JEMALLOC_INLINE void +extent_node_addr_set(extent_node_t *node, void *addr) +{ + + node->en_addr = addr; +} + +JEMALLOC_INLINE void +extent_node_size_set(extent_node_t *node, size_t size) +{ + + node->en_size = size; +} + +JEMALLOC_INLINE void +extent_node_zeroed_set(extent_node_t *node, bool zeroed) +{ + + node->en_zeroed = zeroed; +} + +JEMALLOC_INLINE void +extent_node_committed_set(extent_node_t *node, bool committed) +{ + + node->en_committed = committed; +} + +JEMALLOC_INLINE void +extent_node_achunk_set(extent_node_t *node, bool achunk) +{ + + node->en_achunk = achunk; +} + +JEMALLOC_INLINE void +extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) +{ + + node->en_prof_tctx = tctx; +} + +JEMALLOC_INLINE void +extent_node_init(extent_node_t 
*node, arena_t *arena, void *addr, size_t size, + bool zeroed, bool committed) +{ + + extent_node_arena_set(node, arena); + extent_node_addr_set(node, addr); + extent_node_size_set(node, size); + extent_node_zeroed_set(node, zeroed); + extent_node_committed_set(node, committed); + extent_node_achunk_set(node, false); + if (config_prof) + extent_node_prof_tctx_set(node, NULL); +} + +JEMALLOC_INLINE void +extent_node_dirty_linkage_init(extent_node_t *node) +{ + + qr_new(&node->rd, rd_link); + qr_new(node, cc_link); +} + +JEMALLOC_INLINE void +extent_node_dirty_insert(extent_node_t *node, + arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) +{ + + qr_meld(runs_dirty, &node->rd, rd_link); + qr_meld(chunks_dirty, node, cc_link); +} + +JEMALLOC_INLINE void +extent_node_dirty_remove(extent_node_t *node) +{ + + qr_remove(&node->rd, rd_link); + qr_remove(node, cc_link); +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/hash.h b/redis.submodule/jemalloc/include/jemalloc/internal/hash.h new file mode 100644 index 0000000..bcead33 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/hash.h @@ -0,0 +1,336 @@ +/* + * The following hash function is based on MurmurHash3, placed into the public + * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for + * details. + */ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +uint32_t hash_x86_32(const void *key, int len, uint32_t seed); +void hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]); +void hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]); +void hash(const void *key, size_t len, const uint32_t seed, + size_t r_hash[2]); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) +/******************************************************************************/ +/* Internal implementation. 
*/ +JEMALLOC_INLINE uint32_t +hash_rotl_32(uint32_t x, int8_t r) +{ + + return ((x << r) | (x >> (32 - r))); +} + +JEMALLOC_INLINE uint64_t +hash_rotl_64(uint64_t x, int8_t r) +{ + + return ((x << r) | (x >> (64 - r))); +} + +JEMALLOC_INLINE uint32_t +hash_get_block_32(const uint32_t *p, int i) +{ + + return (p[i]); +} + +JEMALLOC_INLINE uint64_t +hash_get_block_64(const uint64_t *p, int i) +{ + + return (p[i]); +} + +JEMALLOC_INLINE uint32_t +hash_fmix_32(uint32_t h) +{ + + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return (h); +} + +JEMALLOC_INLINE uint64_t +hash_fmix_64(uint64_t k) +{ + + k ^= k >> 33; + k *= KQU(0xff51afd7ed558ccd); + k ^= k >> 33; + k *= KQU(0xc4ceb9fe1a85ec53); + k ^= k >> 33; + + return (k); +} + +JEMALLOC_INLINE uint32_t +hash_x86_32(const void *key, int len, uint32_t seed) +{ + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i); + + k1 *= c1; + k1 = hash_rotl_32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = hash_rotl_32(h1, 13); + h1 = h1*5 + 0xe6546b64; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*4); + + uint32_t k1 = 0; + + switch (len & 3) { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); + k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; + + h1 = hash_fmix_32(h1); + + return (h1); +} + +UNUSED JEMALLOC_INLINE void +hash_x86_128(const void *key, const int len, uint32_t seed, + uint64_t r_out[2]) +{ + const uint8_t * data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + const uint32_t c1 = 0x239b961b; + const uint32_t c2 = 0xab0e9789; + const uint32_t c3 = 0x38b34ae5; + const uint32_t c4 = 0xa1e38b93; + + /* body */ + { + const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); + int i; + + for (i = -nblocks; i; i++) { + uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); + uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); + uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); + uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); + + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_32(h1, 19); h1 += h2; + h1 = h1*5 + 0x561ccd1b; + + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + h2 = hash_rotl_32(h2, 17); h2 += h3; + h2 = h2*5 + 0x0bcaa747; + + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + h3 = hash_rotl_32(h3, 15); h3 += h4; + h3 = h3*5 + 0x96cd1c35; + + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + h4 = hash_rotl_32(h4, 13); h4 += h1; + h4 = h4*5 + 0x32ac3b17; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t *) (data + nblocks*16); + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch (len & 15) { + case 15: k4 ^= tail[14] << 16; + case 14: k4 ^= tail[13] << 8; + case 13: k4 ^= tail[12] << 0; + k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; + + case 12: k3 ^= tail[11] << 24; + case 11: k3 ^= tail[10] << 16; + case 10: k3 ^= tail[ 9] << 8; + case 9: k3 ^= tail[ 8] << 0; + k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; + + case 8: k2 ^= tail[ 7] << 24; + case 7: k2 ^= 
tail[ 6] << 16; + case 6: k2 ^= tail[ 5] << 8; + case 5: k2 ^= tail[ 4] << 0; + k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; + + case 4: k1 ^= tail[ 3] << 24; + case 3: k1 ^= tail[ 2] << 16; + case 2: k1 ^= tail[ 1] << 8; + case 1: k1 ^= tail[ 0] << 0; + k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + h1 = hash_fmix_32(h1); + h2 = hash_fmix_32(h2); + h3 = hash_fmix_32(h3); + h4 = hash_fmix_32(h4); + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + r_out[0] = (((uint64_t) h2) << 32) | h1; + r_out[1] = (((uint64_t) h4) << 32) | h3; +} + +UNUSED JEMALLOC_INLINE void +hash_x64_128(const void *key, const int len, const uint32_t seed, + uint64_t r_out[2]) +{ + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len / 16; + + uint64_t h1 = seed; + uint64_t h2 = seed; + + const uint64_t c1 = KQU(0x87c37b91114253d5); + const uint64_t c2 = KQU(0x4cf5ad432745937f); + + /* body */ + { + const uint64_t *blocks = (const uint64_t *) (data); + int i; + + for (i = 0; i < nblocks; i++) { + uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); + uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); + + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + + h1 = hash_rotl_64(h1, 27); h1 += h2; + h1 = h1*5 + 0x52dce729; + + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + h2 = hash_rotl_64(h2, 31); h2 += h1; + h2 = h2*5 + 0x38495ab5; + } + } + + /* tail */ + { + const uint8_t *tail = (const uint8_t*)(data + nblocks*16); + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch (len & 15) { + case 15: k2 ^= ((uint64_t)(tail[14])) << 48; + case 14: k2 ^= ((uint64_t)(tail[13])) << 40; + case 13: k2 ^= ((uint64_t)(tail[12])) << 32; + case 12: k2 ^= ((uint64_t)(tail[11])) << 24; + case 11: k2 ^= ((uint64_t)(tail[10])) << 16; + case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; + case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; + k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; + + case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; + case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; + case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; + case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; + case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; + case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; + case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; + case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; + k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; + } + } + + /* finalization */ + h1 ^= len; h2 ^= len; + + h1 += h2; + h2 += h1; + + h1 = hash_fmix_64(h1); + h2 = hash_fmix_64(h2); + + h1 += h2; + h2 += h1; + + r_out[0] = h1; + r_out[1] = h2; +} + +/******************************************************************************/ +/* API. 
*/ +JEMALLOC_INLINE void +hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) +{ +#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) + hash_x64_128(key, len, seed, (uint64_t *)r_hash); +#else + uint64_t hashes[2]; + hash_x86_128(key, len, seed, hashes); + r_hash[0] = (size_t)hashes[0]; + r_hash[1] = (size_t)hashes[1]; +#endif +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/huge.h b/redis.submodule/jemalloc/include/jemalloc/internal/huge.h new file mode 100644 index 0000000..ece7af9 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/huge.h @@ -0,0 +1,36 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, + tcache_t *tcache); +void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment, + bool zero, tcache_t *tcache); +bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, + size_t usize_max, bool zero); +void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache); +#ifdef JEMALLOC_JET +typedef void (huge_dalloc_junk_t)(void *, size_t); +extern huge_dalloc_junk_t *huge_dalloc_junk; +#endif +void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache); +arena_t *huge_aalloc(const void *ptr); +size_t huge_salloc(const void *ptr); +prof_tctx_t *huge_prof_tctx_get(const void *ptr); +void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx); +void huge_prof_tctx_reset(const void *ptr); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in new file mode 100644 index 0000000..654cd08 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in @@ -0,0 +1,1138 @@ +#ifndef JEMALLOC_INTERNAL_H +#define JEMALLOC_INTERNAL_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "../jemalloc@install_suffix@.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) @private_namespace@##n +# include "../jemalloc@install_suffix@.h" +#endif +#include "jemalloc/internal/private_namespace.h" + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; 
+static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_munmap = +#ifdef JEMALLOC_MUNMAP + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_tcache = +#ifdef JEMALLOC_TCACHE + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_valgrind = +#ifdef JEMALLOC_VALGRIND + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_ivsalloc = +#ifdef JEMALLOC_IVSALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; + +#ifdef JEMALLOC_C11ATOMICS +#include +#endif + +#ifdef JEMALLOC_ATOMIC9 +#include +#endif + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include +#endif + +#ifdef JEMALLOC_ZONE +#include +#include +#include +#include +#endif + +#define RB_COMPACT +#include "jemalloc/internal/rb.h" +#include "jemalloc/internal/qr.h" +#include "jemalloc/internal/ql.h" + +/* + * jemalloc can conceptually be broken into components (arena, tcache, etc.), + * but there are circular dependencies that cannot be broken without + * substantial performance degradation. In order to reduce the effect on + * visual code flow, read the header files in multiple passes, with one of the + * following cpp variables defined during each pass: + * + * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data + * types. + * JEMALLOC_H_STRUCTS : Data structures. + * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. + * JEMALLOC_H_INLINES : Inline functions. + */ +/******************************************************************************/ +#define JEMALLOC_H_TYPES + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* Size class index type. */ +typedef unsigned szind_t; + +/* + * Flags bits: + * + * a: arena + * t: tcache + * 0: unused + * z: zero + * n: alignment + * + * aaaaaaaa aaaatttt tttttttt 0znnnnnn + */ +#define MALLOCX_ARENA_MASK ((int)~0xfffff) +#define MALLOCX_ARENA_MAX 0xffe +#define MALLOCX_TCACHE_MASK ((int)~0xfff000ffU) +#define MALLOCX_TCACHE_MAX 0xffd +#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) +/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ +#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ + (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) +#define MALLOCX_ALIGN_GET(flags) \ + (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) +#define MALLOCX_ZERO_GET(flags) \ + ((bool)(flags & MALLOCX_ZERO)) + +#define MALLOCX_TCACHE_GET(flags) \ + (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2) +#define MALLOCX_ARENA_GET(flags) \ + (((unsigned)(((unsigned)flags) >> 20)) - 1) + +/* Smallest size class to support. */ +#define TINY_MIN (1U << LG_TINY_MIN) + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). 
+ */ +#ifndef LG_QUANTUM +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __aarch64__ +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# ifdef __SH4__ +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "Unknown minimum alignment for architecture; specify via " + "--with-lg-quantum" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. + */ +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Page size. LG_PAGE is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) + +/* Return the page base address for the page containing address a. */ +#define PAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~PAGE_MASK)) + +/* Return the smallest pagesize multiple that is >= s. */ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Return the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & (-(alignment)))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & (-(alignment))) + +/* Declare a variable-length array. 
*/ +#if __STDC_VERSION__ < 199901L +# ifdef _MSC_VER +# include +# define alloca _alloca +# else +# ifdef JEMALLOC_HAS_ALLOCA_H +# include +# else +# include +# endif +# endif +# define VARIABLE_ARRAY(type, name, count) \ + type *name = alloca(sizeof(type) * (count)) +#else +# define VARIABLE_ARRAY(type, name, count) type name[(count)] +#endif + +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" + +#undef JEMALLOC_H_TYPES +/******************************************************************************/ +#define JEMALLOC_H_STRUCTS + +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/bitmap.h" +#define JEMALLOC_ARENA_STRUCTS_A +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_STRUCTS_A +#include "jemalloc/internal/extent.h" +#define JEMALLOC_ARENA_STRUCTS_B +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_STRUCTS_B +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" + +#include "jemalloc/internal/tsd.h" + +#undef JEMALLOC_H_STRUCTS +/******************************************************************************/ +#define JEMALLOC_H_EXTERNS + +extern bool opt_abort; +extern const char *opt_junk; +extern bool opt_junk_alloc; +extern bool opt_junk_free; +extern size_t opt_quarantine; +extern bool opt_redzone; +extern bool opt_utrace; +extern bool opt_xmalloc; +extern bool opt_zero; +extern size_t opt_narenas; + +extern bool in_valgrind; + +/* Number of CPUs. */ +extern unsigned ncpus; + +/* + * index2size_tab encodes the same information as could be computed (at + * unacceptable cost in some code paths) by index2size_compute(). + */ +extern size_t const index2size_tab[NSIZES]; +/* + * size2index_tab is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via size2index(). 
+ */ +extern uint8_t const size2index_tab[]; + +arena_t *a0get(void); +void *a0malloc(size_t size); +void a0dalloc(void *ptr); +void *bootstrap_malloc(size_t size); +void *bootstrap_calloc(size_t num, size_t size); +void bootstrap_free(void *ptr); +arena_t *arenas_extend(unsigned ind); +arena_t *arena_init(unsigned ind); +unsigned narenas_total_get(void); +arena_t *arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing); +arena_t *arena_choose_hard(tsd_t *tsd); +void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); +unsigned arena_nbound(unsigned ind); +void thread_allocated_cleanup(tsd_t *tsd); +void thread_deallocated_cleanup(tsd_t *tsd); +void arena_cleanup(tsd_t *tsd); +void arenas_cache_cleanup(tsd_t *tsd); +void narenas_cache_cleanup(tsd_t *tsd); +void arenas_cache_bypass_cleanup(tsd_t *tsd); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); + +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" +#include "jemalloc/internal/tsd.h" + +#undef JEMALLOC_H_EXTERNS +/******************************************************************************/ +#define JEMALLOC_H_INLINES + +#include "jemalloc/internal/valgrind.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" + +#ifndef JEMALLOC_ENABLE_INLINE +szind_t size2index_compute(size_t size); +szind_t size2index_lookup(size_t size); +szind_t size2index(size_t size); +size_t index2size_compute(szind_t index); +size_t index2size_lookup(szind_t index); +size_t index2size(szind_t index); +size_t s2u_compute(size_t size); +size_t s2u_lookup(size_t size); +size_t s2u(size_t size); +size_t sa2u(size_t size, size_t alignment); +arena_t *arena_choose(tsd_t *tsd, arena_t *arena); +arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing, + bool refresh_if_missing); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_INLINE szind_t +size2index_compute(size_t size) +{ + +#if (NTBINS != 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + size_t lg_ceil = lg_floor(pow2_ceil(size)); + return (lg_ceil < lg_tmin ? 
0 : lg_ceil - lg_tmin); + } +#endif + { + size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ? + (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1)) + : lg_floor((size<<1)-1); + size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : + x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); + size_t grp = shift << LG_SIZE_CLASS_GROUP; + + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZI(-1) << lg_delta; + size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + size_t index = NTBINS + grp + mod; + return (index); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index_lookup(size_t size) +{ + + assert(size <= LOOKUP_MAXCLASS); + { + size_t ret = ((size_t)(size2index_tab[(size-1) >> + LG_TINY_MIN])); + assert(ret == size2index_compute(size)); + return (ret); + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +size2index(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (size2index_lookup(size)); + return (size2index_compute(size)); +} + +JEMALLOC_INLINE size_t +index2size_compute(szind_t index) +{ + +#if (NTBINS > 0) + if (index < NTBINS) + return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); +#endif + { + size_t reduced_index = index - NTBINS; + size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; + size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - + 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_QUANTUM-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t usize = grp_size + mod_size; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size_lookup(szind_t index) +{ + size_t ret = (size_t)index2size_tab[index]; + assert(ret == index2size_compute(index)); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE size_t +index2size(szind_t index) +{ + + assert(index < NSIZES); + return (index2size_lookup(index)); +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_compute(size_t size) +{ + +#if (NTBINS > 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + size_t lg_ceil = lg_floor(pow2_ceil(size)); + return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : + (ZU(1) << lg_ceil)); + } +#endif + { + size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ? + (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1)) + : lg_floor((size<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (size + delta_mask) & ~delta_mask; + return (usize); + } +} + +JEMALLOC_ALWAYS_INLINE size_t +s2u_lookup(size_t size) +{ + size_t ret = index2size_lookup(size2index_lookup(size)); + + assert(ret == s2u_compute(size)); + return (ret); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size. + */ +JEMALLOC_ALWAYS_INLINE size_t +s2u(size_t size) +{ + + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) + return (s2u_lookup(size)); + return (s2u_compute(size)); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. 
+ */ +JEMALLOC_ALWAYS_INLINE size_t +sa2u(size_t size, size_t alignment) +{ + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* Try for a small size class. */ + if (size <= SMALL_MAXCLASS && alignment < PAGE) { + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each + * small size class, every object is aligned at the smallest + * power of two that is non-zero in the base two representation + * of the size. For example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = s2u(ALIGNMENT_CEILING(size, alignment)); + if (usize < LARGE_MINCLASS) + return (usize); + } + + /* Try for a large size class. */ + if (likely(size <= large_maxclass) && likely(alignment < chunksize)) { + /* + * We can't achieve subpage alignment, so round up alignment + * to the minimum that can actually be supported. + */ + alignment = PAGE_CEILING(alignment); + + /* Make sure result is a large size class. */ + usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size); + + /* + * Calculate the size of the over-size run that arena_palloc() + * would need to allocate in order to guarantee the alignment. + */ + if (usize + large_pad + alignment - PAGE <= arena_maxrun) + return (usize); + } + + /* Huge size class. Beware of size_t overflow. */ + + /* + * We can't achieve subchunk alignment, so round up alignment to the + * minimum that can actually be supported. + */ + alignment = CHUNK_CEILING(alignment); + if (alignment == 0) { + /* size_t overflow. */ + return (0); + } + + /* Make sure result is a huge size class. */ + if (size <= chunksize) + usize = chunksize; + else { + usize = s2u(size); + if (usize < size) { + /* size_t overflow. */ + return (0); + } + } + + /* + * Calculate the multi-chunk mapping that huge_palloc() would need in + * order to guarantee the alignment. + */ + if (usize + alignment - PAGE < usize) { + /* size_t overflow. */ + return (0); + } + return (usize); +} + +/* Choose an arena based on a per-thread value. */ +JEMALLOC_INLINE arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) +{ + arena_t *ret; + + if (arena != NULL) + return (arena); + + if (unlikely((ret = tsd_arena_get(tsd)) == NULL)) + ret = arena_choose_hard(tsd); + + return (ret); +} + +JEMALLOC_INLINE arena_t * +arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing, + bool refresh_if_missing) +{ + arena_t *arena; + arena_t **arenas_cache = tsd_arenas_cache_get(tsd); + + /* init_if_missing requires refresh_if_missing. */ + assert(!init_if_missing || refresh_if_missing); + + if (unlikely(arenas_cache == NULL)) { + /* arenas_cache hasn't been initialized yet. */ + return (arena_get_hard(tsd, ind, init_if_missing)); + } + if (unlikely(ind >= tsd_narenas_cache_get(tsd))) { + /* + * ind is invalid, cache is old (too small), or arena to be + * initialized. + */ + return (refresh_if_missing ? arena_get_hard(tsd, ind, + init_if_missing) : NULL); + } + arena = arenas_cache[ind]; + if (likely(arena != NULL) || !refresh_if_missing) + return (arena); + return (arena_get_hard(tsd, ind, init_if_missing)); +} +#endif + +#include "jemalloc/internal/bitmap.h" +/* + * Include portions of arena.h interleaved with tcache.h in order to resolve + * circular dependencies. 
+ */ +#define JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_A +#include "jemalloc/internal/tcache.h" +#define JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/arena.h" +#undef JEMALLOC_ARENA_INLINE_B +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" + +#ifndef JEMALLOC_ENABLE_INLINE +arena_t *iaalloc(const void *ptr); +size_t isalloc(const void *ptr, bool demote); +void *iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, + bool is_metadata, arena_t *arena); +void *imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena); +void *imalloc(tsd_t *tsd, size_t size); +void *icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena); +void *icalloc(tsd_t *tsd, size_t size); +void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena); +void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena); +void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); +size_t ivsalloc(const void *ptr, bool demote); +size_t u2rz(size_t usize); +size_t p2rz(const void *ptr); +void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata); +void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache); +void idalloc(tsd_t *tsd, void *ptr); +void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache); +void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); +void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache); +void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, + arena_t *arena); +void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); +void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero); +bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_ALWAYS_INLINE arena_t * +iaalloc(const void *ptr) +{ + + assert(ptr != NULL); + + return (arena_aalloc(ptr)); +} + +/* + * Typical usage: + * void *ptr = [...] + * size_t sz = isalloc(ptr, config_prof); + */ +JEMALLOC_ALWAYS_INLINE size_t +isalloc(const void *ptr, bool demote) +{ + + assert(ptr != NULL); + /* Demotion only makes sense if config_prof is true. 
*/ + assert(config_prof || !demote); + + return (arena_salloc(ptr, demote)); +} + +JEMALLOC_ALWAYS_INLINE void * +iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata, + arena_t *arena) +{ + void *ret; + + assert(size != 0); + + ret = arena_malloc(tsd, arena, size, zero, tcache); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), isalloc(ret, + config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena) +{ + + return (iallocztm(tsd, size, false, tcache, false, arena)); +} + +JEMALLOC_ALWAYS_INLINE void * +imalloc(tsd_t *tsd, size_t size) +{ + + return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL)); +} + +JEMALLOC_ALWAYS_INLINE void * +icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena) +{ + + return (iallocztm(tsd, size, true, tcache, false, arena)); +} + +JEMALLOC_ALWAYS_INLINE void * +icalloc(tsd_t *tsd, size_t size) +{ + + return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_metadata, arena_t *arena) +{ + void *ret; + + assert(usize != 0); + assert(usize == sa2u(usize, alignment)); + + ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_metadata && likely(ret != NULL)) { + arena_metadata_allocated_add(iaalloc(ret), isalloc(ret, + config_prof)); + } + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) +{ + + return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena)); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) +{ + + return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, + NULL), false, NULL)); +} + +JEMALLOC_ALWAYS_INLINE size_t +ivsalloc(const void *ptr, bool demote) +{ + extent_node_t *node; + + /* Return 0 if ptr is not within a chunk managed by jemalloc. */ + node = chunk_lookup(ptr, false); + if (node == NULL) + return (0); + /* Only arena chunks should be looked up via interior pointers. 
*/ + assert(extent_node_addr_get(node) == ptr || + extent_node_achunk_get(node)); + + return (isalloc(ptr, demote)); +} + +JEMALLOC_INLINE size_t +u2rz(size_t usize) +{ + size_t ret; + + if (usize <= SMALL_MAXCLASS) { + szind_t binind = size2index(usize); + ret = arena_bin_info[binind].redzone_size; + } else + ret = 0; + + return (ret); +} + +JEMALLOC_INLINE size_t +p2rz(const void *ptr) +{ + size_t usize = isalloc(ptr, false); + + return (u2rz(usize)); +} + +JEMALLOC_ALWAYS_INLINE void +idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata) +{ + + assert(ptr != NULL); + if (config_stats && is_metadata) { + arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr, + config_prof)); + } + + arena_dalloc(tsd, ptr, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache) +{ + + idalloctm(tsd, ptr, tcache, false); +} + +JEMALLOC_ALWAYS_INLINE void +idalloc(tsd_t *tsd, void *ptr) +{ + + idalloctm(tsd, ptr, tcache_get(tsd, false), false); +} + +JEMALLOC_ALWAYS_INLINE void +iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) +{ + + if (config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + idalloctm(tsd, ptr, tcache, false); +} + +JEMALLOC_ALWAYS_INLINE void +isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) +{ + + arena_sdalloc(tsd, ptr, size, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache) +{ + + if (config_fill && unlikely(opt_quarantine)) + quarantine(tsd, ptr); + else + isdalloct(tsd, ptr, size, tcache); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) +{ + void *p; + size_t usize, copysize; + + usize = sa2u(size + extra, alignment); + if (usize == 0) + return (NULL); + p = ipalloct(tsd, usize, alignment, zero, tcache, arena); + if (p == NULL) { + if (extra == 0) + return (NULL); + /* Try again, without extra this time. */ + usize = sa2u(size, alignment); + if (usize == 0) + return (NULL); + p = ipalloct(tsd, usize, alignment, zero, tcache, arena); + if (p == NULL) + return (NULL); + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(p, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache); + return (p); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, tcache_t *tcache, arena_t *arena) +{ + + assert(ptr != NULL); + assert(size != 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* + * Existing object alignment is inadequate; allocate new space + * and copy. + */ + return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment, + zero, tcache, arena)); + } + + return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero, + tcache)); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero) +{ + + return (iralloct(tsd, ptr, oldsize, size, alignment, zero, + tcache_get(tsd, true), NULL)); +} + +JEMALLOC_ALWAYS_INLINE bool +ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, + bool zero) +{ + + assert(ptr != NULL); + assert(size != 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* Existing object alignment is inadequate. 
 */
+ return (true);
+ }
+
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
+}
+#endif
+
+#include "jemalloc/internal/prof.h"
+
+#undef JEMALLOC_H_INLINES
+/******************************************************************************/
+#endif /* JEMALLOC_INTERNAL_H */
diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
new file mode 100644
index 0000000..a601d6e
--- /dev/null
+++ b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -0,0 +1,64 @@
+#ifndef JEMALLOC_INTERNAL_DECLS_H
+#define JEMALLOC_INTERNAL_DECLS_H
+
+#include <math.h>
+#ifdef _WIN32
+# include <windows.h>
+# include "msvc_compat/windows_extra.h"
+
+#else
+# include <sys/param.h>
+# include <sys/mman.h>
+# if !defined(__pnacl__) && !defined(__native_client__)
+# include <sys/syscall.h>
+# if !defined(SYS_write) && defined(__NR_write)
+# define SYS_write __NR_write
+# endif
+# include <sys/uio.h>
+# endif
+# include <pthread.h>
+# include <errno.h>
+#endif
+#include <sys/time.h>
+
+#include <limits.h>
+#ifndef SIZE_T_MAX
+# define SIZE_T_MAX SIZE_MAX
+#endif
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#ifndef offsetof
+# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
+#endif
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#ifdef _MSC_VER
+# include <io.h>
+typedef intptr_t ssize_t;
+# define PATH_MAX 1024
+# define STDERR_FILENO 2
+# define __func__ __FUNCTION__
+# ifdef JEMALLOC_HAS_RESTRICT
+# define restrict __restrict
+# endif
+/* Disable warnings about deprecated system functions. */
+# pragma warning(disable: 4996)
+#if _MSC_VER < 1800
+static int
+isblank(int c)
+{
+
+ return (c == '\t' || c == ' ');
+}
+#endif
+#else
+# include <unistd.h>
+#endif
+#include <fcntl.h>
+
+#endif /* JEMALLOC_INTERNAL_H */
diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
new file mode 100644
index 0000000..b0f8caa
--- /dev/null
+++ b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -0,0 +1,262 @@
+#ifndef JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
+/*
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed. This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
+ */
+#undef JEMALLOC_PREFIX
+#undef JEMALLOC_CPREFIX
+
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
+ * from being exported, but for static libraries, naming collisions are a real
+ * possibility.
+ */
+#undef JEMALLOC_PRIVATE_NAMESPACE
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.
+ */
+#undef CPU_SPINWAIT
+
+/* Defined if C11 atomics are available. */
+#undef JEMALLOC_C11ATOMICS
+
+/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
+#undef JEMALLOC_ATOMIC9
+
+/*
+ * Defined if OSAtomic*() functions are available, as provided by Darwin, and
+ * documented in the atomic(3) manual page.
+ */
+#undef JEMALLOC_OSATOMIC
+
+/*
+ * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
+ * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
+ * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
+ * functions are defined in libgcc instead of being inlines).
+ */ +#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +#undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#undef JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if madvise(2) is available. + */ +#undef JEMALLOC_HAVE_MADVISE + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +#undef JEMALLOC_OSSPIN + +/* + * Defined if secure_getenv(3) is available. + */ +#undef JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +#undef JEMALLOC_HAVE_ISSETUGID + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +#undef JEMALLOC_MALLOC_THREAD_CLEANUP + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#undef JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +#undef JEMALLOC_MUTEX_INIT_CB + +/* Non-empty if the tls_model attribute is supported. */ +#undef JEMALLOC_TLS_MODEL + +/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ +#undef JEMALLOC_CC_SILENCE + +/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ +#undef JEMALLOC_CODE_COVERAGE + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +#undef JEMALLOC_DEBUG + +/* JEMALLOC_STATS enables statistics calculation. */ +#undef JEMALLOC_STATS + +/* JEMALLOC_PROF enables allocation profiling. */ +#undef JEMALLOC_PROF + +/* Use libunwind for profile backtracing if defined. */ +#undef JEMALLOC_PROF_LIBUNWIND + +/* Use libgcc for profile backtracing if defined. */ +#undef JEMALLOC_PROF_LIBGCC + +/* Use gcc intrinsics for profile backtracing if defined. */ +#undef JEMALLOC_PROF_GCC + +/* + * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. + * This makes it possible to allocate/deallocate objects without any locking + * when the cache is in the steady state. + */ +#undef JEMALLOC_TCACHE + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * segment (DSS). + */ +#undef JEMALLOC_DSS + +/* Support memory filling (junk/zero/quarantine/redzone). */ +#undef JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +#undef JEMALLOC_UTRACE + +/* Support Valgrind. */ +#undef JEMALLOC_VALGRIND + +/* Support optional abort() on OOM. */ +#undef JEMALLOC_XMALLOC + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +#undef JEMALLOC_LAZY_LOCK + +/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ +#undef LG_TINY_MIN + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). 
+ */ +#undef LG_QUANTUM + +/* One page is 2^LG_PAGE bytes. */ +#undef LG_PAGE + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#undef JEMALLOC_MAPS_COALESCE + +/* + * If defined, use munmap() to unmap freed chunks, rather than storing them for + * later reuse. This is disabled by default on Linux because common sequences + * of mmap()/munmap() calls will cause virtual memory map holes. + */ +#undef JEMALLOC_MUNMAP + +/* TLS is used to map arenas and magazine caches to threads. */ +#undef JEMALLOC_TLS + +/* + * ffs()/ffsl() functions to use for bitmapping. Don't use these directly; + * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h. + */ +#undef JEMALLOC_INTERNAL_FFSL +#undef JEMALLOC_INTERNAL_FFS + +/* + * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside + * within jemalloc-owned chunks before dereferencing them. + */ +#undef JEMALLOC_IVSALLOC + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#undef JEMALLOC_CACHE_OBLIVIOUS + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +#undef JEMALLOC_ZONE +#undef JEMALLOC_ZONE_VERSION + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched. + * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being + * unused, such that they will be discarded rather + * than swapped out. + */ +#undef JEMALLOC_PURGE_MADVISE_DONTNEED +#undef JEMALLOC_PURGE_MADVISE_FREE + +/* Define if operating system has alloca.h header. */ +#undef JEMALLOC_HAS_ALLOCA_H + +/* C99 restrict keyword supported. */ +#undef JEMALLOC_HAS_RESTRICT + +/* For use by hash code. */ +#undef JEMALLOC_BIG_ENDIAN + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#undef LG_SIZEOF_INT + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#undef LG_SIZEOF_LONG + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#undef LG_SIZEOF_INTMAX_T + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#undef JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#undef JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* Adaptive mutex support in pthreads. */ +#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +#undef JEMALLOC_EXPORT + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h new file mode 100644 index 0000000..a08ba77 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h @@ -0,0 +1,57 @@ +/* + * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for + * functions that are static inline functions if inlining is enabled, and + * single-definition library-private functions if inlining is disabled. 
+ * + * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in + * which case the denoted functions are always static, regardless of whether + * inlining is enabled. + */ +#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) + /* Disable inlining to make debugging/profiling easier. */ +# define JEMALLOC_ALWAYS_INLINE +# define JEMALLOC_ALWAYS_INLINE_C static +# define JEMALLOC_INLINE +# define JEMALLOC_INLINE_C static +# define inline +#else +# define JEMALLOC_ENABLE_INLINE +# ifdef JEMALLOC_HAVE_ATTR +# define JEMALLOC_ALWAYS_INLINE \ + static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) +# define JEMALLOC_ALWAYS_INLINE_C \ + static inline JEMALLOC_ATTR(always_inline) +# else +# define JEMALLOC_ALWAYS_INLINE static inline +# define JEMALLOC_ALWAYS_INLINE_C static inline +# endif +# define JEMALLOC_INLINE static inline +# define JEMALLOC_INLINE_C static inline +# ifdef _MSC_VER +# define inline _inline +# endif +#endif + +#ifdef JEMALLOC_CC_SILENCE +# define UNUSED JEMALLOC_ATTR(unused) +#else +# define UNUSED +#endif + +#define ZU(z) ((size_t)z) +#define ZI(z) ((ssize_t)z) +#define QU(q) ((uint64_t)q) +#define QI(q) ((int64_t)q) + +#define KZU(z) ZU(z##ULL) +#define KZI(z) ZI(z##LL) +#define KQU(q) QU(q##ULL) +#define KQI(q) QI(q##LL) + +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif + +#ifndef JEMALLOC_HAS_RESTRICT +# define restrict +#endif diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/mb.h b/redis.submodule/jemalloc/include/jemalloc/internal/mb.h new file mode 100644 index 0000000..3cfa787 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/mb.h @@ -0,0 +1,115 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void mb_write(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) +#ifdef __i386__ +/* + * According to the Intel Architecture Software Developer's Manual, current + * processors execute instructions in order from the perspective of other + * processors in a multiprocessor system, but 1) Intel reserves the right to + * change that, and 2) the compiler's optimizer could re-order instructions if + * there weren't some form of barrier. Therefore, even if running on an + * architecture that does not need memory barriers (everything through at least + * i686), an "optimizer barrier" is necessary. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + +# if 0 + /* This is a true memory barrier. */ + asm volatile ("pusha;" + "xor %%eax,%%eax;" + "cpuid;" + "popa;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +#else + /* + * This is hopefully enough to keep the compiler from reordering + * instructions around this one. + */ + asm volatile ("nop;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +#endif +} +#elif (defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("sfence" + : /* Outputs. */ + : /* Inputs. 
*/ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__powerpc__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("eieio" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__sparc64__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("membar #StoreStore" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__tile__) +JEMALLOC_INLINE void +mb_write(void) +{ + + __sync_synchronize(); +} +#else +/* + * This is much slower than a simple memory barrier, but the semantics of mutex + * unlock make this work. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + malloc_mutex_t mtx; + + malloc_mutex_init(&mtx); + malloc_mutex_lock(&mtx); + malloc_mutex_unlock(&mtx); +} +#endif +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/mutex.h b/redis.submodule/jemalloc/include/jemalloc/internal/mutex.h new file mode 100644 index 0000000..f051f29 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/mutex.h @@ -0,0 +1,111 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct malloc_mutex_s malloc_mutex_t; + +#ifdef _WIN32 +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_INITIALIZER {0} +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} +#else +# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ + defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP +# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} +# else +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} +# endif +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct malloc_mutex_s { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLock lock; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; +#else + pthread_mutex_t lock; +#endif +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_LAZY_LOCK +extern bool isthreaded; +#else +# undef isthreaded /* Undo private_namespace.h definition. 
*/ +# define isthreaded true +#endif + +bool malloc_mutex_init(malloc_mutex_t *mutex); +void malloc_mutex_prefork(malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(malloc_mutex_t *mutex); +bool mutex_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void malloc_mutex_lock(malloc_mutex_t *mutex); +void malloc_mutex_unlock(malloc_mutex_t *mutex); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) +JEMALLOC_INLINE void +malloc_mutex_lock(malloc_mutex_t *mutex) +{ + + if (isthreaded) { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + AcquireSRWLockExclusive(&mutex->lock); +# else + EnterCriticalSection(&mutex->lock); +# endif +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockLock(&mutex->lock); +#else + pthread_mutex_lock(&mutex->lock); +#endif + } +} + +JEMALLOC_INLINE void +malloc_mutex_unlock(malloc_mutex_t *mutex) +{ + + if (isthreaded) { +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + ReleaseSRWLockExclusive(&mutex->lock); +# else + LeaveCriticalSection(&mutex->lock); +# endif +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockUnlock(&mutex->lock); +#else + pthread_mutex_unlock(&mutex->lock); +#endif + } +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/pages.h b/redis.submodule/jemalloc/include/jemalloc/internal/pages.h new file mode 100644 index 0000000..da7eb96 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/pages.h @@ -0,0 +1,26 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *pages_map(void *addr, size_t size); +void pages_unmap(void *addr, size_t size); +void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, + size_t size); +bool pages_commit(void *addr, size_t size); +bool pages_decommit(void *addr, size_t size); +bool pages_purge(void *addr, size_t size); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/private_namespace.sh b/redis.submodule/jemalloc/include/jemalloc/internal/private_namespace.sh new file mode 100755 index 0000000..cd25eb3 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/private_namespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat $1` ; do + echo "#define ${symbol} JEMALLOC_N(${symbol})" +done diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/private_symbols.txt b/redis.submodule/jemalloc/include/jemalloc/internal/private_symbols.txt new file mode 100644 index 0000000..a90021a --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/private_symbols.txt @@ -0,0 +1,499 @@ +a0dalloc +a0get +a0malloc +arena_aalloc +arena_alloc_junk_small +arena_bin_index +arena_bin_info +arena_bitselm_get 
+arena_boot +arena_choose +arena_choose_hard +arena_chunk_alloc_huge +arena_chunk_cache_maybe_insert +arena_chunk_cache_maybe_remove +arena_chunk_dalloc_huge +arena_chunk_ralloc_huge_expand +arena_chunk_ralloc_huge_shrink +arena_chunk_ralloc_huge_similar +arena_cleanup +arena_dalloc +arena_dalloc_bin +arena_dalloc_bin_junked_locked +arena_dalloc_junk_large +arena_dalloc_junk_small +arena_dalloc_large +arena_dalloc_large_junked_locked +arena_dalloc_small +arena_dss_prec_get +arena_dss_prec_set +arena_get +arena_get_hard +arena_init +arena_lg_dirty_mult_default_get +arena_lg_dirty_mult_default_set +arena_lg_dirty_mult_get +arena_lg_dirty_mult_set +arena_malloc +arena_malloc_large +arena_malloc_small +arena_mapbits_allocated_get +arena_mapbits_binind_get +arena_mapbits_decommitted_get +arena_mapbits_dirty_get +arena_mapbits_get +arena_mapbits_internal_set +arena_mapbits_large_binind_set +arena_mapbits_large_get +arena_mapbits_large_set +arena_mapbits_large_size_get +arena_mapbitsp_get +arena_mapbitsp_read +arena_mapbitsp_write +arena_mapbits_size_decode +arena_mapbits_size_encode +arena_mapbits_small_runind_get +arena_mapbits_small_set +arena_mapbits_unallocated_set +arena_mapbits_unallocated_size_get +arena_mapbits_unallocated_size_set +arena_mapbits_unzeroed_get +arena_maxrun +arena_maybe_purge +arena_metadata_allocated_add +arena_metadata_allocated_get +arena_metadata_allocated_sub +arena_migrate +arena_miscelm_get +arena_miscelm_to_pageind +arena_miscelm_to_rpages +arena_nbound +arena_new +arena_node_alloc +arena_node_dalloc +arena_palloc +arena_postfork_child +arena_postfork_parent +arena_prefork +arena_prof_accum +arena_prof_accum_impl +arena_prof_accum_locked +arena_prof_promoted +arena_prof_tctx_get +arena_prof_tctx_reset +arena_prof_tctx_set +arena_ptr_small_binind_get +arena_purge_all +arena_quarantine_junk_small +arena_ralloc +arena_ralloc_junk_large +arena_ralloc_no_move +arena_rd_to_miscelm +arena_redzone_corruption +arena_run_regind +arena_run_to_miscelm +arena_salloc +arenas_cache_bypass_cleanup +arenas_cache_cleanup +arena_sdalloc +arena_stats_merge +arena_tcache_fill_small +atomic_add_p +atomic_add_u +atomic_add_uint32 +atomic_add_uint64 +atomic_add_z +atomic_cas_p +atomic_cas_u +atomic_cas_uint32 +atomic_cas_uint64 +atomic_cas_z +atomic_sub_p +atomic_sub_u +atomic_sub_uint32 +atomic_sub_uint64 +atomic_sub_z +base_alloc +base_boot +base_postfork_child +base_postfork_parent +base_prefork +base_stats_get +bitmap_full +bitmap_get +bitmap_info_init +bitmap_info_ngroups +bitmap_init +bitmap_set +bitmap_sfu +bitmap_size +bitmap_unset +bootstrap_calloc +bootstrap_free +bootstrap_malloc +bt_init +buferror +chunk_alloc_base +chunk_alloc_cache +chunk_alloc_dss +chunk_alloc_mmap +chunk_alloc_wrapper +chunk_boot +chunk_dalloc_arena +chunk_dalloc_cache +chunk_dalloc_mmap +chunk_dalloc_wrapper +chunk_deregister +chunk_dss_boot +chunk_dss_postfork_child +chunk_dss_postfork_parent +chunk_dss_prec_get +chunk_dss_prec_set +chunk_dss_prefork +chunk_hooks_default +chunk_hooks_get +chunk_hooks_set +chunk_in_dss +chunk_lookup +chunk_npages +chunk_postfork_child +chunk_postfork_parent +chunk_prefork +chunk_purge_arena +chunk_purge_wrapper +chunk_register +chunksize +chunksize_mask +chunks_rtree +ckh_count +ckh_delete +ckh_insert +ckh_iter +ckh_new +ckh_pointer_hash +ckh_pointer_keycomp +ckh_remove +ckh_search +ckh_string_hash +ckh_string_keycomp +ctl_boot +ctl_bymib +ctl_byname +ctl_nametomib +ctl_postfork_child +ctl_postfork_parent +ctl_prefork +dss_prec_names +extent_node_achunk_get 
+extent_node_achunk_set +extent_node_addr_get +extent_node_addr_set +extent_node_arena_get +extent_node_arena_set +extent_node_dirty_insert +extent_node_dirty_linkage_init +extent_node_dirty_remove +extent_node_init +extent_node_prof_tctx_get +extent_node_prof_tctx_set +extent_node_size_get +extent_node_size_set +extent_node_zeroed_get +extent_node_zeroed_set +extent_tree_ad_empty +extent_tree_ad_first +extent_tree_ad_insert +extent_tree_ad_iter +extent_tree_ad_iter_recurse +extent_tree_ad_iter_start +extent_tree_ad_last +extent_tree_ad_new +extent_tree_ad_next +extent_tree_ad_nsearch +extent_tree_ad_prev +extent_tree_ad_psearch +extent_tree_ad_remove +extent_tree_ad_reverse_iter +extent_tree_ad_reverse_iter_recurse +extent_tree_ad_reverse_iter_start +extent_tree_ad_search +extent_tree_szad_empty +extent_tree_szad_first +extent_tree_szad_insert +extent_tree_szad_iter +extent_tree_szad_iter_recurse +extent_tree_szad_iter_start +extent_tree_szad_last +extent_tree_szad_new +extent_tree_szad_next +extent_tree_szad_nsearch +extent_tree_szad_prev +extent_tree_szad_psearch +extent_tree_szad_remove +extent_tree_szad_reverse_iter +extent_tree_szad_reverse_iter_recurse +extent_tree_szad_reverse_iter_start +extent_tree_szad_search +get_errno +hash +hash_fmix_32 +hash_fmix_64 +hash_get_block_32 +hash_get_block_64 +hash_rotl_32 +hash_rotl_64 +hash_x64_128 +hash_x86_128 +hash_x86_32 +huge_aalloc +huge_dalloc +huge_dalloc_junk +huge_malloc +huge_palloc +huge_prof_tctx_get +huge_prof_tctx_reset +huge_prof_tctx_set +huge_ralloc +huge_ralloc_no_move +huge_salloc +iaalloc +iallocztm +icalloc +icalloct +idalloc +idalloct +idalloctm +imalloc +imalloct +index2size +index2size_compute +index2size_lookup +index2size_tab +in_valgrind +ipalloc +ipalloct +ipallocztm +iqalloc +iralloc +iralloct +iralloct_realign +isalloc +isdalloct +isqalloc +isthreaded +ivsalloc +ixalloc +jemalloc_postfork_child +jemalloc_postfork_parent +jemalloc_prefork +large_maxclass +lg_floor +malloc_cprintf +malloc_mutex_init +malloc_mutex_lock +malloc_mutex_postfork_child +malloc_mutex_postfork_parent +malloc_mutex_prefork +malloc_mutex_unlock +malloc_printf +malloc_snprintf +malloc_strtoumax +malloc_tsd_boot0 +malloc_tsd_boot1 +malloc_tsd_cleanup_register +malloc_tsd_dalloc +malloc_tsd_malloc +malloc_tsd_no_cleanup +malloc_vcprintf +malloc_vsnprintf +malloc_write +map_bias +map_misc_offset +mb_write +mutex_boot +narenas_cache_cleanup +narenas_total_get +ncpus +nhbins +opt_abort +opt_dss +opt_junk +opt_junk_alloc +opt_junk_free +opt_lg_chunk +opt_lg_dirty_mult +opt_lg_prof_interval +opt_lg_prof_sample +opt_lg_tcache_max +opt_narenas +opt_prof +opt_prof_accum +opt_prof_active +opt_prof_final +opt_prof_gdump +opt_prof_leak +opt_prof_prefix +opt_prof_thread_active_init +opt_quarantine +opt_redzone +opt_stats_print +opt_tcache +opt_utrace +opt_xmalloc +opt_zero +p2rz +pages_commit +pages_decommit +pages_map +pages_purge +pages_trim +pages_unmap +pow2_ceil +prof_active_get +prof_active_get_unlocked +prof_active_set +prof_alloc_prep +prof_alloc_rollback +prof_backtrace +prof_boot0 +prof_boot1 +prof_boot2 +prof_dump_header +prof_dump_open +prof_free +prof_free_sampled_object +prof_gdump +prof_gdump_get +prof_gdump_get_unlocked +prof_gdump_set +prof_gdump_val +prof_idump +prof_interval +prof_lookup +prof_malloc +prof_malloc_sample_object +prof_mdump +prof_postfork_child +prof_postfork_parent +prof_prefork +prof_realloc +prof_reset +prof_sample_accum_update +prof_sample_threshold_update +prof_tctx_get +prof_tctx_reset +prof_tctx_set 
+prof_tdata_cleanup +prof_tdata_get +prof_tdata_init +prof_tdata_reinit +prof_thread_active_get +prof_thread_active_init_get +prof_thread_active_init_set +prof_thread_active_set +prof_thread_name_get +prof_thread_name_set +quarantine +quarantine_alloc_hook +quarantine_alloc_hook_work +quarantine_cleanup +register_zone +rtree_child_read +rtree_child_read_hard +rtree_child_tryread +rtree_delete +rtree_get +rtree_new +rtree_node_valid +rtree_set +rtree_start_level +rtree_subkey +rtree_subtree_read +rtree_subtree_read_hard +rtree_subtree_tryread +rtree_val_read +rtree_val_write +s2u +s2u_compute +s2u_lookup +sa2u +set_errno +size2index +size2index_compute +size2index_lookup +size2index_tab +stats_cactive +stats_cactive_add +stats_cactive_get +stats_cactive_sub +stats_print +tcache_alloc_easy +tcache_alloc_large +tcache_alloc_small +tcache_alloc_small_hard +tcache_arena_associate +tcache_arena_dissociate +tcache_arena_reassociate +tcache_bin_flush_large +tcache_bin_flush_small +tcache_bin_info +tcache_boot +tcache_cleanup +tcache_create +tcache_dalloc_large +tcache_dalloc_small +tcache_enabled_cleanup +tcache_enabled_get +tcache_enabled_set +tcache_event +tcache_event_hard +tcache_flush +tcache_get +tcache_get_hard +tcache_maxclass +tcaches +tcache_salloc +tcaches_create +tcaches_destroy +tcaches_flush +tcaches_get +tcache_stats_merge +thread_allocated_cleanup +thread_deallocated_cleanup +tsd_arena_get +tsd_arena_set +tsd_boot +tsd_boot0 +tsd_boot1 +tsd_booted +tsd_cleanup +tsd_cleanup_wrapper +tsd_fetch +tsd_get +tsd_wrapper_get +tsd_wrapper_set +tsd_initialized +tsd_init_check_recursion +tsd_init_finish +tsd_init_head +tsd_nominal +tsd_quarantine_get +tsd_quarantine_set +tsd_set +tsd_tcache_enabled_get +tsd_tcache_enabled_set +tsd_tcache_get +tsd_tcache_set +tsd_tls +tsd_tsd +tsd_prof_tdata_get +tsd_prof_tdata_set +tsd_thread_allocated_get +tsd_thread_allocated_set +tsd_thread_deallocated_get +tsd_thread_deallocated_set +u2rz +valgrind_freelike_block +valgrind_make_mem_defined +valgrind_make_mem_noaccess +valgrind_make_mem_undefined diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/private_unnamespace.sh b/redis.submodule/jemalloc/include/jemalloc/internal/private_unnamespace.sh new file mode 100755 index 0000000..23fed8e --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/private_unnamespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat $1` ; do + echo "#undef ${symbol}" +done diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/prng.h b/redis.submodule/jemalloc/include/jemalloc/internal/prng.h new file mode 100644 index 0000000..216d0ef --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/prng.h @@ -0,0 +1,60 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Simple linear congruential pseudo-random number generator: + * + * prng(y) = (a*x + c) % m + * + * where the following constants ensure maximal period: + * + * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. + * c == Odd number (relatively prime to 2^n). + * m == 2^32 + * + * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. + * + * This choice of m has the disadvantage that the quality of the bits is + * proportional to bit position. For example, the lowest bit has a cycle of 2, + * the next has a cycle of 4, etc. For this reason, we prefer to use the upper + * bits. + * + * Macro parameters: + * uint32_t r : Result. 
+ * unsigned lg_range : (0..32], number of least significant bits to return. + * uint32_t state : Seed value. + * const uint32_t a, c : See above discussion. + */ +#define prng32(r, lg_range, state, a, c) do { \ + assert((lg_range) > 0); \ + assert((lg_range) <= 32); \ + \ + r = (state * (a)) + (c); \ + state = r; \ + r >>= (32 - (lg_range)); \ +} while (false) + +/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */ +#define prng64(r, lg_range, state, a, c) do { \ + assert((lg_range) > 0); \ + assert((lg_range) <= 64); \ + \ + r = (state * (a)) + (c); \ + state = r; \ + r >>= (64 - (lg_range)); \ +} while (false) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/prof.h b/redis.submodule/jemalloc/include/jemalloc/internal/prof.h new file mode 100644 index 0000000..e5198c3 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/prof.h @@ -0,0 +1,545 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_gctx_s prof_gctx_t; +typedef struct prof_tdata_s prof_tdata_t; + +/* Option defaults. */ +#ifdef JEMALLOC_PROF +# define PROF_PREFIX_DEFAULT "jeprof" +#else +# define PROF_PREFIX_DEFAULT "" +#endif +#define LG_PROF_SAMPLE_DEFAULT 19 +#define LG_PROF_INTERVAL_DEFAULT -1 + +/* + * Hard limit on stack backtrace depth. The version of prof_backtrace() that + * is based on __builtin_return_address() necessarily has a hard-coded number + * of backtrace frame handlers, and should be kept in sync with this setting. + */ +#define PROF_BT_MAX 128 + +/* Initial hash table size. */ +#define PROF_CKH_MINITEMS 64 + +/* Size of memory buffer to use when writing dump files. */ +#define PROF_DUMP_BUFSIZE 65536 + +/* Size of stack-allocated buffer used by prof_printf(). */ +#define PROF_PRINTF_BUFSIZE 128 + +/* + * Number of mutexes shared among all gctx's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NCTX_LOCKS 1024 + +/* + * Number of mutexes shared among all tdata's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NTDATA_LOCKS 256 + +/* + * prof_tdata pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) +#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) +#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct prof_bt_s { + /* Backtrace, stored as len program counters. 
*/ + void **vec; + unsigned len; +}; + +#ifdef JEMALLOC_PROF_LIBGCC +/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ +typedef struct { + prof_bt_t *bt; + unsigned max; +} prof_unwind_data_t; +#endif + +struct prof_cnt_s { + /* Profiling counters. */ + uint64_t curobjs; + uint64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; +}; + +typedef enum { + prof_tctx_state_initializing, + prof_tctx_state_nominal, + prof_tctx_state_dumping, + prof_tctx_state_purgatory /* Dumper must finish destroying. */ +} prof_tctx_state_t; + +struct prof_tctx_s { + /* Thread data for thread that performed the allocation. */ + prof_tdata_t *tdata; + + /* + * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be + * defunct during teardown. + */ + uint64_t thr_uid; + uint64_t thr_discrim; + + /* Profiling counters, protected by tdata->lock. */ + prof_cnt_t cnts; + + /* Associated global context. */ + prof_gctx_t *gctx; + + /* + * UID that distinguishes multiple tctx's created by the same thread, + * but coexisting in gctx->tctxs. There are two ways that such + * coexistence can occur: + * - A dumper thread can cause a tctx to be retained in the purgatory + * state. + * - Although a single "producer" thread must create all tctx's which + * share the same thr_uid, multiple "consumers" can each concurrently + * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only + * gets called once each time cnts.cur{objs,bytes} drop to 0, but this + * threshold can be hit again before the first consumer finishes + * executing prof_tctx_destroy(). + */ + uint64_t tctx_uid; + + /* Linkage into gctx's tctxs. */ + rb_node(prof_tctx_t) tctx_link; + + /* + * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents + * sample vs destroy race. + */ + bool prepared; + + /* Current dump-related state, protected by gctx->lock. */ + prof_tctx_state_t state; + + /* + * Copy of cnts snapshotted during early dump phase, protected by + * dump_mtx. + */ + prof_cnt_t dump_cnts; +}; +typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; + +struct prof_gctx_s { + /* Protects nlimbo, cnt_summed, and tctxs. */ + malloc_mutex_t *lock; + + /* + * Number of threads that currently cause this gctx to be in a state of + * limbo due to one of: + * - Initializing this gctx. + * - Initializing per thread counters associated with this gctx. + * - Preparing to destroy this gctx. + * - Dumping a heap profile that includes this gctx. + * nlimbo must be 1 (single destroyer) in order to safely destroy the + * gctx. + */ + unsigned nlimbo; + + /* + * Tree of profile counters, one for each thread that has allocated in + * this context. + */ + prof_tctx_tree_t tctxs; + + /* Linkage for tree of contexts to be dumped. */ + rb_node(prof_gctx_t) dump_link; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Associated backtrace. */ + prof_bt_t bt; + + /* Backtrace vector, variable size, referred to by bt. */ + void *vec[1]; +}; +typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; + +struct prof_tdata_s { + malloc_mutex_t *lock; + + /* Monotonically increasing unique thread identifier. */ + uint64_t thr_uid; + + /* + * Monotonically increasing discriminator among tdata structures + * associated with the same thr_uid. + */ + uint64_t thr_discrim; + + /* Included in heap profile dumps if non-NULL. */ + char *thread_name; + + bool attached; + bool expired; + + rb_node(prof_tdata_t) tdata_link; + + /* + * Counter used to initialize prof_tctx_t's tctx_uid. 
No locking is + * necessary when incrementing this field, because only one thread ever + * does so. + */ + uint64_t tctx_uid_next; + + /* + * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks + * backtraces for which it has non-zero allocation/deallocation counters + * associated with thread-specific prof_tctx_t objects. Other threads + * may write to prof_tctx_t contents when freeing associated objects. + */ + ckh_t bt2tctx; + + /* Sampling state. */ + uint64_t prng_state; + uint64_t bytes_until_sample; + + /* State used to avoid dumping while operating on prof internals. */ + bool enq; + bool enq_idump; + bool enq_gdump; + + /* + * Set to true during an early dump phase for tdata's which are + * currently being dumped. New threads' tdata's have this initialized + * to false so that they aren't accidentally included in later dump + * phases. + */ + bool dumping; + + /* + * True if profiling is active for this tdata's thread + * (thread.prof.active mallctl). + */ + bool active; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Backtrace vector, used for calls to prof_backtrace(). */ + void *vec[PROF_BT_MAX]; +}; +typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Accessed via prof_active_[gs]et{_unlocked,}(). */ +extern bool prof_active; + +/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ +extern bool prof_gdump_val; + +/* + * Profile dump interval, measured in bytes allocated. Each arena triggers a + * profile dump when it reaches this threshold. The effect is that the + * interval between profile dumps averages prof_interval, though the actual + * interval between dumps will tend to be sporadic, and the interval will be a + * maximum of approximately (prof_interval * narenas). + */ +extern uint64_t prof_interval; + +/* + * Initialized as opt_lg_prof_sample, and potentially modified during profiling + * resets. 
+ */ +extern size_t lg_prof_sample; + +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); +void prof_malloc_sample_object(const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(prof_bt_t *bt); +prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); +#ifdef JEMALLOC_JET +size_t prof_tdata_count(void); +size_t prof_bt_count(void); +const prof_cnt_t *prof_cnt_all(void); +typedef int (prof_dump_open_t)(bool, const char *); +extern prof_dump_open_t *prof_dump_open; +typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *); +extern prof_dump_header_t *prof_dump_header; +#endif +void prof_idump(void); +bool prof_mdump(const char *filename); +void prof_gdump(void); +prof_tdata_t *prof_tdata_init(tsd_t *tsd); +prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_tdata_cleanup(tsd_t *tsd); +const char *prof_thread_name_get(void); +bool prof_active_get(void); +bool prof_active_set(bool active); +int prof_thread_name_set(tsd_t *tsd, const char *thread_name); +bool prof_thread_active_get(void); +bool prof_thread_active_set(bool active); +bool prof_thread_active_init_get(void); +bool prof_thread_active_init_set(bool active_init); +bool prof_gdump_get(void); +bool prof_gdump_set(bool active); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(void); +void prof_prefork(void); +void prof_postfork_parent(void); +void prof_postfork_child(void); +void prof_sample_threshold_update(prof_tdata_t *tdata); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool prof_active_get_unlocked(void); +bool prof_gdump_get_unlocked(void); +prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); +bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, + prof_tdata_t **tdata_out); +prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, + bool update); +prof_tctx_t *prof_tctx_get(const void *ptr); +void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx); +void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, + prof_tctx_t *tctx); +void prof_malloc_sample_object(const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx); +void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, + prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, + size_t old_usize, prof_tctx_t *old_tctx); +void prof_free(tsd_t *tsd, const void *ptr, size_t usize); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) +JEMALLOC_ALWAYS_INLINE bool +prof_active_get_unlocked(void) +{ + + /* + * Even if opt_prof is true, sampling can be temporarily disabled by + * setting prof_active to false. No locking is used when reading + * prof_active in the fast path, so there are no guarantees regarding + * how long it will take for all threads to notice state changes. + */ + return (prof_active); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_gdump_get_unlocked(void) +{ + + /* + * No locking is used when reading prof_gdump_val in the fast path, so + * there are no guarantees regarding how long it will take for all + * threads to notice state changes. 
+ */ + return (prof_gdump_val); +} + +JEMALLOC_ALWAYS_INLINE prof_tdata_t * +prof_tdata_get(tsd_t *tsd, bool create) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = tsd_prof_tdata_get(tsd); + if (create) { + if (unlikely(tdata == NULL)) { + if (tsd_nominal(tsd)) { + tdata = prof_tdata_init(tsd); + tsd_prof_tdata_set(tsd, tdata); + } + } else if (unlikely(tdata->expired)) { + tdata = prof_tdata_reinit(tsd, tdata); + tsd_prof_tdata_set(tsd, tdata); + } + assert(tdata == NULL || tdata->attached); + } + + return (tdata); +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_tctx_get(const void *ptr) +{ + + cassert(config_prof); + assert(ptr != NULL); + + return (arena_prof_tctx_get(ptr)); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_set(ptr, usize, tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr, + prof_tctx_t *old_tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, + prof_tdata_t **tdata_out) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, true); + if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) + tdata = NULL; + + if (tdata_out != NULL) + *tdata_out = tdata; + + if (tdata == NULL) + return (true); + + if (tdata->bytes_until_sample >= usize) { + if (update) + tdata->bytes_until_sample -= usize; + return (true); + } else { + /* Compute new sample threshold. */ + if (update) + prof_sample_threshold_update(tdata); + return (!tdata->active); + } +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) +{ + prof_tctx_t *ret; + prof_tdata_t *tdata; + prof_bt_t bt; + + assert(usize == s2u(usize)); + + if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, + &tdata))) + ret = (prof_tctx_t *)(uintptr_t)1U; + else { + bt_init(&bt, tdata->vec); + prof_backtrace(&bt); + ret = prof_lookup(tsd, &bt); + } + + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + + cassert(config_prof); + assert(ptr != NULL); + assert(usize == isalloc(ptr, true)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) + prof_malloc_sample_object(ptr, usize, tctx); + else + prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U); +} + +JEMALLOC_ALWAYS_INLINE void +prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, + bool prof_active, bool updated, const void *old_ptr, size_t old_usize, + prof_tctx_t *old_tctx) +{ + bool sampled, old_sampled; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); + + if (prof_active && !updated && ptr != NULL) { + assert(usize == isalloc(ptr, true)); + if (prof_sample_accum_update(tsd, usize, true, NULL)) { + /* + * Don't sample. The usize passed to prof_alloc_prep() + * was larger than what actually got allocated, so a + * backtrace was captured for this allocation, even + * though its actual usize was insufficient to cross the + * sample threshold. 
+ */ + tctx = (prof_tctx_t *)(uintptr_t)1U; + } + } + + sampled = ((uintptr_t)tctx > (uintptr_t)1U); + old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); + + if (unlikely(sampled)) + prof_malloc_sample_object(ptr, usize, tctx); + else + prof_tctx_reset(ptr, usize, old_ptr, old_tctx); + + if (unlikely(old_sampled)) + prof_free_sampled_object(tsd, old_usize, old_tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_free(tsd_t *tsd, const void *ptr, size_t usize) +{ + prof_tctx_t *tctx = prof_tctx_get(ptr); + + cassert(config_prof); + assert(usize == isalloc(ptr, true)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) + prof_free_sampled_object(tsd, usize, tctx); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/public_namespace.sh b/redis.submodule/jemalloc/include/jemalloc/internal/public_namespace.sh new file mode 100755 index 0000000..362109f --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/public_namespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#define je_${n} JEMALLOC_N(${n})" +done diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/redis.submodule/jemalloc/include/jemalloc/internal/public_unnamespace.sh new file mode 100755 index 0000000..4239d17 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/public_unnamespace.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for nm in `cat $1` ; do + n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` + echo "#undef je_${n}" +done diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/ql.h b/redis.submodule/jemalloc/include/jemalloc/internal/ql.h new file mode 100644 index 0000000..1834bb8 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/ql.h @@ -0,0 +1,81 @@ +/* List definitions. */ +#define ql_head(a_type) \ +struct { \ + a_type *qlh_first; \ +} + +#define ql_head_initializer(a_head) {NULL} + +#define ql_elm(a_type) qr(a_type) + +/* List functions. */ +#define ql_new(a_head) do { \ + (a_head)->qlh_first = NULL; \ +} while (0) + +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) + +#define ql_first(a_head) ((a_head)->qlh_first) + +#define ql_last(a_head, a_field) \ + ((ql_first(a_head) != NULL) \ + ? qr_prev(ql_first(a_head), a_field) : NULL) + +#define ql_next(a_head, a_elm, a_field) \ + ((ql_last(a_head, a_field) != (a_elm)) \ + ? qr_next((a_elm), a_field) : NULL) + +#define ql_prev(a_head, a_elm, a_field) \ + ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ + : NULL) + +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ + qr_before_insert((a_qlelm), (a_elm), a_field); \ + if (ql_first(a_head) == (a_qlelm)) { \ + ql_first(a_head) = (a_elm); \ + } \ +} while (0) + +#define ql_after_insert(a_qlelm, a_elm, a_field) \ + qr_after_insert((a_qlelm), (a_elm), a_field) + +#define ql_head_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = (a_elm); \ +} while (0) + +#define ql_tail_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = qr_next((a_elm), a_field); \ +} while (0) + +#define ql_remove(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) == (a_elm)) { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } \ + if (ql_first(a_head) != (a_elm)) { \ + qr_remove((a_elm), a_field); \ + } else { \ + ql_first(a_head) = NULL; \ + } \ +} while (0) + +#define ql_head_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_first(a_head); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_tail_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_last(a_head, a_field); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_foreach(a_var, a_head, a_field) \ + qr_foreach((a_var), ql_first(a_head), a_field) + +#define ql_reverse_foreach(a_var, a_head, a_field) \ + qr_reverse_foreach((a_var), ql_first(a_head), a_field) diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/qr.h b/redis.submodule/jemalloc/include/jemalloc/internal/qr.h new file mode 100644 index 0000000..0fbaec2 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/qr.h @@ -0,0 +1,69 @@ +/* Ring definitions. */ +#define qr(a_type) \ +struct { \ + a_type *qre_next; \ + a_type *qre_prev; \ +} + +/* Ring functions. */ +#define qr_new(a_qr, a_field) do { \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) + +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) + +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qrelm); \ + (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ + (a_qrelm)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_after_insert(a_qrelm, a_qr, a_field) \ + do \ + { \ + (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ + (a_qr)->a_field.qre_prev = (a_qrelm); \ + (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ + (a_qrelm)->a_field.qre_next = (a_qr); \ + } while (0) + +#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ + void *t; \ + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + t = (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = t; \ +} while (0) + +/* + * qr_meld() and qr_split() are functionally equivalent, so there's no need to + * have two copies of the code. 
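+ *
+ * (Swapping the qre_prev pointers of a_qr_a and a_qr_b, and patching the
+ * matching qre_next pointers, joins two separate rings into one when the
+ * arguments start in different rings, and splits one ring into two when they
+ * start in the same ring, which is why a single body serves both macros.)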
+ */ +#define qr_split(a_qr_a, a_qr_b, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_field) + +#define qr_remove(a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev->a_field.qre_next \ + = (a_qr)->a_field.qre_next; \ + (a_qr)->a_field.qre_next->a_field.qre_prev \ + = (a_qr)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_foreach(var, a_qr, a_field) \ + for ((var) = (a_qr); \ + (var) != NULL; \ + (var) = (((var)->a_field.qre_next != (a_qr)) \ + ? (var)->a_field.qre_next : NULL)) + +#define qr_reverse_foreach(var, a_qr, a_field) \ + for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ + (var) != NULL; \ + (var) = (((var) != (a_qr)) \ + ? (var)->a_field.qre_prev : NULL)) diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/quarantine.h b/redis.submodule/jemalloc/include/jemalloc/internal/quarantine.h new file mode 100644 index 0000000..ae60739 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/quarantine.h @@ -0,0 +1,60 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct quarantine_obj_s quarantine_obj_t; +typedef struct quarantine_s quarantine_t; + +/* Default per thread quarantine size if valgrind is enabled. */ +#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct quarantine_obj_s { + void *ptr; + size_t usize; +}; + +struct quarantine_s { + size_t curbytes; + size_t curobjs; + size_t first; +#define LG_MAXOBJS_INIT 10 + size_t lg_maxobjs; + quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void quarantine_alloc_hook_work(tsd_t *tsd); +void quarantine(tsd_t *tsd, void *ptr); +void quarantine_cleanup(tsd_t *tsd); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void quarantine_alloc_hook(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) +JEMALLOC_ALWAYS_INLINE void +quarantine_alloc_hook(void) +{ + tsd_t *tsd; + + assert(config_fill && opt_quarantine); + + tsd = tsd_fetch(); + if (tsd_quarantine_get(tsd) == NULL) + quarantine_alloc_hook_work(tsd); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/rb.h b/redis.submodule/jemalloc/include/jemalloc/internal/rb.h new file mode 100644 index 0000000..2ca8e59 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/rb.h @@ -0,0 +1,981 @@ +/*- + ******************************************************************************* + * + * cpp macro implementation of left-leaning 2-3 red-black trees. Parent + * pointers are not used, and color bits are stored in the least significant + * bit of right-child pointers (if RB_COMPACT is defined), thus making node + * linkage as compact as is possible for red-black trees. + * + * Usage: + * + * #include + * #include + * #define NDEBUG // (Optional, see assert(3).) + * #include + * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) + * #include + * ... 
+ * + ******************************************************************************* + */ + +#ifndef RB_H_ +#define RB_H_ + +#ifdef RB_COMPACT +/* Node structure. */ +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right_red; \ +} +#else +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right; \ + bool rbn_red; \ +} +#endif + +/* Root structure. */ +#define rb_tree(a_type) \ +struct { \ + a_type *rbt_root; \ + a_type rbt_nil; \ +} + +/* Left accessors. */ +#define rbtn_left_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_left) +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ + (a_node)->a_field.rbn_left = a_left; \ +} while (0) + +#ifdef RB_COMPACT +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ + & ((ssize_t)-2))) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ + | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ + & ((size_t)1))) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ + | ((ssize_t)a_red)); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ + (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ +} while (0) +#else +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_right) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right = a_right; \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_red) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_red = (a_red); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = true; \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = false; \ +} while (0) +#endif + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ + rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) + +/* Tree initializer. */ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ + rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ + rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ +} while (0) + +/* Internal utility macros. 
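+ * rbtn_first()/rbtn_last() walk to the leftmost/rightmost node of the given
+ * subtree (yielding rbt_nil for an empty subtree), and rbtn_rotate_left()/
+ * rbtn_rotate_right() perform single rotations around a_node, returning the
+ * new subtree root through r_node.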
*/ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != &(a_rbt)->rbt_nil) { \ + for (; \ + rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ + (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != &(a_rbt)->rbt_nil) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != \ + &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ + (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ + rbtn_right_set(a_type, a_field, (a_node), \ + rbtn_left_get(a_type, a_field, (r_node))); \ + rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ + rbtn_left_set(a_type, a_field, (a_node), \ + rbtn_right_get(a_type, a_field, (r_node))); \ + rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +/* + * The rb_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to rb_gen(). + */ + +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree); \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, a_type *key); \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg); \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); + +/* + * The rb_gen() macro generates a type-specific red-black tree implementation, + * based on the above cpp macros. + * + * Arguments: + * + * a_attr : Function attribute for generated functions (ex: static). + * a_prefix : Prefix for generated functions (ex: ex_). + * a_rb_type : Type for red-black tree data structure (ex: ex_t). + * a_type : Type for red-black tree node data structure (ex: ex_node_t). + * a_field : Name of red-black tree node linkage (ex: ex_link). + * a_cmp : Node comparison function name, with the following prototype: + * int (a_cmp *)(a_type *a_node, a_type *a_other); + * ^^^^^^ + * or a_key + * Interpretation of comparison function return values: + * -1 : a_node < a_other + * 0 : a_node == a_other + * 1 : a_node > a_other + * In all cases, the a_node or a_key macro argument is the first + * argument to the comparison function, which makes it possible + * to write comparison functions that treat the first argument + * specially. 
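+ *
+ *   As an illustrative sketch only (assuming the node type also carries an
+ *   integer "key" field, which the minimal setup below does not declare), a
+ *   comparison function could look like:
+ *
+ *     static int
+ *     ex_cmp(ex_node_t *a, ex_node_t *b)
+ *     {
+ *         return ((a->key > b->key) - (a->key < b->key));
+ *     }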
+ * + * Assuming the following setup: + * + * typedef struct ex_node_s ex_node_t; + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * }; + * typedef rb_tree(ex_node_t) ex_t; + * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) + * + * The following API is generated: + * + * static void + * ex_new(ex_t *tree); + * Description: Initialize a red-black tree structure. + * Args: + * tree: Pointer to an uninitialized red-black tree object. + * + * static bool + * ex_empty(ex_t *tree); + * Description: Determine whether tree is empty. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: True if tree is empty, false otherwise. + * + * static ex_node_t * + * ex_first(ex_t *tree); + * static ex_node_t * + * ex_last(ex_t *tree); + * Description: Get the first/last node in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: First/last node in tree, or NULL if tree is empty. + * + * static ex_node_t * + * ex_next(ex_t *tree, ex_node_t *node); + * static ex_node_t * + * ex_prev(ex_t *tree, ex_node_t *node); + * Description: Get node's successor/predecessor. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: A node in tree. + * Ret: node's successor/predecessor in tree, or NULL if node is + * last/first. + * + * static ex_node_t * + * ex_search(ex_t *tree, ex_node_t *key); + * Description: Search for node that matches key. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or NULL if no match. + * + * static ex_node_t * + * ex_nsearch(ex_t *tree, ex_node_t *key); + * static ex_node_t * + * ex_psearch(ex_t *tree, ex_node_t *key); + * Description: Search for node that matches key. If no match is found, + * return what would be key's successor/predecessor, were + * key in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or if no match, hypothetical node's + * successor/predecessor (NULL if no successor/predecessor). + * + * static void + * ex_insert(ex_t *tree, ex_node_t *node); + * Description: Insert node into tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node to be inserted into tree. + * + * static void + * ex_remove(ex_t *tree, ex_node_t *node); + * Description: Remove node from tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node in tree to be removed. + * + * static ex_node_t * + * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * static ex_node_t * + * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * Description: Iterate forward/backward over tree, starting at node. If + * tree is modified, iteration must be immediately + * terminated by the callback function that causes the + * modification. + * Args: + * tree : Pointer to an initialized red-black tree object. + * start: Node at which to start iteration, or NULL to start at + * first/last node. + * cb : Callback function, which is called for each node during + * iteration. Under normal circumstances the callback function + * should return NULL, which causes iteration to continue. If a + * callback function returns non-NULL, iteration is immediately + * terminated and the non-NULL return value is returned by the + * iterator. This is useful for re-starting iteration after + * modifying tree. 
+ * arg : Opaque pointer passed to cb(). + * Ret: NULL if iteration completed, or the non-NULL callback return value + * that caused termination of the iteration. + */ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree) { \ + rb_new(a_type, a_field, rbtree); \ +} \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree) { \ + return (rbtree->rbt_root == &rbtree->rbt_nil); \ +} \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != &rbtree->rbt_nil); \ + ret = &rbtree->rbt_nil; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != &rbtree->rbt_nil); \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != &rbtree->rbt_nil); \ + ret = &rbtree->rbt_nil; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != &rbtree->rbt_nil); \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ + a_type *ret; \ + int cmp; \ + ret = rbtree->rbt_root; \ + while (ret != &rbtree->rbt_nil \ + && (cmp = (a_cmp)(key, ret)) != 0) { \ + if (cmp < 0) { \ + ret = rbtn_left_get(a_type, a_field, ret); \ + } else { \ + ret = rbtn_right_get(a_type, a_field, ret); \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = &rbtree->rbt_nil; \ + while (tnode != &rbtree->rbt_nil) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = &rbtree->rbt_nil; \ + while (tnode != &rbtree->rbt_nil) { \ + int cmp = 
(a_cmp)(key, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } path[sizeof(void *) << 4], *pathp; \ + rbt_node_new(a_type, a_field, rbtree, node); \ + /* Wind. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + assert(cmp != 0); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + } \ + } \ + pathp->node = node; \ + /* Unwind. */ \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + a_type *cnode = pathp->node; \ + if (pathp->cmp < 0) { \ + a_type *left = pathp[1].node; \ + rbtn_left_set(a_type, a_field, cnode, left); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (rbtn_red_get(a_type, a_field, leftleft)) { \ + /* Fix up 4-node. */ \ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, cnode, tnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } else { \ + a_type *right = pathp[1].node; \ + rbtn_right_set(a_type, a_field, cnode, right); \ + if (rbtn_red_get(a_type, a_field, right)) { \ + a_type *left = rbtn_left_get(a_type, a_field, cnode); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + /* Split 4-node. */ \ + rbtn_black_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, right); \ + rbtn_red_set(a_type, a_field, cnode); \ + } else { \ + /* Lean left. */ \ + a_type *tnode; \ + bool tred = rbtn_red_get(a_type, a_field, cnode); \ + rbtn_rotate_left(a_type, a_field, cnode, tnode); \ + rbtn_color_set(a_type, a_field, tnode, tred); \ + rbtn_red_set(a_type, a_field, cnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } \ + pathp->node = cnode; \ + } \ + /* Set root, and make it black. */ \ + rbtree->rbt_root = path->node; \ + rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ +} \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } *pathp, *nodep, path[sizeof(void *) << 4]; \ + /* Wind. */ \ + nodep = NULL; /* Silence compiler warning. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + if (cmp == 0) { \ + /* Find node's successor, in preparation for swap. */ \ + pathp->cmp = 1; \ + nodep = pathp; \ + for (pathp++; pathp->node != &rbtree->rbt_nil; \ + pathp++) { \ + pathp->cmp = -1; \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } \ + break; \ + } \ + } \ + } \ + assert(nodep->node == node); \ + pathp--; \ + if (pathp->node != node) { \ + /* Swap node with its successor. 
*/ \ + bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ + rbtn_color_set(a_type, a_field, pathp->node, \ + rbtn_red_get(a_type, a_field, node)); \ + rbtn_left_set(a_type, a_field, pathp->node, \ + rbtn_left_get(a_type, a_field, node)); \ + /* If node's successor is its right child, the following code */\ + /* will do the wrong thing for the right child pointer. */\ + /* However, it doesn't matter, because the pointer will be */\ + /* properly set when the successor is pruned. */\ + rbtn_right_set(a_type, a_field, pathp->node, \ + rbtn_right_get(a_type, a_field, node)); \ + rbtn_color_set(a_type, a_field, node, tred); \ + /* The pruned leaf node's child pointers are never accessed */\ + /* again, so don't bother setting them to nil. */\ + nodep->node = pathp->node; \ + pathp->node = node; \ + if (nodep == path) { \ + rbtree->rbt_root = nodep->node; \ + } else { \ + if (nodep[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } else { \ + rbtn_right_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } \ + } \ + } else { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + if (left != &rbtree->rbt_nil) { \ + /* node has no successor, but it has a left child. */\ + /* Splice node out, without losing the left child. */\ + assert(!rbtn_red_get(a_type, a_field, node)); \ + assert(rbtn_red_get(a_type, a_field, left)); \ + rbtn_black_set(a_type, a_field, left); \ + if (pathp == path) { \ + rbtree->rbt_root = left; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + left); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + left); \ + } \ + } \ + return; \ + } else if (pathp == path) { \ + /* The tree only contained one node. */ \ + rbtree->rbt_root = &rbtree->rbt_nil; \ + return; \ + } \ + } \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + /* Prune red node, which requires no fixup. */ \ + assert(pathp[-1].cmp < 0); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + &rbtree->rbt_nil); \ + return; \ + } \ + /* The node to be pruned is black, so unwind until balance is */\ + /* restored. */\ + pathp->node = &rbtree->rbt_nil; \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + assert(pathp->cmp != 0); \ + if (pathp->cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + a_type *tnode; \ + if (rbtn_red_get(a_type, a_field, rightleft)) { \ + /* In the following diagrams, ||, //, and \\ */\ + /* indicate the path to the removed node. */\ + /* */\ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + /* */\ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + /* */\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root. 
*/\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + if (rbtn_red_get(a_type, a_field, rightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, rightleft); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + a_type *tnode; \ + rbtn_red_set(a_type, a_field, pathp->node); \ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + pathp->node = tnode; \ + } \ + } \ + } else { \ + a_type *left; \ + rbtn_right_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + left = rbtn_left_get(a_type, a_field, pathp->node); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *tnode; \ + a_type *leftright = rbtn_right_get(a_type, a_field, \ + left); \ + a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ + leftright); \ + if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (r) */\ + a_type *unode; \ + rbtn_black_set(a_type, a_field, leftrightleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + unode); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_right_set(a_type, a_field, unode, tnode); \ + rbtn_rotate_left(a_type, a_field, unode, tnode); \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (b) */\ + assert(leftright != &rbtree->rbt_nil); \ + rbtn_red_set(a_type, a_field, leftright); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_black_set(a_type, a_field, tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root, which may actually be the tree root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + } \ + return; \ + } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (rbtn_red_get(a_type, a_field, leftleft)) { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root. 
*/\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, pathp->node); \ + /* Balance restored. */ \ + return; \ + } \ + } else { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (rbtn_red_get(a_type, a_field, leftleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + } \ + } \ + } \ + } \ + /* Set root. */ \ + rbtree->rbt_root = path->node; \ + assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ +} \ +a_attr a_type * \ +a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == &rbtree->rbt_nil) { \ + return (&rbtree->rbt_nil); \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ + a_field, node), cb, arg)) != &rbtree->rbt_nil \ + || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp < 0) { \ + a_type *ret; \ + if ((ret = a_prefix##iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != \ + &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } else if (cmp > 0) { \ + return (a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ + cb, arg); \ + } else { \ + ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == &rbtree->rbt_nil) { \ + return (&rbtree->rbt_nil); \ + } else { \ + 
a_type *ret; \ + if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != \ + &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp > 0) { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != \ + &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else if (cmp < 0) { \ + return (a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtree->rbt_root, cb, arg); \ + } else { \ + ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ + cb, arg); \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} + +#endif /* RB_H_ */ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/rtree.h b/redis.submodule/jemalloc/include/jemalloc/internal/rtree.h new file mode 100644 index 0000000..28ae9d1 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/rtree.h @@ -0,0 +1,294 @@ +/* + * This radix tree implementation is tailored to the singular purpose of + * associating metadata with chunks that are currently owned by jemalloc. + * + ******************************************************************************* + */ +#ifdef JEMALLOC_H_TYPES + +typedef struct rtree_node_elm_s rtree_node_elm_t; +typedef struct rtree_level_s rtree_level_t; +typedef struct rtree_s rtree_t; + +/* + * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the + * machine address width. + */ +#define LG_RTREE_BITS_PER_LEVEL 4 +#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL) +#define RTREE_HEIGHT_MAX \ + ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) + +/* Used for two-stage lock-free node initialization. */ +#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) + +/* + * The node allocation callback function's argument is the number of contiguous + * rtree_node_elm_t structures to allocate, and the resulting memory must be + * zeroed. + */ +typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); +typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct rtree_node_elm_s { + union { + void *pun; + rtree_node_elm_t *child; + extent_node_t *val; + }; +}; + +struct rtree_level_s { + /* + * A non-NULL subtree points to a subtree rooted along the hypothetical + * path to the leaf node corresponding to key 0. 
Depending on what keys + * have been used to store to the tree, an arbitrary combination of + * subtree pointers may remain NULL. + * + * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. + * This results in a 3-level tree, and the leftmost leaf can be directly + * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding + * 0x00000000) can be accessed via subtrees[1], and the remainder of the + * tree can be accessed via subtrees[0]. + * + * levels[0] : [ | 0x0001******** | 0x0002******** | ...] + * + * levels[1] : [ | 0x00000001**** | 0x00000002**** | ... ] + * + * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] + * + * This has practical implications on x64, which currently uses only the + * lower 47 bits of virtual address space in userland, thus leaving + * subtrees[0] unused and avoiding a level of tree traversal. + */ + union { + void *subtree_pun; + rtree_node_elm_t *subtree; + }; + /* Number of key bits distinguished by this level. */ + unsigned bits; + /* + * Cumulative number of key bits distinguished by traversing to + * corresponding tree level. + */ + unsigned cumbits; +}; + +struct rtree_s { + rtree_node_alloc_t *alloc; + rtree_node_dalloc_t *dalloc; + unsigned height; + /* + * Precomputed table used to convert from the number of leading 0 key + * bits to which subtree level to start at. + */ + unsigned start_level[RTREE_HEIGHT_MAX]; + rtree_level_t levels[RTREE_HEIGHT_MAX]; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, + rtree_node_dalloc_t *dalloc); +void rtree_delete(rtree_t *rtree); +rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, + unsigned level); +rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, + rtree_node_elm_t *elm, unsigned level); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); +uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); + +bool rtree_node_valid(rtree_node_elm_t *node); +rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm); +rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level); +extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, + bool dependent); +void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, + const extent_node_t *val); +rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level); +rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level); + +extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); +bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) +JEMALLOC_INLINE unsigned +rtree_start_level(rtree_t *rtree, uintptr_t key) +{ + unsigned start_level; + + if (unlikely(key == 0)) + return (rtree->height - 1); + + start_level = rtree->start_level[lg_floor(key) >> + LG_RTREE_BITS_PER_LEVEL]; + assert(start_level < rtree->height); + return (start_level); +} + +JEMALLOC_INLINE uintptr_t +rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) +{ + + return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - + rtree->levels[level].cumbits)) & ((ZU(1) << + rtree->levels[level].bits) - 1)); +} + +JEMALLOC_INLINE bool 
+rtree_node_valid(rtree_node_elm_t *node) +{ + + return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); +} + +JEMALLOC_INLINE rtree_node_elm_t * +rtree_child_tryread(rtree_node_elm_t *elm) +{ + rtree_node_elm_t *child; + + /* Double-checked read (first read may be stale. */ + child = elm->child; + if (!rtree_node_valid(child)) + child = atomic_read_p(&elm->pun); + return (child); +} + +JEMALLOC_INLINE rtree_node_elm_t * +rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) +{ + rtree_node_elm_t *child; + + child = rtree_child_tryread(elm); + if (unlikely(!rtree_node_valid(child))) + child = rtree_child_read_hard(rtree, elm, level); + return (child); +} + +JEMALLOC_INLINE extent_node_t * +rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) +{ + + if (dependent) { + /* + * Reading a val on behalf of a pointer to a valid allocation is + * guaranteed to be a clean read even without synchronization, + * because the rtree update became visible in memory before the + * pointer came into existence. + */ + return (elm->val); + } else { + /* + * An arbitrary read, e.g. on behalf of ivsalloc(), may not be + * dependent on a previous rtree write, which means a stale read + * could result if synchronization were omitted here. + */ + return (atomic_read_p(&elm->pun)); + } +} + +JEMALLOC_INLINE void +rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) +{ + + atomic_write_p(&elm->pun, val); +} + +JEMALLOC_INLINE rtree_node_elm_t * +rtree_subtree_tryread(rtree_t *rtree, unsigned level) +{ + rtree_node_elm_t *subtree; + + /* Double-checked read (first read may be stale. */ + subtree = rtree->levels[level].subtree; + if (!rtree_node_valid(subtree)) + subtree = atomic_read_p(&rtree->levels[level].subtree_pun); + return (subtree); +} + +JEMALLOC_INLINE rtree_node_elm_t * +rtree_subtree_read(rtree_t *rtree, unsigned level) +{ + rtree_node_elm_t *subtree; + + subtree = rtree_subtree_tryread(rtree, level); + if (unlikely(!rtree_node_valid(subtree))) + subtree = rtree_subtree_read_hard(rtree, level); + return (subtree); +} + +JEMALLOC_INLINE extent_node_t * +rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) +{ + uintptr_t subkey; + unsigned i, start_level; + rtree_node_elm_t *node, *child; + + start_level = rtree_start_level(rtree, key); + + for (i = start_level, node = rtree_subtree_tryread(rtree, start_level); + /**/; i++, node = child) { + if (!dependent && unlikely(!rtree_node_valid(node))) + return (NULL); + subkey = rtree_subkey(rtree, key, i); + if (i == rtree->height - 1) { + /* + * node is a leaf, so it contains values rather than + * child pointers. + */ + return (rtree_val_read(rtree, &node[subkey], + dependent)); + } + assert(i < rtree->height - 1); + child = rtree_child_tryread(&node[subkey]); + } + not_reached(); +} + +JEMALLOC_INLINE bool +rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) +{ + uintptr_t subkey; + unsigned i, start_level; + rtree_node_elm_t *node, *child; + + start_level = rtree_start_level(rtree, key); + + node = rtree_subtree_read(rtree, start_level); + if (node == NULL) + return (true); + for (i = start_level; /**/; i++, node = child) { + subkey = rtree_subkey(rtree, key, i); + if (i == rtree->height - 1) { + /* + * node is a leaf, so it contains values rather than + * child pointers. 
+ */ + rtree_val_write(rtree, &node[subkey], val); + return (false); + } + assert(i + 1 < rtree->height); + child = rtree_child_read(rtree, &node[subkey], i); + if (child == NULL) + return (true); + } + not_reached(); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/size_classes.sh b/redis.submodule/jemalloc/include/jemalloc/internal/size_classes.sh new file mode 100755 index 0000000..fc82036 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/size_classes.sh @@ -0,0 +1,286 @@ +#!/bin/sh +# +# Usage: size_classes.sh + +# The following limits are chosen such that they cover all supported platforms. + +# Pointer sizes. +lg_zarr="2 3" + +# Quanta. +lg_qarr=$1 + +# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)]. +lg_tmin=$2 + +# Maximum lookup size. +lg_kmax=12 + +# Page sizes. +lg_parr=`echo $3 | tr ',' ' '` + +# Size class group size (number of size classes for each size doubling). +lg_g=$4 + +pow2() { + e=$1 + pow2_result=1 + while [ ${e} -gt 0 ] ; do + pow2_result=$((${pow2_result} + ${pow2_result})) + e=$((${e} - 1)) + done +} + +lg() { + x=$1 + lg_result=0 + while [ ${x} -gt 1 ] ; do + lg_result=$((${lg_result} + 1)) + x=$((${x} / 2)) + done +} + +size_class() { + index=$1 + lg_grp=$2 + lg_delta=$3 + ndelta=$4 + lg_p=$5 + lg_kmax=$6 + + lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} + if [ ${pow2_result} -lt ${ndelta} ] ; then + rem="yes" + else + rem="no" + fi + + lg_size=${lg_grp} + if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then + lg_size=$((${lg_grp} + 1)) + else + lg_size=${lg_grp} + rem="yes" + fi + + if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then + bin="yes" + else + bin="no" + fi + if [ ${lg_size} -lt ${lg_kmax} \ + -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then + lg_delta_lookup=${lg_delta} + else + lg_delta_lookup="no" + fi + printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup} + # Defined upon return: + # - lg_delta_lookup (${lg_delta} or "no") + # - bin ("yes" or "no") +} + +sep_line() { + echo " \\" +} + +size_classes() { + lg_z=$1 + lg_q=$2 + lg_t=$3 + lg_p=$4 + lg_g=$5 + + pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} + pow2 ${lg_g}; g=${pow2_result} + + echo "#define SIZE_CLASSES \\" + echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\" + + ntbins=0 + nlbins=0 + lg_tiny_maxclass='"NA"' + nbins=0 + + # Tiny size classes. + ndelta=0 + index=0 + lg_grp=${lg_t} + lg_delta=${lg_grp} + while [ ${lg_grp} -lt ${lg_q} ] ; do + size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} + if [ ${lg_delta_lookup} != "no" ] ; then + nlbins=$((${index} + 1)) + fi + if [ ${bin} != "no" ] ; then + nbins=$((${index} + 1)) + fi + ntbins=$((${ntbins} + 1)) + lg_tiny_maxclass=${lg_grp} # Final written value is correct. + index=$((${index} + 1)) + lg_delta=${lg_grp} + lg_grp=$((${lg_grp} + 1)) + done + + # First non-tiny group. + if [ ${ntbins} -gt 0 ] ; then + sep_line + # The first size class has an unusual encoding, because the size has to be + # split between grp and delta*ndelta. 
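+    # Worked example (assuming lg_q=4 and at least one tiny class): the first
+    # non-tiny class is the quantum, 16 bytes, which this branch emits as
+    # lg_grp=3, lg_delta=3, ndelta=1, i.e. 2^3 + 1*2^3 = 16.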
+ lg_grp=$((${lg_grp} - 1)) + ndelta=1 + size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} + index=$((${index} + 1)) + lg_grp=$((${lg_grp} + 1)) + lg_delta=$((${lg_delta} + 1)) + fi + while [ ${ndelta} -lt ${g} ] ; do + size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} + index=$((${index} + 1)) + ndelta=$((${ndelta} + 1)) + done + + # All remaining groups. + lg_grp=$((${lg_grp} + ${lg_g})) + while [ ${lg_grp} -lt ${ptr_bits} ] ; do + sep_line + ndelta=1 + if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then + ndelta_limit=$((${g} - 1)) + else + ndelta_limit=${g} + fi + while [ ${ndelta} -le ${ndelta_limit} ] ; do + size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} + if [ ${lg_delta_lookup} != "no" ] ; then + nlbins=$((${index} + 1)) + # Final written value is correct: + lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" + fi + if [ ${bin} != "no" ] ; then + nbins=$((${index} + 1)) + # Final written value is correct: + small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" + if [ ${lg_g} -gt 0 ] ; then + lg_large_minclass=$((${lg_grp} + 1)) + else + lg_large_minclass=$((${lg_grp} + 2)) + fi + fi + # Final written value is correct: + huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" + index=$((${index} + 1)) + ndelta=$((${ndelta} + 1)) + done + lg_grp=$((${lg_grp} + 1)) + lg_delta=$((${lg_delta} + 1)) + done + echo + nsizes=${index} + + # Defined upon completion: + # - ntbins + # - nlbins + # - nbins + # - nsizes + # - lg_tiny_maxclass + # - lookup_maxclass + # - small_maxclass + # - lg_large_minclass + # - huge_maxclass +} + +cat < 255) +# error "Too many small size classes" +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ +EOF diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/stats.h b/redis.submodule/jemalloc/include/jemalloc/internal/stats.h new file mode 100644 index 0000000..c91dba9 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/stats.h @@ -0,0 +1,183 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct tcache_bin_stats_s tcache_bin_stats_t; +typedef struct malloc_bin_stats_s malloc_bin_stats_t; +typedef struct malloc_large_stats_s malloc_large_stats_t; +typedef struct malloc_huge_stats_s malloc_huge_stats_t; +typedef struct arena_stats_s arena_stats_t; +typedef struct chunk_stats_s chunk_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct tcache_bin_stats_s { + /* + * Number of allocation requests that corresponded to the size of this + * bin. + */ + uint64_t nrequests; +}; + +struct malloc_bin_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the bin. 
Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to the size of this + * bin. This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* + * Current number of regions of this size class, including regions + * currently cached by tcache. + */ + size_t curregs; + + /* Number of tcache fills from this bin. */ + uint64_t nfills; + + /* Number of tcache flushes to this bin. */ + uint64_t nflushes; + + /* Total number of runs created for this bin's size class. */ + uint64_t nruns; + + /* + * Total number of runs reused by extracting them from the runs tree for + * this bin's size class. + */ + uint64_t reruns; + + /* Current number of runs in this bin. */ + size_t curruns; +}; + +struct malloc_large_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the arena. Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to this size class. + * This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* + * Current number of runs of this size class, including runs currently + * cached by tcache. + */ + size_t curruns; +}; + +struct malloc_huge_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the arena. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* Current number of (multi-)chunk allocations of this size class. */ + size_t curhchunks; +}; + +struct arena_stats_s { + /* Number of bytes currently mapped. */ + size_t mapped; + + /* + * Total number of purge sweeps, total number of madvise calls made, + * and total pages purged in order to keep dirty unused memory under + * control. + */ + uint64_t npurge; + uint64_t nmadvise; + uint64_t purged; + + /* + * Number of bytes currently mapped purely for metadata purposes, and + * number of bytes currently allocated for internal metadata. + */ + size_t metadata_mapped; + size_t metadata_allocated; /* Protected via atomic_*_z(). */ + + /* Per-size-category statistics. */ + size_t allocated_large; + uint64_t nmalloc_large; + uint64_t ndalloc_large; + uint64_t nrequests_large; + + size_t allocated_huge; + uint64_t nmalloc_huge; + uint64_t ndalloc_huge; + + /* One element for each large size class. */ + malloc_large_stats_t *lstats; + + /* One element for each huge size class. 
*/ + malloc_huge_stats_t *hstats; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_stats_print; + +extern size_t stats_cactive; + +void stats_print(void (*write)(void *, const char *), void *cbopaque, + const char *opts); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +size_t stats_cactive_get(void); +void stats_cactive_add(size_t size); +void stats_cactive_sub(size_t size); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) +JEMALLOC_INLINE size_t +stats_cactive_get(void) +{ + + return (atomic_read_z(&stats_cactive)); +} + +JEMALLOC_INLINE void +stats_cactive_add(size_t size) +{ + + atomic_add_z(&stats_cactive, size); +} + +JEMALLOC_INLINE void +stats_cactive_sub(size_t size) +{ + + atomic_sub_z(&stats_cactive, size); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/tcache.h b/redis.submodule/jemalloc/include/jemalloc/internal/tcache.h new file mode 100644 index 0000000..5079cd2 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/tcache.h @@ -0,0 +1,426 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct tcache_bin_info_s tcache_bin_info_t; +typedef struct tcache_bin_s tcache_bin_t; +typedef struct tcache_s tcache_t; +typedef struct tcaches_s tcaches_t; + +/* + * tcache pointers close to NULL are used to encode state information that is + * used for two purposes: preventing thread caching on a per thread basis and + * cleaning up during thread shutdown. + */ +#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) +#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) +#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) +#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY + +/* + * Absolute minimum number of cache slots for each small bin. + */ +#define TCACHE_NSLOTS_SMALL_MIN 20 + +/* + * Absolute maximum number of cache slots for each small bin in the thread + * cache. This is an additional constraint beyond that imposed as: twice the + * number of regions per run for this size class. + * + * This constant must be an even number. + */ +#define TCACHE_NSLOTS_SMALL_MAX 200 + +/* Number of cache slots for large size classes. */ +#define TCACHE_NSLOTS_LARGE 20 + +/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ +#define LG_TCACHE_MAXCLASS_DEFAULT 15 + +/* + * TCACHE_GC_SWEEP is the approximate number of allocation events between + * full GC sweeps. Integer rounding may cause the actual number to be + * slightly higher, since GC is performed incrementally. + */ +#define TCACHE_GC_SWEEP 8192 + +/* Number of tcache allocation/deallocation events between incremental GCs. */ +#define TCACHE_GC_INCR \ + ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +typedef enum { + tcache_enabled_false = 0, /* Enable cast to/from bool. 
*/ + tcache_enabled_true = 1, + tcache_enabled_default = 2 +} tcache_enabled_t; + +/* + * Read-only information associated with each element of tcache_t's tbins array + * is stored separately, mainly to reduce memory usage. + */ +struct tcache_bin_info_s { + unsigned ncached_max; /* Upper limit on ncached. */ +}; + +struct tcache_bin_s { + tcache_bin_stats_t tstats; + int low_water; /* Min # cached since last GC. */ + unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ + unsigned ncached; /* # of cached objects. */ + void **avail; /* Stack of available objects. */ +}; + +struct tcache_s { + ql_elm(tcache_t) link; /* Used for aggregating stats. */ + uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ + unsigned ev_cnt; /* Event count since incremental GC. */ + szind_t next_gc_bin; /* Next bin to GC. */ + tcache_bin_t tbins[1]; /* Dynamically sized. */ + /* + * The pointer stacks associated with tbins follow as a contiguous + * array. During tcache initialization, the avail pointer in each + * element of tbins is initialized to point to the proper offset within + * this array. + */ +}; + +/* Linkage for list of available (previously used) explicit tcache IDs. */ +struct tcaches_s { + union { + tcache_t *tcache; + tcaches_t *next; + }; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_tcache; +extern ssize_t opt_lg_tcache_max; + +extern tcache_bin_info_t *tcache_bin_info; + +/* + * Number of tcache bins. There are NBINS small-object bins, plus 0 or more + * large-object bins. + */ +extern size_t nhbins; + +/* Maximum cached size class. */ +extern size_t tcache_maxclass; + +/* + * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and + * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are + * completely disjoint from this data structure. tcaches starts off as a sparse + * array, so it has no physical memory footprint until individual pages are + * touched. This allows the entire array to be allocated the first time an + * explicit tcache is created without a disproportionate impact on memory usage. 
+ */ +extern tcaches_t *tcaches; + +size_t tcache_salloc(const void *ptr); +void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); +void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind); +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem); +void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache); +void tcache_arena_associate(tcache_t *tcache, arena_t *arena); +void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, + arena_t *newarena); +void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena); +tcache_t *tcache_get_hard(tsd_t *tsd); +tcache_t *tcache_create(tsd_t *tsd, arena_t *arena); +void tcache_cleanup(tsd_t *tsd); +void tcache_enabled_cleanup(tsd_t *tsd); +void tcache_stats_merge(tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void tcache_event(tsd_t *tsd, tcache_t *tcache); +void tcache_flush(void); +bool tcache_enabled_get(void); +tcache_t *tcache_get(tsd_t *tsd, bool create); +void tcache_enabled_set(bool enabled); +void *tcache_alloc_easy(tcache_bin_t *tbin); +void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + size_t size, bool zero); +void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + size_t size, bool zero); +void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, + szind_t binind); +void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, + size_t size); +tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) +JEMALLOC_INLINE void +tcache_flush(void) +{ + tsd_t *tsd; + + cassert(config_tcache); + + tsd = tsd_fetch(); + tcache_cleanup(tsd); +} + +JEMALLOC_INLINE bool +tcache_enabled_get(void) +{ + tsd_t *tsd; + tcache_enabled_t tcache_enabled; + + cassert(config_tcache); + + tsd = tsd_fetch(); + tcache_enabled = tsd_tcache_enabled_get(tsd); + if (tcache_enabled == tcache_enabled_default) { + tcache_enabled = (tcache_enabled_t)opt_tcache; + tsd_tcache_enabled_set(tsd, tcache_enabled); + } + + return ((bool)tcache_enabled); +} + +JEMALLOC_INLINE void +tcache_enabled_set(bool enabled) +{ + tsd_t *tsd; + tcache_enabled_t tcache_enabled; + + cassert(config_tcache); + + tsd = tsd_fetch(); + + tcache_enabled = (tcache_enabled_t)enabled; + tsd_tcache_enabled_set(tsd, tcache_enabled); + + if (!enabled) + tcache_cleanup(tsd); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcache_get(tsd_t *tsd, bool create) +{ + tcache_t *tcache; + + if (!config_tcache) + return (NULL); + + tcache = tsd_tcache_get(tsd); + if (!create) + return (tcache); + if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { + tcache = tcache_get_hard(tsd); + tsd_tcache_set(tsd, tcache); + } + + return (tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_event(tsd_t *tsd, tcache_t *tcache) +{ + + if (TCACHE_GC_INCR == 0) + return; + + tcache->ev_cnt++; + assert(tcache->ev_cnt <= TCACHE_GC_INCR); + if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR)) + tcache_event_hard(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_easy(tcache_bin_t *tbin) +{ + void *ret; + + if 
(unlikely(tbin->ncached == 0)) { + tbin->low_water = -1; + return (NULL); + } + tbin->ncached--; + if (unlikely((int)tbin->ncached < tbin->low_water)) + tbin->low_water = tbin->ncached; + ret = tbin->avail[tbin->ncached]; + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + bool zero) +{ + void *ret; + szind_t binind; + size_t usize; + tcache_bin_t *tbin; + + binind = size2index(size); + assert(binind < NBINS); + tbin = &tcache->tbins[binind]; + usize = index2size(binind); + ret = tcache_alloc_easy(tbin); + if (unlikely(ret == NULL)) { + ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind); + if (ret == NULL) + return (NULL); + } + assert(tcache_salloc(ret) == usize); + + if (likely(!zero)) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } else { + if (config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + memset(ret, 0, usize); + } + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += usize; + tcache_event(tsd, tcache); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + bool zero) +{ + void *ret; + szind_t binind; + size_t usize; + tcache_bin_t *tbin; + + binind = size2index(size); + usize = index2size(binind); + assert(usize <= tcache_maxclass); + assert(binind < nhbins); + tbin = &tcache->tbins[binind]; + ret = tcache_alloc_easy(tbin); + if (unlikely(ret == NULL)) { + /* + * Only allocate one large object at a time, because it's quite + * expensive to create one and not use it. 
+ */ + ret = arena_malloc_large(arena, usize, zero); + if (ret == NULL) + return (NULL); + } else { + if (config_prof && usize == LARGE_MINCLASS) { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(ret); + size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> + LG_PAGE); + arena_mapbits_large_binind_set(chunk, pageind, + BININD_INVALID); + } + if (likely(!zero)) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) + memset(ret, 0xa5, usize); + else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } else + memset(ret, 0, usize); + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += usize; + } + + tcache_event(tsd, tcache); + return (ret); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind) +{ + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); + + if (config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_small(tsd, tcache, tbin, binind, + (tbin_info->ncached_max >> 1)); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->avail[tbin->ncached] = ptr; + tbin->ncached++; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size) +{ + szind_t binind; + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert((size & PAGE_MASK) == 0); + assert(tcache_salloc(ptr) > SMALL_MAXCLASS); + assert(tcache_salloc(ptr) <= tcache_maxclass); + + binind = size2index(size); + + if (config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_large(ptr, size); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_large(tsd, tbin, binind, + (tbin_info->ncached_max >> 1), tcache); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->avail[tbin->ncached] = ptr; + tbin->ncached++; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcaches_get(tsd_t *tsd, unsigned ind) +{ + tcaches_t *elm = &tcaches[ind]; + if (unlikely(elm->tcache == NULL)) + elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL)); + return (elm->tcache); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/tsd.h b/redis.submodule/jemalloc/include/jemalloc/internal/tsd.h new file mode 100644 index 0000000..eed7aa0 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/tsd.h @@ -0,0 +1,665 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum number of malloc_tsd users with cleanup functions. */ +#define MALLOC_TSD_CLEANUPS_MAX 2 + +typedef bool (*malloc_tsd_cleanup_t)(void); + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +typedef struct tsd_init_block_s tsd_init_block_t; +typedef struct tsd_init_head_s tsd_init_head_t; +#endif + +typedef struct tsd_s tsd_t; + +typedef enum { + tsd_state_uninitialized, + tsd_state_nominal, + tsd_state_purgatory, + tsd_state_reincarnated +} tsd_state_t; + +/* + * TLS/TSD-agnostic macro-based implementation of thread-specific data. 
There + * are five macros that support (at least) three use cases: file-private, + * library-private, and library-private inlined. Following is an example + * library-private tsd variable: + * + * In example.h: + * typedef struct { + * int x; + * int y; + * } example_t; + * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) + * malloc_tsd_types(example_, example_t) + * malloc_tsd_protos(, example_, example_t) + * malloc_tsd_externs(example_, example_t) + * In example.c: + * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) + * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, + * example_tsd_cleanup) + * + * The result is a set of generated functions, e.g.: + * + * bool example_tsd_boot(void) {...} + * example_t *example_tsd_get() {...} + * void example_tsd_set(example_t *val) {...} + * + * Note that all of the functions deal in terms of (a_type *) rather than + * (a_type) so that it is possible to support non-pointer types (unlike + * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is + * cast to (void *). This means that the cleanup function needs to cast the + * function argument to (a_type *), then dereference the resulting pointer to + * access fields, e.g. + * + * void + * example_tsd_cleanup(void *arg) + * { + * example_t *example = (example_t *)arg; + * + * example->x = 42; + * [...] + * if ([want the cleanup function to be called again]) + * example_tsd_set(example); + * } + * + * If example_tsd_set() is called within example_tsd_cleanup(), it will be + * called again. This is similar to how pthreads TSD destruction works, except + * that pthreads only calls the cleanup function again if the value was set to + * non-NULL. + */ + +/* malloc_tsd_types(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_types(a_name, a_type) +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_types(a_name, a_type) +#elif (defined(_WIN32)) +#define malloc_tsd_types(a_name, a_type) \ +typedef struct { \ + bool initialized; \ + a_type val; \ +} a_name##tsd_wrapper_t; +#else +#define malloc_tsd_types(a_name, a_type) \ +typedef struct { \ + bool initialized; \ + a_type val; \ +} a_name##tsd_wrapper_t; +#endif + +/* malloc_tsd_protos(). */ +#define malloc_tsd_protos(a_attr, a_name, a_type) \ +a_attr bool \ +a_name##tsd_boot0(void); \ +a_attr void \ +a_name##tsd_boot1(void); \ +a_attr bool \ +a_name##tsd_boot(void); \ +a_attr a_type * \ +a_name##tsd_get(void); \ +a_attr void \ +a_name##tsd_set(a_type *val); + +/* malloc_tsd_externs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##tsd_tls; \ +extern __thread bool a_name##tsd_initialized; \ +extern bool a_name##tsd_booted; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##tsd_tls; \ +extern pthread_key_t a_name##tsd_tsd; \ +extern bool a_name##tsd_booted; +#elif (defined(_WIN32)) +#define malloc_tsd_externs(a_name, a_type) \ +extern DWORD a_name##tsd_tsd; \ +extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ +extern bool a_name##tsd_booted; +#else +#define malloc_tsd_externs(a_name, a_type) \ +extern pthread_key_t a_name##tsd_tsd; \ +extern tsd_init_head_t a_name##tsd_init_head; \ +extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ +extern bool a_name##tsd_booted; +#endif + +/* malloc_tsd_data(). 
*/ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##tsd_tls = a_initializer; \ +a_attr __thread bool JEMALLOC_TLS_MODEL \ + a_name##tsd_initialized = false; \ +a_attr bool a_name##tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##tsd_tls = a_initializer; \ +a_attr pthread_key_t a_name##tsd_tsd; \ +a_attr bool a_name##tsd_booted = false; +#elif (defined(_WIN32)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr DWORD a_name##tsd_tsd; \ +a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ + false, \ + a_initializer \ +}; \ +a_attr bool a_name##tsd_booted = false; +#else +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr pthread_key_t a_name##tsd_tsd; \ +a_attr tsd_init_head_t a_name##tsd_init_head = { \ + ql_head_initializer(blocks), \ + MALLOC_MUTEX_INITIALIZER \ +}; \ +a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ + false, \ + a_initializer \ +}; \ +a_attr bool a_name##tsd_booted = false; +#endif + +/* malloc_tsd_funcs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##tsd_cleanup_wrapper(void) \ +{ \ + \ + if (a_name##tsd_initialized) { \ + a_name##tsd_initialized = false; \ + a_cleanup(&a_name##tsd_tls); \ + } \ + return (a_name##tsd_initialized); \ +} \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + malloc_tsd_cleanup_register( \ + &a_name##tsd_cleanup_wrapper); \ + } \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + \ + /* Do nothing. */ \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + return (a_name##tsd_boot0()); \ +} \ +/* Get/set. */ \ +a_attr a_type * \ +a_name##tsd_get(void) \ +{ \ + \ + assert(a_name##tsd_booted); \ + return (&a_name##tsd_tls); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + \ + assert(a_name##tsd_booted); \ + a_name##tsd_tls = (*val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + a_name##tsd_initialized = true; \ +} +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ + 0) \ + return (true); \ + } \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + \ + /* Do nothing. */ \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + return (a_name##tsd_boot0()); \ +} \ +/* Get/set. */ \ +a_attr a_type * \ +a_name##tsd_get(void) \ +{ \ + \ + assert(a_name##tsd_booted); \ + return (&a_name##tsd_tls); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + \ + assert(a_name##tsd_booted); \ + a_name##tsd_tls = (*val); \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + if (pthread_setspecific(a_name##tsd_tsd, \ + (void *)(&a_name##tsd_tls))) { \ + malloc_write(": Error" \ + " setting TSD for "#a_name"\n"); \ + if (opt_abort) \ + abort(); \ + } \ + } \ +} +#elif (defined(_WIN32)) +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. 
*/ \ +a_attr bool \ +a_name##tsd_cleanup_wrapper(void) \ +{ \ + DWORD error = GetLastError(); \ + a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ + TlsGetValue(a_name##tsd_tsd); \ + SetLastError(error); \ + \ + if (wrapper == NULL) \ + return (false); \ + if (a_cleanup != malloc_tsd_no_cleanup && \ + wrapper->initialized) { \ + wrapper->initialized = false; \ + a_cleanup(&wrapper->val); \ + if (wrapper->initialized) { \ + /* Trigger another cleanup round. */ \ + return (true); \ + } \ + } \ + malloc_tsd_dalloc(wrapper); \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ +{ \ + \ + if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \ + malloc_write(": Error setting" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } \ +} \ +a_attr a_name##tsd_wrapper_t * \ +a_name##tsd_wrapper_get(void) \ +{ \ + DWORD error = GetLastError(); \ + a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ + TlsGetValue(a_name##tsd_tsd); \ + SetLastError(error); \ + \ + if (unlikely(wrapper == NULL)) { \ + wrapper = (a_name##tsd_wrapper_t *) \ + malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ + if (wrapper == NULL) { \ + malloc_write(": Error allocating" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } else { \ + wrapper->initialized = false; \ + wrapper->val = a_initializer; \ + } \ + a_name##tsd_wrapper_set(wrapper); \ + } \ + return (wrapper); \ +} \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + a_name##tsd_tsd = TlsAlloc(); \ + if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \ + return (true); \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + malloc_tsd_cleanup_register( \ + &a_name##tsd_cleanup_wrapper); \ + } \ + a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + wrapper = (a_name##tsd_wrapper_t *) \ + malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ + if (wrapper == NULL) { \ + malloc_write(": Error allocating" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } \ + memcpy(wrapper, &a_name##tsd_boot_wrapper, \ + sizeof(a_name##tsd_wrapper_t)); \ + a_name##tsd_wrapper_set(wrapper); \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + if (a_name##tsd_boot0()) \ + return (true); \ + a_name##tsd_boot1(); \ + return (false); \ +} \ +/* Get/set. */ \ +a_attr a_type * \ +a_name##tsd_get(void) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(); \ + return (&wrapper->val); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(); \ + wrapper->val = *(val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + wrapper->initialized = true; \ +} +#else +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr void \ +a_name##tsd_cleanup_wrapper(void *arg) \ +{ \ + a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \ + \ + if (a_cleanup != malloc_tsd_no_cleanup && \ + wrapper->initialized) { \ + wrapper->initialized = false; \ + a_cleanup(&wrapper->val); \ + if (wrapper->initialized) { \ + /* Trigger another cleanup round. 
*/ \ + if (pthread_setspecific(a_name##tsd_tsd, \ + (void *)wrapper)) { \ + malloc_write(": Error" \ + " setting TSD for "#a_name"\n"); \ + if (opt_abort) \ + abort(); \ + } \ + return; \ + } \ + } \ + malloc_tsd_dalloc(wrapper); \ +} \ +a_attr void \ +a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ +{ \ + \ + if (pthread_setspecific(a_name##tsd_tsd, \ + (void *)wrapper)) { \ + malloc_write(": Error setting" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } \ +} \ +a_attr a_name##tsd_wrapper_t * \ +a_name##tsd_wrapper_get(void) \ +{ \ + a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ + pthread_getspecific(a_name##tsd_tsd); \ + \ + if (unlikely(wrapper == NULL)) { \ + tsd_init_block_t block; \ + wrapper = tsd_init_check_recursion( \ + &a_name##tsd_init_head, &block); \ + if (wrapper) \ + return (wrapper); \ + wrapper = (a_name##tsd_wrapper_t *) \ + malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ + block.data = wrapper; \ + if (wrapper == NULL) { \ + malloc_write(": Error allocating" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } else { \ + wrapper->initialized = false; \ + wrapper->val = a_initializer; \ + } \ + a_name##tsd_wrapper_set(wrapper); \ + tsd_init_finish(&a_name##tsd_init_head, &block); \ + } \ + return (wrapper); \ +} \ +a_attr bool \ +a_name##tsd_boot0(void) \ +{ \ + \ + if (pthread_key_create(&a_name##tsd_tsd, \ + a_name##tsd_cleanup_wrapper) != 0) \ + return (true); \ + a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ + a_name##tsd_booted = true; \ + return (false); \ +} \ +a_attr void \ +a_name##tsd_boot1(void) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + wrapper = (a_name##tsd_wrapper_t *) \ + malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ + if (wrapper == NULL) { \ + malloc_write(": Error allocating" \ + " TSD for "#a_name"\n"); \ + abort(); \ + } \ + memcpy(wrapper, &a_name##tsd_boot_wrapper, \ + sizeof(a_name##tsd_wrapper_t)); \ + a_name##tsd_wrapper_set(wrapper); \ +} \ +a_attr bool \ +a_name##tsd_boot(void) \ +{ \ + \ + if (a_name##tsd_boot0()) \ + return (true); \ + a_name##tsd_boot1(); \ + return (false); \ +} \ +/* Get/set. 
*/ \ +a_attr a_type * \ +a_name##tsd_get(void) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(); \ + return (&wrapper->val); \ +} \ +a_attr void \ +a_name##tsd_set(a_type *val) \ +{ \ + a_name##tsd_wrapper_t *wrapper; \ + \ + assert(a_name##tsd_booted); \ + wrapper = a_name##tsd_wrapper_get(); \ + wrapper->val = *(val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + wrapper->initialized = true; \ +} +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +struct tsd_init_block_s { + ql_elm(tsd_init_block_t) link; + pthread_t thread; + void *data; +}; +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; +#endif + +#define MALLOC_TSD \ +/* O(name, type) */ \ + O(tcache, tcache_t *) \ + O(thread_allocated, uint64_t) \ + O(thread_deallocated, uint64_t) \ + O(prof_tdata, prof_tdata_t *) \ + O(arena, arena_t *) \ + O(arenas_cache, arena_t **) \ + O(narenas_cache, unsigned) \ + O(arenas_cache_bypass, bool) \ + O(tcache_enabled, tcache_enabled_t) \ + O(quarantine, quarantine_t *) \ + +#define TSD_INITIALIZER { \ + tsd_state_uninitialized, \ + NULL, \ + 0, \ + 0, \ + NULL, \ + NULL, \ + NULL, \ + 0, \ + false, \ + tcache_enabled_default, \ + NULL \ +} + +struct tsd_s { + tsd_state_t state; +#define O(n, t) \ + t n; +MALLOC_TSD +#undef O +}; + +static const tsd_t tsd_initializer = TSD_INITIALIZER; + +malloc_tsd_types(, tsd_t) + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_no_cleanup(void *arg); +void malloc_tsd_cleanup_register(bool (*f)(void)); +bool malloc_tsd_boot0(void); +void malloc_tsd_boot1(void); +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void *tsd_init_check_recursion(tsd_init_head_t *head, + tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); +#endif +void tsd_cleanup(void *arg); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) + +tsd_t *tsd_fetch(void); +bool tsd_nominal(tsd_t *tsd); +#define O(n, t) \ +t *tsd_##n##p_get(tsd_t *tsd); \ +t tsd_##n##_get(tsd_t *tsd); \ +void tsd_##n##_set(tsd_t *tsd, t n); +MALLOC_TSD +#undef O +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) +malloc_tsd_externs(, tsd_t) +malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch(void) +{ + tsd_t *tsd = tsd_get(); + + if (unlikely(tsd->state != tsd_state_nominal)) { + if (tsd->state == tsd_state_uninitialized) { + tsd->state = tsd_state_nominal; + /* Trigger cleanup handler registration. 
*/ + tsd_set(tsd); + } else if (tsd->state == tsd_state_purgatory) { + tsd->state = tsd_state_reincarnated; + tsd_set(tsd); + } else + assert(tsd->state == tsd_state_reincarnated); + } + + return (tsd); +} + +JEMALLOC_INLINE bool +tsd_nominal(tsd_t *tsd) +{ + + return (tsd->state == tsd_state_nominal); +} + +#define O(n, t) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get(tsd_t *tsd) \ +{ \ + \ + return (&tsd->n); \ +} \ + \ +JEMALLOC_ALWAYS_INLINE t \ +tsd_##n##_get(tsd_t *tsd) \ +{ \ + \ + return (*tsd_##n##p_get(tsd)); \ +} \ + \ +JEMALLOC_ALWAYS_INLINE void \ +tsd_##n##_set(tsd_t *tsd, t n) \ +{ \ + \ + assert(tsd->state == tsd_state_nominal); \ + tsd->n = n; \ +} +MALLOC_TSD +#undef O +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/util.h b/redis.submodule/jemalloc/include/jemalloc/internal/util.h new file mode 100644 index 0000000..b2ea740 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/util.h @@ -0,0 +1,314 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#ifdef _WIN32 +# ifdef _WIN64 +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "ll" +# else +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "" +# endif +# define FMTd32 "d" +# define FMTu32 "u" +# define FMTx32 "x" +# define FMTd64 FMT64_PREFIX "d" +# define FMTu64 FMT64_PREFIX "u" +# define FMTx64 FMT64_PREFIX "x" +# define FMTdPTR FMTPTR_PREFIX "d" +# define FMTuPTR FMTPTR_PREFIX "u" +# define FMTxPTR FMTPTR_PREFIX "x" +#else +# include +# define FMTd32 PRId32 +# define FMTu32 PRIu32 +# define FMTx32 PRIx32 +# define FMTd64 PRId64 +# define FMTu64 PRIu64 +# define FMTx64 PRIx64 +# define FMTdPTR PRIdPTR +# define FMTuPTR PRIuPTR +# define FMTxPTR PRIxPTR +#endif + +/* Size of stack-allocated buffer passed to buferror(). */ +#define BUFERROR_BUF 64 + +/* + * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be + * large enough for all possible uses within jemalloc. + */ +#define MALLOC_PRINTF_BUFSIZE 4096 + +/* + * Wrap a cpp argument that contains commas such that it isn't broken up into + * multiple arguments. + */ +#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ + +/* + * Silence compiler warnings due to uninitialized values. This is used + * wherever the compiler fails to recognize that the variable is never used + * uninitialized. + */ +#ifdef JEMALLOC_CC_SILENCE +# define JEMALLOC_CC_SILENCE_INIT(v) = v +#else +# define JEMALLOC_CC_SILENCE_INIT(v) +#endif + +#define JEMALLOC_GNUC_PREREQ(major, minor) \ + (!defined(__clang__) && \ + (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))) +#ifndef __has_builtin +# define __has_builtin(builtin) (0) +#endif +#define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \ + (defined(__clang__) && __has_builtin(builtin)) + +#ifdef __GNUC__ +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +# if JEMALLOC_GNUC_PREREQ(4, 6) || \ + JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable) +# define unreachable() __builtin_unreachable() +# else +# define unreachable() +# endif +#else +# define likely(x) !!(x) +# define unlikely(x) !!(x) +# define unreachable() +#endif + +/* + * Define a custom assert() in order to reduce the chances of deadlock during + * assertion failure. 
+ */ +#ifndef assert +#define assert(e) do { \ + if (unlikely(config_debug && !(e))) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef not_reached +#define not_reached() do { \ + if (config_debug) { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ + unreachable(); \ +} while (0) +#endif + +#ifndef not_implemented +#define not_implemented() do { \ + if (config_debug) { \ + malloc_printf(": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef assert_not_implemented +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) \ + not_implemented(); \ +} while (0) +#endif + +/* Use to assert a particular configuration, e.g., cassert(config_debug). */ +#define cassert(c) do { \ + if (unlikely(!(c))) \ + not_reached(); \ +} while (0) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *restrict nptr, + char **restrict endptr, int base); +void malloc_write(const char *s); + +/* + * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating + * point math. + */ +int malloc_vsnprintf(char *str, size_t size, const char *format, + va_list ap); +int malloc_snprintf(char *str, size_t size, const char *format, ...) + JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap); +void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, + const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +int jemalloc_ffsl(long bitmap); +int jemalloc_ffs(int bitmap); +size_t pow2_ceil(size_t x); +size_t lg_floor(size_t x); +void set_errno(int errnum); +int get_errno(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) + +/* Sanity check. */ +#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS) +# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure +#endif + +JEMALLOC_ALWAYS_INLINE int +jemalloc_ffsl(long bitmap) +{ + + return (JEMALLOC_INTERNAL_FFSL(bitmap)); +} + +JEMALLOC_ALWAYS_INLINE int +jemalloc_ffs(int bitmap) +{ + + return (JEMALLOC_INTERNAL_FFS(bitmap)); +} + +/* Compute the smallest power of 2 that is >= x. */ +JEMALLOC_INLINE size_t +pow2_ceil(size_t x) +{ + + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; +#if (LG_SIZEOF_PTR == 3) + x |= x >> 32; +#endif + x++; + return (x); +} + +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE size_t +lg_floor(size_t x) +{ + size_t ret; + + assert(x != 0); + + asm ("bsr %1, %0" + : "=r"(ret) // Outputs. + : "r"(x) // Inputs. 
+ ); + return (ret); +} +#elif (defined(_MSC_VER)) +JEMALLOC_INLINE size_t +lg_floor(size_t x) +{ + unsigned long ret; + + assert(x != 0); + +#if (LG_SIZEOF_PTR == 3) + _BitScanReverse64(&ret, x); +#elif (LG_SIZEOF_PTR == 2) + _BitScanReverse(&ret, x); +#else +# error "Unsupported type sizes for lg_floor()" +#endif + return (ret); +} +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) +JEMALLOC_INLINE size_t +lg_floor(size_t x) +{ + + assert(x != 0); + +#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) + return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); +#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) + return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); +#else +# error "Unsupported type sizes for lg_floor()" +#endif +} +#else +JEMALLOC_INLINE size_t +lg_floor(size_t x) +{ + + assert(x != 0); + + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); +#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG) + x |= (x >> 32); + if (x == KZU(0xffffffffffffffff)) + return (63); + x++; + return (jemalloc_ffsl(x) - 2); +#elif (LG_SIZEOF_PTR == 2) + if (x == KZU(0xffffffff)) + return (31); + x++; + return (jemalloc_ffs(x) - 2); +#else +# error "Unsupported type sizes for lg_floor()" +#endif +} +#endif + +/* Set error code. */ +JEMALLOC_INLINE void +set_errno(int errnum) +{ + +#ifdef _WIN32 + SetLastError(errnum); +#else + errno = errnum; +#endif +} + +/* Get last error code. */ +JEMALLOC_INLINE int +get_errno(void) +{ + +#ifdef _WIN32 + return (GetLastError()); +#else + return (errno); +#endif +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/include/jemalloc/internal/valgrind.h b/redis.submodule/jemalloc/include/jemalloc/internal/valgrind.h new file mode 100644 index 0000000..a3380df --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/internal/valgrind.h @@ -0,0 +1,112 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#ifdef JEMALLOC_VALGRIND +#include + +/* + * The size that is reported to Valgrind must be consistent through a chain of + * malloc..realloc..realloc calls. Request size isn't recorded anywhere in + * jemalloc, so it is critical that all callers of these macros provide usize + * rather than request size. As a result, buffer overflow detection is + * technically weakened for the standard API, though it is generally accepted + * practice to consider any extra bytes reported by malloc_usable_size() as + * usable space. + */ +#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \ + if (unlikely(in_valgrind)) \ + valgrind_make_mem_noaccess(ptr, usize); \ +} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \ + if (unlikely(in_valgrind)) \ + valgrind_make_mem_undefined(ptr, usize); \ +} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \ + if (unlikely(in_valgrind)) \ + valgrind_make_mem_defined(ptr, usize); \ +} while (0) +/* + * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro + * calls must be embedded in macros rather than in functions so that when + * Valgrind reports errors, there are no extra stack frames in the backtraces. 
+ */ +#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ + if (unlikely(in_valgrind && cond)) \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ +} while (0) +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ + ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ + zero) do { \ + if (unlikely(in_valgrind)) { \ + size_t rzsize = p2rz(ptr); \ + \ + if (!maybe_moved || ptr == old_ptr) { \ + VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ + usize, rzsize); \ + if (zero && old_usize < usize) { \ + valgrind_make_mem_defined( \ + (void *)((uintptr_t)ptr + \ + old_usize), usize - old_usize); \ + } \ + } else { \ + if (!old_ptr_maybe_null || old_ptr != NULL) { \ + valgrind_freelike_block(old_ptr, \ + old_rzsize); \ + } \ + if (!ptr_maybe_null || ptr != NULL) { \ + size_t copy_size = (old_usize < usize) \ + ? old_usize : usize; \ + size_t tail_size = usize - copy_size; \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ + rzsize, false); \ + if (copy_size > 0) { \ + valgrind_make_mem_defined(ptr, \ + copy_size); \ + } \ + if (zero && tail_size > 0) { \ + valgrind_make_mem_defined( \ + (void *)((uintptr_t)ptr + \ + copy_size), tail_size); \ + } \ + } \ + } \ + } \ +} while (0) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ + if (unlikely(in_valgrind)) \ + valgrind_freelike_block(ptr, rzsize); \ +} while (0) +#else +#define RUNNING_ON_VALGRIND ((unsigned)0) +#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) +#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0) +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \ + ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ + zero) do {} while (0) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_VALGRIND +void valgrind_make_mem_noaccess(void *ptr, size_t usize); +void valgrind_make_mem_undefined(void *ptr, size_t usize); +void valgrind_make_mem_defined(void *ptr, size_t usize); +void valgrind_freelike_block(void *ptr, size_t usize); +#endif + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/redis.submodule/jemalloc/include/jemalloc/jemalloc.sh b/redis.submodule/jemalloc/include/jemalloc/jemalloc.sh new file mode 100755 index 0000000..c085814 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/jemalloc.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +objroot=$1 + +cat < +#include +#include +#include +#include + +#define JEMALLOC_VERSION "@jemalloc_version@" +#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ +#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ +#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ +#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ +#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" + +# define MALLOCX_LG_ALIGN(la) (la) +# if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) (ffs(a)-1) +# else +# define MALLOCX_ALIGN(a) 
\ + ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# endif +# define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20)) + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#ifdef JEMALLOC_HAVE_ATTR +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#elif _MSC_VER +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif diff --git a/redis.submodule/jemalloc/include/jemalloc/jemalloc_mangle.sh b/redis.submodule/jemalloc/include/jemalloc/jemalloc_mangle.sh new file mode 100755 index 0000000..df328b7 --- /dev/null +++ b/redis.submodule/jemalloc/include/jemalloc/jemalloc_mangle.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +public_symbols_txt=$1 +symbol_prefix=$2 + +cat < + +/* MSVC doesn't define _Bool or bool in C, but does have BOOL */ +/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ +/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as + * a built-in type. 
*/ +#ifndef __clang__ +typedef BOOL _Bool; +#endif + +#define bool _Bool +#define true 1 +#define false 0 + +#define __bool_true_false_are_defined 1 + +#endif /* stdbool_h */ diff --git a/redis.submodule/jemalloc/include/msvc_compat/C99/stdint.h b/redis.submodule/jemalloc/include/msvc_compat/C99/stdint.h new file mode 100644 index 0000000..d02608a --- /dev/null +++ b/redis.submodule/jemalloc/include/msvc_compat/C99/stdint.h @@ -0,0 +1,247 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we should wrap include with 'extern "C++" {}' +// or compiler give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#ifdef __cplusplus +extern "C" { +#endif +# include +#ifdef __cplusplus +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. 
+#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer 
types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +#define INTMAX_C INT64_C +#define UINTMAX_C UINT64_C + +#endif // __STDC_CONSTANT_MACROS ] + + +#endif // _MSC_STDINT_H_ ] diff --git a/redis.submodule/jemalloc/include/msvc_compat/strings.h b/redis.submodule/jemalloc/include/msvc_compat/strings.h new file mode 100644 index 0000000..f01ffdd --- /dev/null +++ b/redis.submodule/jemalloc/include/msvc_compat/strings.h @@ -0,0 +1,29 @@ +#ifndef strings_h +#define strings_h + +/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided + * for both */ +#ifdef _MSC_VER +# include +# pragma intrinsic(_BitScanForward) +static __forceinline int ffsl(long x) +{ + unsigned long i; + + if (_BitScanForward(&i, x)) + return (i + 1); + return (0); +} + +static __forceinline int ffs(int x) +{ + + return (ffsl(x)); +} + +#else +# define ffsl(x) __builtin_ffsl(x) +# define ffs(x) __builtin_ffs(x) +#endif + +#endif /* strings_h */ diff --git a/redis.submodule/jemalloc/include/msvc_compat/windows_extra.h b/redis.submodule/jemalloc/include/msvc_compat/windows_extra.h new file mode 100644 index 0000000..0c5e323 --- /dev/null +++ b/redis.submodule/jemalloc/include/msvc_compat/windows_extra.h @@ -0,0 +1,26 @@ +#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H +#define MSVC_COMPAT_WINDOWS_EXTRA_H + +#ifndef ENOENT +# define ENOENT ERROR_PATH_NOT_FOUND +#endif +#ifndef EINVAL +# define EINVAL ERROR_BAD_ARGUMENTS +#endif +#ifndef EAGAIN +# define EAGAIN ERROR_OUTOFMEMORY +#endif +#ifndef EPERM +# define EPERM ERROR_WRITE_FAULT +#endif +#ifndef EFAULT +# define EFAULT ERROR_INVALID_ADDRESS +#endif +#ifndef ENOMEM +# define ENOMEM ERROR_NOT_ENOUGH_MEMORY +#endif +#ifndef ERANGE +# define ERANGE ERROR_INVALID_DATA +#endif + +#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ diff --git a/redis.submodule/jemalloc/install-sh b/redis.submodule/jemalloc/install-sh new file mode 100755 index 0000000..ebc6691 --- /dev/null +++ b/redis.submodule/jemalloc/install-sh @@ -0,0 +1,250 @@ +#! /bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5 (mit/util/scripts/install.sh). 
+# +# Copyright 1991 by the Massachusetts Institute of Technology +# +# Permission to use, copy, modify, distribute, and sell this software and its +# documentation for any purpose is hereby granted without fee, provided that +# the above copyright notice appear in all copies and that both that +# copyright notice and this permission notice appear in supporting +# documentation, and that the name of M.I.T. not be used in advertising or +# publicity pertaining to distribution of the software without specific, +# written prior permission. M.I.T. makes no representations about the +# suitability of this software for any purpose. It is provided "as is" +# without express or implied warranty. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. + +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +transformbasename="" +transform_arg="" +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. + + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! 
-d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! -d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. + + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/redis.submodule/jemalloc/jemalloc.pc.in b/redis.submodule/jemalloc/jemalloc.pc.in new file mode 100644 index 0000000..1a3ad9b --- /dev/null +++ b/redis.submodule/jemalloc/jemalloc.pc.in @@ -0,0 +1,12 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ +install_suffix=@install_suffix@ + +Name: jemalloc +Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. +URL: http://www.canonware.com/jemalloc +Version: @jemalloc_version@ +Cflags: -I${includedir} +Libs: -L${libdir} -ljemalloc${install_suffix} diff --git a/redis.submodule/jemalloc/src/arena.c b/redis.submodule/jemalloc/src/arena.c new file mode 100644 index 0000000..43733cc --- /dev/null +++ b/redis.submodule/jemalloc/src/arena.c @@ -0,0 +1,3324 @@ +#define JEMALLOC_ARENA_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; +static ssize_t lg_dirty_mult_default; +arena_bin_info_t arena_bin_info[NBINS]; + +size_t map_bias; +size_t map_misc_offset; +size_t arena_maxrun; /* Max run size for arenas. */ +size_t large_maxclass; /* Max large size class. */ +static size_t small_maxrun; /* Max run size used for small size classes. */ +static bool *small_run_tab; /* Valid small run page multiples. */ +unsigned nlclasses; /* Number of large size classes. 
*/ +unsigned nhclasses; /* Number of huge size classes. */ + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static void arena_purge(arena_t *arena, bool all); +static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, + bool cleaned, bool decommitted); +static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin); +static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin); + +/******************************************************************************/ + +#define CHUNK_MAP_KEY ((uintptr_t)0x1U) + +JEMALLOC_INLINE_C arena_chunk_map_misc_t * +arena_miscelm_key_create(size_t size) +{ + + return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | + CHUNK_MAP_KEY)); +} + +JEMALLOC_INLINE_C bool +arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) +{ + + return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); +} + +#undef CHUNK_MAP_KEY + +JEMALLOC_INLINE_C size_t +arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) +{ + + assert(arena_miscelm_is_key(miscelm)); + + return (arena_mapbits_size_decode((uintptr_t)miscelm)); +} + +JEMALLOC_INLINE_C size_t +arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) +{ + arena_chunk_t *chunk; + size_t pageind, mapbits; + + assert(!arena_miscelm_is_key(miscelm)); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); + pageind = arena_miscelm_to_pageind(miscelm); + mapbits = arena_mapbits_get(chunk, pageind); + return (arena_mapbits_size_decode(mapbits)); +} + +JEMALLOC_INLINE_C int +arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) +{ + uintptr_t a_miscelm = (uintptr_t)a; + uintptr_t b_miscelm = (uintptr_t)b; + + assert(a != NULL); + assert(b != NULL); + + return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); +} + +/* Generate red-black tree functions. */ +rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, + rb_link, arena_run_comp) + +static size_t +run_quantize(size_t size) +{ + size_t qsize; + + assert(size != 0); + assert(size == PAGE_CEILING(size)); + + /* Don't change sizes that are valid small run sizes. */ + if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) + return (size); + + /* + * Round down to the nearest run size that can actually be requested + * during normal large allocation. Add large_pad so that cache index + * randomization can offset the allocation from the page boundary. + */ + qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; + if (qsize <= SMALL_MAXCLASS + large_pad) + return (run_quantize(size - large_pad)); + assert(qsize <= size); + return (qsize); +} + +static size_t +run_quantize_next(size_t size) +{ + size_t large_run_size_next; + + assert(size != 0); + assert(size == PAGE_CEILING(size)); + + /* + * Return the next quantized size greater than the input size. + * Quantized sizes comprise the union of run sizes that back small + * region runs, and run sizes that back large regions with no explicit + * alignment constraints. 
+ */ + + if (size > SMALL_MAXCLASS) { + large_run_size_next = PAGE_CEILING(index2size(size2index(size - + large_pad) + 1) + large_pad); + } else + large_run_size_next = SIZE_T_MAX; + if (size >= small_maxrun) + return (large_run_size_next); + + while (true) { + size += PAGE; + assert(size <= small_maxrun); + if (small_run_tab[size >> LG_PAGE]) { + if (large_run_size_next < size) + return (large_run_size_next); + return (size); + } + } +} + +static size_t +run_quantize_first(size_t size) +{ + size_t qsize = run_quantize(size); + + if (qsize < size) { + /* + * Skip a quantization that may have an adequately large run, + * because under-sized runs may be mixed in. This only happens + * when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + qsize = run_quantize_next(size); + } + return (qsize); +} + +JEMALLOC_INLINE_C int +arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) +{ + int ret; + uintptr_t a_miscelm = (uintptr_t)a; + size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ? + arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a)); + size_t b_qsize = run_quantize(arena_miscelm_size_get(b)); + + /* + * Compare based on quantized size rather than size, in order to sort + * equally useful runs only by address. + */ + ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); + if (ret == 0) { + if (!arena_miscelm_is_key(a)) { + uintptr_t b_miscelm = (uintptr_t)b; + + ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); + } else { + /* + * Treat keys as if they are lower than anything else. + */ + ret = -1; + } + } + + return (ret); +} + +/* Generate red-black tree functions. 
*/ +rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, + arena_chunk_map_misc_t, rb_link, arena_avail_comp) + +static void +arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, + pageind)); +} + +static void +arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, + pageind)); +} + +static void +arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); + assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == + CHUNK_MAP_DIRTY); + + qr_new(&miscelm->rd, rd_link); + qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); + arena->ndirty += npages; +} + +static void +arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, + size_t npages) +{ + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + + assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> + LG_PAGE)); + assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); + assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == + CHUNK_MAP_DIRTY); + + qr_remove(&miscelm->rd, rd_link); + assert(arena->ndirty >= npages); + arena->ndirty -= npages; +} + +static size_t +arena_chunk_dirty_npages(const extent_node_t *node) +{ + + return (extent_node_size_get(node) >> LG_PAGE); +} + +void +arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) +{ + + if (cache) { + extent_node_dirty_linkage_init(node); + extent_node_dirty_insert(node, &arena->runs_dirty, + &arena->chunks_cache); + arena->ndirty += arena_chunk_dirty_npages(node); + } +} + +void +arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) +{ + + if (dirty) { + extent_node_dirty_remove(node); + assert(arena->ndirty >= arena_chunk_dirty_npages(node)); + arena->ndirty -= arena_chunk_dirty_npages(node); + } +} + +JEMALLOC_INLINE_C void * +arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) +{ + void *ret; + unsigned regind; + arena_chunk_map_misc_t *miscelm; + void *rpages; + + assert(run->nfree > 0); + assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); + + regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); + miscelm = arena_run_to_miscelm(run); + rpages = arena_miscelm_to_rpages(miscelm); + ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + + (uintptr_t)(bin_info->reg_interval * regind)); + run->nfree--; + return (ret); +} + +JEMALLOC_INLINE_C void +arena_run_reg_dalloc(arena_run_t *run, void *ptr) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t mapbits = arena_mapbits_get(chunk, pageind); + szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + unsigned regind = arena_run_regind(run, bin_info, ptr); + + assert(run->nfree < bin_info->nregs); + /* Freeing an interior pointer can cause assertion failure. 
*/ + assert(((uintptr_t)ptr - + ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + + (uintptr_t)bin_info->reg0_offset)) % + (uintptr_t)bin_info->reg_interval == 0); + assert((uintptr_t)ptr >= + (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + + (uintptr_t)bin_info->reg0_offset); + /* Freeing an unallocated pointer can cause assertion failure. */ + assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); + + bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); + run->nfree++; +} + +JEMALLOC_INLINE_C void +arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) +{ + + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (npages << LG_PAGE)); + memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, + (npages << LG_PAGE)); +} + +JEMALLOC_INLINE_C void +arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) +{ + + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind + << LG_PAGE)), PAGE); +} + +JEMALLOC_INLINE_C void +arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) +{ + size_t i; + UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); + + arena_run_page_mark_zeroed(chunk, run_ind); + for (i = 0; i < PAGE / sizeof(size_t); i++) + assert(p[i] == 0); +} + +static void +arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) +{ + + if (config_stats) { + ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages + - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << + LG_PAGE); + if (cactive_diff != 0) + stats_cactive_add(cactive_diff); + } +} + +static void +arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, + size_t flag_dirty, size_t flag_decommitted, size_t need_pages) +{ + size_t total_pages, rem_pages; + + assert(flag_dirty == 0 || flag_decommitted == 0); + + total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> + LG_PAGE; + assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == + flag_dirty); + assert(need_pages <= total_pages); + rem_pages = total_pages - need_pages; + + arena_avail_remove(arena, chunk, run_ind, total_pages); + if (flag_dirty != 0) + arena_run_dirty_remove(arena, chunk, run_ind, total_pages); + arena_cactive_update(arena, need_pages, 0); + arena->nactive += need_pages; + + /* Keep track of trailing unused pages for later use. */ + if (rem_pages > 0) { + size_t flags = flag_dirty | flag_decommitted; + size_t flag_unzeroed_mask = (flags == 0) ? 
CHUNK_MAP_UNZEROED : + 0; + + arena_mapbits_unallocated_set(chunk, run_ind+need_pages, + (rem_pages << LG_PAGE), flags | + (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & + flag_unzeroed_mask)); + arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, + (rem_pages << LG_PAGE), flags | + (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & + flag_unzeroed_mask)); + if (flag_dirty != 0) { + arena_run_dirty_insert(arena, chunk, run_ind+need_pages, + rem_pages); + } + arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); + } +} + +static bool +arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, + bool remove, bool zero) +{ + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + size_t flag_dirty, flag_decommitted, run_ind, need_pages; + size_t flag_unzeroed_mask; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); + flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); + need_pages = (size >> LG_PAGE); + assert(need_pages > 0); + + if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, + run_ind << LG_PAGE, size, arena->ind)) + return (true); + + if (remove) { + arena_run_split_remove(arena, chunk, run_ind, flag_dirty, + flag_decommitted, need_pages); + } + + if (zero) { + if (flag_decommitted != 0) { + /* The run is untouched, and therefore zeroed. */ + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void + *)((uintptr_t)chunk + (run_ind << LG_PAGE)), + (need_pages << LG_PAGE)); + } else if (flag_dirty != 0) { + /* The run is dirty, so all pages must be zeroed. */ + arena_run_zero(chunk, run_ind, need_pages); + } else { + /* + * The run is clean, so some pages may be zeroed (i.e. + * never before touched). + */ + size_t i; + for (i = 0; i < need_pages; i++) { + if (arena_mapbits_unzeroed_get(chunk, run_ind+i) + != 0) + arena_run_zero(chunk, run_ind+i, 1); + else if (config_debug) { + arena_run_page_validate_zeroed(chunk, + run_ind+i); + } else { + arena_run_page_mark_zeroed(chunk, + run_ind+i); + } + } + } + } else { + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + } + + /* + * Set the last element first, in case the run only contains one page + * (i.e. both statements set the same element). + */ + flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
+ CHUNK_MAP_UNZEROED : 0; + arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + run_ind+need_pages-1))); + arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); + return (false); +} + +static bool +arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) +{ + + return (arena_run_split_large_helper(arena, run, size, true, zero)); +} + +static bool +arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) +{ + + return (arena_run_split_large_helper(arena, run, size, false, zero)); +} + +static bool +arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, + szind_t binind) +{ + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; + + assert(binind != BININD_INVALID); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); + flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); + flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); + need_pages = (size >> LG_PAGE); + assert(need_pages > 0); + + if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, + run_ind << LG_PAGE, size, arena->ind)) + return (true); + + arena_run_split_remove(arena, chunk, run_ind, flag_dirty, + flag_decommitted, need_pages); + + for (i = 0; i < need_pages; i++) { + size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, + run_ind+i); + arena_mapbits_small_set(chunk, run_ind+i, i, binind, + flag_unzeroed); + if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) + arena_run_page_validate_zeroed(chunk, run_ind+i); + } + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + + (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + return (false); +} + +static arena_chunk_t * +arena_chunk_init_spare(arena_t *arena) +{ + arena_chunk_t *chunk; + + assert(arena->spare != NULL); + + chunk = arena->spare; + arena->spare = NULL; + + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); + assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); + assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == + arena_maxrun); + assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == + arena_maxrun); + assert(arena_mapbits_dirty_get(chunk, map_bias) == + arena_mapbits_dirty_get(chunk, chunk_npages-1)); + + return (chunk); +} + +static bool +arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) +{ + + /* + * The extent node notion of "committed" doesn't directly apply to + * arena chunks. Arbitrarily mark them as committed. The commit state + * of runs is tracked individually, and upon chunk deallocation the + * entire chunk is in a consistent commit state. + */ + extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); + extent_node_achunk_set(&chunk->node, true); + return (chunk_register(chunk, &chunk->node)); +} + +static arena_chunk_t * +arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, + bool *zero, bool *commit) +{ + arena_chunk_t *chunk; + + malloc_mutex_unlock(&arena->lock); + + chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, + chunksize, chunksize, zero, commit); + if (chunk != NULL && !*commit) { + /* Commit header. 
*/ + if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << + LG_PAGE, arena->ind)) { + chunk_dalloc_wrapper(arena, chunk_hooks, + (void *)chunk, chunksize, *commit); + chunk = NULL; + } + } + if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { + if (!*commit) { + /* Undo commit of header. */ + chunk_hooks->decommit(chunk, chunksize, 0, map_bias << + LG_PAGE, arena->ind); + } + chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, + chunksize, *commit); + chunk = NULL; + } + + malloc_mutex_lock(&arena->lock); + return (chunk); +} + +static arena_chunk_t * +arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) +{ + arena_chunk_t *chunk; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + + chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, + chunksize, zero, true); + if (chunk != NULL) { + if (arena_chunk_register(arena, chunk, *zero)) { + chunk_dalloc_cache(arena, &chunk_hooks, chunk, + chunksize, true); + return (NULL); + } + *commit = true; + } + if (chunk == NULL) { + chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, + zero, commit); + } + + if (config_stats && chunk != NULL) { + arena->stats.mapped += chunksize; + arena->stats.metadata_mapped += (map_bias << LG_PAGE); + } + + return (chunk); +} + +static arena_chunk_t * +arena_chunk_init_hard(arena_t *arena) +{ + arena_chunk_t *chunk; + bool zero, commit; + size_t flag_unzeroed, flag_decommitted, i; + + assert(arena->spare == NULL); + + zero = false; + commit = false; + chunk = arena_chunk_alloc_internal(arena, &zero, &commit); + if (chunk == NULL) + return (NULL); + + /* + * Initialize the map to contain one maximal free untouched run. Mark + * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted + * chunk. + */ + flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; + flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; + arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, + flag_unzeroed | flag_decommitted); + /* + * There is no need to initialize the internal page map entries unless + * the chunk is not zeroed. + */ + if (!zero) { + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( + (void *)arena_bitselm_get(chunk, map_bias+1), + (size_t)((uintptr_t) arena_bitselm_get(chunk, + chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, + map_bias+1))); + for (i = map_bias+1; i < chunk_npages-1; i++) + arena_mapbits_internal_set(chunk, i, flag_unzeroed); + } else { + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void + *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) + arena_bitselm_get(chunk, chunk_npages-1) - + (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); + if (config_debug) { + for (i = map_bias+1; i < chunk_npages-1; i++) { + assert(arena_mapbits_unzeroed_get(chunk, i) == + flag_unzeroed); + } + } + } + arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, + flag_unzeroed); + + return (chunk); +} + +static arena_chunk_t * +arena_chunk_alloc(arena_t *arena) +{ + arena_chunk_t *chunk; + + if (arena->spare != NULL) + chunk = arena_chunk_init_spare(arena); + else { + chunk = arena_chunk_init_hard(arena); + if (chunk == NULL) + return (NULL); + } + + /* Insert the run into the runs_avail tree. 
*/ + arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); + + return (chunk); +} + +static void +arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) +{ + + assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); + assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); + assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == + arena_maxrun); + assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == + arena_maxrun); + assert(arena_mapbits_dirty_get(chunk, map_bias) == + arena_mapbits_dirty_get(chunk, chunk_npages-1)); + assert(arena_mapbits_decommitted_get(chunk, map_bias) == + arena_mapbits_decommitted_get(chunk, chunk_npages-1)); + + /* + * Remove run from the runs_avail tree, so that the arena does not use + * it. + */ + arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); + + if (arena->spare != NULL) { + arena_chunk_t *spare = arena->spare; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + bool committed; + + arena->spare = chunk; + if (arena_mapbits_dirty_get(spare, map_bias) != 0) { + arena_run_dirty_remove(arena, spare, map_bias, + chunk_npages-map_bias); + } + + chunk_deregister(spare, &spare->node); + + committed = (arena_mapbits_decommitted_get(spare, map_bias) == + 0); + if (!committed) { + /* + * Decommit the header. Mark the chunk as decommitted + * even if header decommit fails, since treating a + * partially committed chunk as committed has a high + * potential for causing later access of decommitted + * memory. + */ + chunk_hooks = chunk_hooks_get(arena); + chunk_hooks.decommit(spare, chunksize, 0, map_bias << + LG_PAGE, arena->ind); + } + + chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare, + chunksize, committed); + + if (config_stats) { + arena->stats.mapped -= chunksize; + arena->stats.metadata_mapped -= (map_bias << LG_PAGE); + } + } else + arena->spare = chunk; +} + +static void +arena_huge_malloc_stats_update(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.nmalloc_huge++; + arena->stats.allocated_huge += usize; + arena->stats.hstats[index].nmalloc++; + arena->stats.hstats[index].curhchunks++; +} + +static void +arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.nmalloc_huge--; + arena->stats.allocated_huge -= usize; + arena->stats.hstats[index].nmalloc--; + arena->stats.hstats[index].curhchunks--; +} + +static void +arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge++; + arena->stats.allocated_huge -= usize; + arena->stats.hstats[index].ndalloc++; + arena->stats.hstats[index].curhchunks--; +} + +static void +arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) +{ + szind_t index = size2index(usize) - nlclasses - NBINS; + + cassert(config_stats); + + arena->stats.ndalloc_huge--; + arena->stats.allocated_huge += usize; + arena->stats.hstats[index].ndalloc--; + arena->stats.hstats[index].curhchunks++; +} + +static void +arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) +{ + + arena_huge_dalloc_stats_update(arena, oldsize); + arena_huge_malloc_stats_update(arena, usize); +} + +static void +arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, + size_t usize) +{ + + arena_huge_dalloc_stats_update_undo(arena, oldsize); + 
arena_huge_malloc_stats_update_undo(arena, usize); +} + +extent_node_t * +arena_node_alloc(arena_t *arena) +{ + extent_node_t *node; + + malloc_mutex_lock(&arena->node_cache_mtx); + node = ql_last(&arena->node_cache, ql_link); + if (node == NULL) { + malloc_mutex_unlock(&arena->node_cache_mtx); + return (base_alloc(sizeof(extent_node_t))); + } + ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); + malloc_mutex_unlock(&arena->node_cache_mtx); + return (node); +} + +void +arena_node_dalloc(arena_t *arena, extent_node_t *node) +{ + + malloc_mutex_lock(&arena->node_cache_mtx); + ql_elm_new(node, ql_link); + ql_tail_insert(&arena->node_cache, node, ql_link); + malloc_mutex_unlock(&arena->node_cache_mtx); +} + +static void * +arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, + size_t usize, size_t alignment, bool *zero, size_t csize) +{ + void *ret; + bool commit = true; + + ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment, + zero, &commit); + if (ret == NULL) { + /* Revert optimistic stats updates. */ + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_malloc_stats_update_undo(arena, usize); + arena->stats.mapped -= usize; + } + arena->nactive -= (usize >> LG_PAGE); + malloc_mutex_unlock(&arena->lock); + } + + return (ret); +} + +void * +arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, + bool *zero) +{ + void *ret; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t csize = CHUNK_CEILING(usize); + + malloc_mutex_lock(&arena->lock); + + /* Optimistically update stats. */ + if (config_stats) { + arena_huge_malloc_stats_update(arena, usize); + arena->stats.mapped += usize; + } + arena->nactive += (usize >> LG_PAGE); + + ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, + zero, true); + malloc_mutex_unlock(&arena->lock); + if (ret == NULL) { + ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, + alignment, zero, csize); + } + + if (config_stats && ret != NULL) + stats_cactive_add(usize); + return (ret); +} + +void +arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) +{ + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + size_t csize; + + csize = CHUNK_CEILING(usize); + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_dalloc_stats_update(arena, usize); + arena->stats.mapped -= usize; + stats_cactive_sub(usize); + } + arena->nactive -= (usize >> LG_PAGE); + + chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); + malloc_mutex_unlock(&arena->lock); +} + +void +arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, + size_t usize) +{ + + assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); + assert(oldsize != usize); + + malloc_mutex_lock(&arena->lock); + if (config_stats) + arena_huge_ralloc_stats_update(arena, oldsize, usize); + if (oldsize < usize) { + size_t udiff = usize - oldsize; + arena->nactive += udiff >> LG_PAGE; + if (config_stats) + stats_cactive_add(udiff); + } else { + size_t udiff = oldsize - usize; + arena->nactive -= udiff >> LG_PAGE; + if (config_stats) + stats_cactive_sub(udiff); + } + malloc_mutex_unlock(&arena->lock); +} + +void +arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, + size_t usize) +{ + size_t udiff = oldsize - usize; + size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); + + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_ralloc_stats_update(arena, oldsize, usize); + if (cdiff != 0) { + arena->stats.mapped -= 
cdiff; + stats_cactive_sub(udiff); + } + } + arena->nactive -= udiff >> LG_PAGE; + + if (cdiff != 0) { + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + void *nchunk = (void *)((uintptr_t)chunk + + CHUNK_CEILING(usize)); + + chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); + } + malloc_mutex_unlock(&arena->lock); +} + +static bool +arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, + void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, + size_t udiff, size_t cdiff) +{ + bool err; + bool commit = true; + + err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, + zero, &commit) == NULL); + if (err) { + /* Revert optimistic stats updates. */ + malloc_mutex_lock(&arena->lock); + if (config_stats) { + arena_huge_ralloc_stats_update_undo(arena, oldsize, + usize); + arena->stats.mapped -= cdiff; + } + arena->nactive -= (udiff >> LG_PAGE); + malloc_mutex_unlock(&arena->lock); + } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, + cdiff, true, arena->ind)) { + chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, + true); + err = true; + } + return (err); +} + +bool +arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, + size_t usize, bool *zero) +{ + bool err; + chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); + void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); + size_t udiff = usize - oldsize; + size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); + + malloc_mutex_lock(&arena->lock); + + /* Optimistically update stats. */ + if (config_stats) { + arena_huge_ralloc_stats_update(arena, oldsize, usize); + arena->stats.mapped += cdiff; + } + arena->nactive += (udiff >> LG_PAGE); + + err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, + chunksize, zero, true) == NULL); + malloc_mutex_unlock(&arena->lock); + if (err) { + err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, + chunk, oldsize, usize, zero, nchunk, udiff, + cdiff); + } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, + cdiff, true, arena->ind)) { + chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, + true); + err = true; + } + + if (config_stats && !err) + stats_cactive_add(udiff); + return (err); +} + +/* + * Do first-best-fit run selection, i.e. select the lowest run that best fits. + * Run sizes are quantized, so not all candidate runs are necessarily exactly + * the same size. + */ +static arena_run_t * +arena_run_first_best_fit(arena_t *arena, size_t size) +{ + size_t search_size = run_quantize_first(size); + arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size); + arena_chunk_map_misc_t *miscelm = + arena_avail_tree_nsearch(&arena->runs_avail, key); + if (miscelm == NULL) + return (NULL); + return (&miscelm->run); +} + +static arena_run_t * +arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) +{ + arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); + if (run != NULL) { + if (arena_run_split_large(arena, run, size, zero)) + run = NULL; + } + return (run); +} + +static arena_run_t * +arena_run_alloc_large(arena_t *arena, size_t size, bool zero) +{ + arena_chunk_t *chunk; + arena_run_t *run; + + assert(size <= arena_maxrun); + assert(size == PAGE_CEILING(size)); + + /* Search the arena's chunks for the lowest best fit. */ + run = arena_run_alloc_large_helper(arena, size, zero); + if (run != NULL) + return (run); + + /* + * No usable runs. 
Create a new chunk from which to allocate the run. + */ + chunk = arena_chunk_alloc(arena); + if (chunk != NULL) { + run = &arena_miscelm_get(chunk, map_bias)->run; + if (arena_run_split_large(arena, run, size, zero)) + run = NULL; + return (run); + } + + /* + * arena_chunk_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped arena->lock in + * arena_chunk_alloc(), so search one more time. + */ + return (arena_run_alloc_large_helper(arena, size, zero)); +} + +static arena_run_t * +arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) +{ + arena_run_t *run = arena_run_first_best_fit(arena, size); + if (run != NULL) { + if (arena_run_split_small(arena, run, size, binind)) + run = NULL; + } + return (run); +} + +static arena_run_t * +arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) +{ + arena_chunk_t *chunk; + arena_run_t *run; + + assert(size <= arena_maxrun); + assert(size == PAGE_CEILING(size)); + assert(binind != BININD_INVALID); + + /* Search the arena's chunks for the lowest best fit. */ + run = arena_run_alloc_small_helper(arena, size, binind); + if (run != NULL) + return (run); + + /* + * No usable runs. Create a new chunk from which to allocate the run. + */ + chunk = arena_chunk_alloc(arena); + if (chunk != NULL) { + run = &arena_miscelm_get(chunk, map_bias)->run; + if (arena_run_split_small(arena, run, size, binind)) + run = NULL; + return (run); + } + + /* + * arena_chunk_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped arena->lock in + * arena_chunk_alloc(), so search one more time. + */ + return (arena_run_alloc_small_helper(arena, size, binind)); +} + +static bool +arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) +{ + + return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) + << 3)); +} + +ssize_t +arena_lg_dirty_mult_get(arena_t *arena) +{ + ssize_t lg_dirty_mult; + + malloc_mutex_lock(&arena->lock); + lg_dirty_mult = arena->lg_dirty_mult; + malloc_mutex_unlock(&arena->lock); + + return (lg_dirty_mult); +} + +bool +arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) +{ + + if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) + return (true); + + malloc_mutex_lock(&arena->lock); + arena->lg_dirty_mult = lg_dirty_mult; + arena_maybe_purge(arena); + malloc_mutex_unlock(&arena->lock); + + return (false); +} + +void +arena_maybe_purge(arena_t *arena) +{ + + /* Don't purge if the option is disabled. */ + if (arena->lg_dirty_mult < 0) + return; + /* Don't recursively purge. */ + if (arena->purging) + return; + /* + * Iterate, since preventing recursive purging could otherwise leave too + * many dirty pages. + */ + while (true) { + size_t threshold = (arena->nactive >> arena->lg_dirty_mult); + if (threshold < chunk_npages) + threshold = chunk_npages; + /* + * Don't purge unless the number of purgeable pages exceeds the + * threshold. 
+ */ + if (arena->ndirty <= threshold) + return; + arena_purge(arena, false); + } +} + +static size_t +arena_dirty_count(arena_t *arena) +{ + size_t ndirty = 0; + arena_runs_dirty_link_t *rdelm; + extent_node_t *chunkselm; + + for (rdelm = qr_next(&arena->runs_dirty, rd_link), + chunkselm = qr_next(&arena->chunks_cache, cc_link); + rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { + size_t npages; + + if (rdelm == &chunkselm->rd) { + npages = extent_node_size_get(chunkselm) >> LG_PAGE; + chunkselm = qr_next(chunkselm, cc_link); + } else { + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + assert(arena_mapbits_allocated_get(chunk, pageind) == + 0); + assert(arena_mapbits_large_get(chunk, pageind) == 0); + assert(arena_mapbits_dirty_get(chunk, pageind) != 0); + npages = arena_mapbits_unallocated_size_get(chunk, + pageind) >> LG_PAGE; + } + ndirty += npages; + } + + return (ndirty); +} + +static size_t +arena_compute_npurge(arena_t *arena, bool all) +{ + size_t npurge; + + /* + * Compute the minimum number of pages that this thread should try to + * purge. + */ + if (!all) { + size_t threshold = (arena->nactive >> arena->lg_dirty_mult); + threshold = threshold < chunk_npages ? chunk_npages : threshold; + + npurge = arena->ndirty - threshold; + } else + npurge = arena->ndirty; + + return (npurge); +} + +static size_t +arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, + size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + arena_runs_dirty_link_t *rdelm, *rdelm_next; + extent_node_t *chunkselm; + size_t nstashed = 0; + + /* Stash at least npurge pages. */ + for (rdelm = qr_next(&arena->runs_dirty, rd_link), + chunkselm = qr_next(&arena->chunks_cache, cc_link); + rdelm != &arena->runs_dirty; rdelm = rdelm_next) { + size_t npages; + rdelm_next = qr_next(rdelm, rd_link); + + if (rdelm == &chunkselm->rd) { + extent_node_t *chunkselm_next; + bool zero; + UNUSED void *chunk; + + chunkselm_next = qr_next(chunkselm, cc_link); + /* + * Allocate. chunkselm remains valid due to the + * dalloc_node=false argument to chunk_alloc_cache(). + */ + zero = false; + chunk = chunk_alloc_cache(arena, chunk_hooks, + extent_node_addr_get(chunkselm), + extent_node_size_get(chunkselm), chunksize, &zero, + false); + assert(chunk == extent_node_addr_get(chunkselm)); + assert(zero == extent_node_zeroed_get(chunkselm)); + extent_node_dirty_insert(chunkselm, purge_runs_sentinel, + purge_chunks_sentinel); + npages = extent_node_size_get(chunkselm) >> LG_PAGE; + chunkselm = chunkselm_next; + } else { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + arena_run_t *run = &miscelm->run; + size_t run_size = + arena_mapbits_unallocated_size_get(chunk, pageind); + + npages = run_size >> LG_PAGE; + + assert(pageind + npages <= chunk_npages); + assert(arena_mapbits_dirty_get(chunk, pageind) == + arena_mapbits_dirty_get(chunk, pageind+npages-1)); + + /* + * If purging the spare chunk's run, make it available + * prior to allocation. + */ + if (chunk == arena->spare) + arena_chunk_alloc(arena); + + /* Temporarily allocate the free dirty run. */ + arena_run_split_large(arena, run, run_size, false); + /* Stash. */ + if (false) + qr_new(rdelm, rd_link); /* Redundant. 
*/ + else { + assert(qr_next(rdelm, rd_link) == rdelm); + assert(qr_prev(rdelm, rd_link) == rdelm); + } + qr_meld(purge_runs_sentinel, rdelm, rd_link); + } + + nstashed += npages; + if (!all && nstashed >= npurge) + break; + } + + return (nstashed); +} + +static size_t +arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, + arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + size_t npurged, nmadvise; + arena_runs_dirty_link_t *rdelm; + extent_node_t *chunkselm; + + if (config_stats) + nmadvise = 0; + npurged = 0; + + malloc_mutex_unlock(&arena->lock); + for (rdelm = qr_next(purge_runs_sentinel, rd_link), + chunkselm = qr_next(purge_chunks_sentinel, cc_link); + rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { + size_t npages; + + if (rdelm == &chunkselm->rd) { + /* + * Don't actually purge the chunk here because 1) + * chunkselm is embedded in the chunk and must remain + * valid, and 2) we deallocate the chunk in + * arena_unstash_purged(), where it is destroyed, + * decommitted, or purged, depending on chunk + * deallocation policy. + */ + size_t size = extent_node_size_get(chunkselm); + npages = size >> LG_PAGE; + chunkselm = qr_next(chunkselm, cc_link); + } else { + size_t pageind, run_size, flag_unzeroed, flags, i; + bool decommitted; + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + pageind = arena_miscelm_to_pageind(miscelm); + run_size = arena_mapbits_large_size_get(chunk, pageind); + npages = run_size >> LG_PAGE; + + assert(pageind + npages <= chunk_npages); + assert(!arena_mapbits_decommitted_get(chunk, pageind)); + assert(!arena_mapbits_decommitted_get(chunk, + pageind+npages-1)); + decommitted = !chunk_hooks->decommit(chunk, chunksize, + pageind << LG_PAGE, npages << LG_PAGE, arena->ind); + if (decommitted) { + flag_unzeroed = 0; + flags = CHUNK_MAP_DECOMMITTED; + } else { + flag_unzeroed = chunk_purge_wrapper(arena, + chunk_hooks, chunk, chunksize, pageind << + LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; + flags = flag_unzeroed; + } + arena_mapbits_large_set(chunk, pageind+npages-1, 0, + flags); + arena_mapbits_large_set(chunk, pageind, run_size, + flags); + + /* + * Set the unzeroed flag for internal pages, now that + * chunk_purge_wrapper() has returned whether the pages + * were zeroed as a side effect of purging. This chunk + * map modification is safe even though the arena mutex + * isn't currently owned by this thread, because the run + * is marked as allocated, thus protecting it from being + * modified by any other thread. As long as these + * writes don't perturb the first and last elements' + * CHUNK_MAP_ALLOCATED bits, behavior is well defined. + */ + for (i = 1; i < npages-1; i++) { + arena_mapbits_internal_set(chunk, pageind+i, + flag_unzeroed); + } + } + + npurged += npages; + if (config_stats) + nmadvise++; + } + malloc_mutex_lock(&arena->lock); + + if (config_stats) { + arena->stats.nmadvise += nmadvise; + arena->stats.purged += npurged; + } + + return (npurged); +} + +static void +arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, + arena_runs_dirty_link_t *purge_runs_sentinel, + extent_node_t *purge_chunks_sentinel) +{ + arena_runs_dirty_link_t *rdelm, *rdelm_next; + extent_node_t *chunkselm; + + /* Deallocate chunks/runs. 
*/ + for (rdelm = qr_next(purge_runs_sentinel, rd_link), + chunkselm = qr_next(purge_chunks_sentinel, cc_link); + rdelm != purge_runs_sentinel; rdelm = rdelm_next) { + rdelm_next = qr_next(rdelm, rd_link); + if (rdelm == &chunkselm->rd) { + extent_node_t *chunkselm_next = qr_next(chunkselm, + cc_link); + void *addr = extent_node_addr_get(chunkselm); + size_t size = extent_node_size_get(chunkselm); + bool zeroed = extent_node_zeroed_get(chunkselm); + bool committed = extent_node_committed_get(chunkselm); + extent_node_dirty_remove(chunkselm); + arena_node_dalloc(arena, chunkselm); + chunkselm = chunkselm_next; + chunk_dalloc_arena(arena, chunk_hooks, addr, size, + zeroed, committed); + } else { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); + arena_chunk_map_misc_t *miscelm = + arena_rd_to_miscelm(rdelm); + size_t pageind = arena_miscelm_to_pageind(miscelm); + bool decommitted = (arena_mapbits_decommitted_get(chunk, + pageind) != 0); + arena_run_t *run = &miscelm->run; + qr_remove(rdelm, rd_link); + arena_run_dalloc(arena, run, false, true, decommitted); + } + } +} + +static void +arena_purge(arena_t *arena, bool all) +{ + chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); + size_t npurge, npurgeable, npurged; + arena_runs_dirty_link_t purge_runs_sentinel; + extent_node_t purge_chunks_sentinel; + + arena->purging = true; + + /* + * Calls to arena_dirty_count() are disabled even for debug builds + * because overhead grows nonlinearly as memory usage increases. + */ + if (false && config_debug) { + size_t ndirty = arena_dirty_count(arena); + assert(ndirty == arena->ndirty); + } + assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); + + if (config_stats) + arena->stats.npurge++; + + npurge = arena_compute_npurge(arena, all); + qr_new(&purge_runs_sentinel, rd_link); + extent_node_dirty_linkage_init(&purge_chunks_sentinel); + + npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, + &purge_runs_sentinel, &purge_chunks_sentinel); + assert(npurgeable >= npurge); + npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, + &purge_chunks_sentinel); + assert(npurged == npurgeable); + arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, + &purge_chunks_sentinel); + + arena->purging = false; +} + +void +arena_purge_all(arena_t *arena) +{ + + malloc_mutex_lock(&arena->lock); + arena_purge(arena, true); + malloc_mutex_unlock(&arena->lock); +} + +static void +arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, + size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, + size_t flag_decommitted) +{ + size_t size = *p_size; + size_t run_ind = *p_run_ind; + size_t run_pages = *p_run_pages; + + /* Try to coalesce forward. */ + if (run_ind + run_pages < chunk_npages && + arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && + arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && + arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == + flag_decommitted) { + size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, + run_ind+run_pages); + size_t nrun_pages = nrun_size >> LG_PAGE; + + /* + * Remove successor from runs_avail; the coalesced run is + * inserted later. 
+ */ + assert(arena_mapbits_unallocated_size_get(chunk, + run_ind+run_pages+nrun_pages-1) == nrun_size); + assert(arena_mapbits_dirty_get(chunk, + run_ind+run_pages+nrun_pages-1) == flag_dirty); + assert(arena_mapbits_decommitted_get(chunk, + run_ind+run_pages+nrun_pages-1) == flag_decommitted); + arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); + + /* + * If the successor is dirty, remove it from the set of dirty + * pages. + */ + if (flag_dirty != 0) { + arena_run_dirty_remove(arena, chunk, run_ind+run_pages, + nrun_pages); + } + + size += nrun_size; + run_pages += nrun_pages; + + arena_mapbits_unallocated_size_set(chunk, run_ind, size); + arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, + size); + } + + /* Try to coalesce backward. */ + if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, + run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == + flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == + flag_decommitted) { + size_t prun_size = arena_mapbits_unallocated_size_get(chunk, + run_ind-1); + size_t prun_pages = prun_size >> LG_PAGE; + + run_ind -= prun_pages; + + /* + * Remove predecessor from runs_avail; the coalesced run is + * inserted later. + */ + assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == + prun_size); + assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); + assert(arena_mapbits_decommitted_get(chunk, run_ind) == + flag_decommitted); + arena_avail_remove(arena, chunk, run_ind, prun_pages); + + /* + * If the predecessor is dirty, remove it from the set of dirty + * pages. + */ + if (flag_dirty != 0) { + arena_run_dirty_remove(arena, chunk, run_ind, + prun_pages); + } + + size += prun_size; + run_pages += prun_pages; + + arena_mapbits_unallocated_size_set(chunk, run_ind, size); + arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, + size); + } + + *p_size = size; + *p_run_ind = run_ind; + *p_run_pages = run_pages; +} + +static size_t +arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t run_ind) +{ + size_t size; + + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + + if (arena_mapbits_large_get(chunk, run_ind) != 0) { + size = arena_mapbits_large_size_get(chunk, run_ind); + assert(size == PAGE || arena_mapbits_large_size_get(chunk, + run_ind+(size>>LG_PAGE)-1) == 0); + } else { + arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; + size = bin_info->run_size; + } + + return (size); +} + +static bool +arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t run_ind = arena_miscelm_to_pageind(miscelm); + size_t offset = run_ind << LG_PAGE; + size_t length = arena_run_size_get(arena, chunk, run, run_ind); + + return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length, + arena->ind)); +} + +static void +arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, + bool decommitted) +{ + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + run_ind = arena_miscelm_to_pageind(miscelm); + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + size = arena_run_size_get(arena, chunk, run, run_ind); + run_pages = (size >> LG_PAGE); + arena_cactive_update(arena, 0, run_pages); + arena->nactive -= run_pages; + + /* + * The run is dirty if the caller 
claims to have dirtied it, as well as + * if it was already dirty before being allocated and the caller + * doesn't claim to have cleaned it. + */ + assert(arena_mapbits_dirty_get(chunk, run_ind) == + arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); + if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) + != 0) + dirty = true; + flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; + flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; + + /* Mark pages as unallocated in the chunk map. */ + if (dirty || decommitted) { + size_t flags = flag_dirty | flag_decommitted; + arena_mapbits_unallocated_set(chunk, run_ind, size, flags); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + flags); + } else { + arena_mapbits_unallocated_set(chunk, run_ind, size, + arena_mapbits_unzeroed_get(chunk, run_ind)); + arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, + arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); + } + + arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, + flag_dirty, flag_decommitted); + + /* Insert into runs_avail, now that coalescing is complete. */ + assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == + arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); + assert(arena_mapbits_dirty_get(chunk, run_ind) == + arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); + assert(arena_mapbits_decommitted_get(chunk, run_ind) == + arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); + arena_avail_insert(arena, chunk, run_ind, run_pages); + + if (dirty) + arena_run_dirty_insert(arena, chunk, run_ind, run_pages); + + /* Deallocate chunk if it is now completely unused. */ + if (size == arena_maxrun) { + assert(run_ind == map_bias); + assert(run_pages == (arena_maxrun >> LG_PAGE)); + arena_chunk_dalloc(arena, chunk); + } + + /* + * It is okay to do dirty page processing here even if the chunk was + * deallocated above, since in that case it is the spare. Waiting + * until after possible chunk deallocation to do dirty processing + * allows for an old spare to be fully deallocated, thus decreasing the + * chances of spuriously crossing the dirty page purging threshold. + */ + if (dirty) + arena_maybe_purge(arena); +} + +static void +arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run) +{ + bool committed = arena_run_decommit(arena, chunk, run); + + arena_run_dalloc(arena, run, committed, false, !committed); +} + +static void +arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t oldsize, size_t newsize) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t pageind = arena_miscelm_to_pageind(miscelm); + size_t head_npages = (oldsize - newsize) >> LG_PAGE; + size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); + size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? + CHUNK_MAP_UNZEROED : 0; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * leading run as separately allocated. Set the last element of each + * run first, in case of single-page runs. 
+ */ + assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages-1))); + arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); + + if (config_debug) { + UNUSED size_t tail_npages = newsize >> LG_PAGE; + assert(arena_mapbits_large_size_get(chunk, + pageind+head_npages+tail_npages-1) == 0); + assert(arena_mapbits_dirty_get(chunk, + pageind+head_npages+tail_npages-1) == flag_dirty); + } + arena_mapbits_large_set(chunk, pageind+head_npages, newsize, + flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages))); + + arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); +} + +static void +arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t oldsize, size_t newsize, bool dirty) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + size_t pageind = arena_miscelm_to_pageind(miscelm); + size_t head_npages = newsize >> LG_PAGE; + size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); + size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? + CHUNK_MAP_UNZEROED : 0; + arena_chunk_map_misc_t *tail_miscelm; + arena_run_t *tail_run; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * trailing run as separately allocated. Set the last element of each + * run first, in case of single-page runs. + */ + assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); + arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages-1))); + arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); + + if (config_debug) { + UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; + assert(arena_mapbits_large_size_get(chunk, + pageind+head_npages+tail_npages-1) == 0); + assert(arena_mapbits_dirty_get(chunk, + pageind+head_npages+tail_npages-1) == flag_dirty); + } + arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, + flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+head_npages))); + + tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); + tail_run = &tail_miscelm->run; + arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != + 0)); +} + +static arena_run_t * +arena_bin_runs_first(arena_bin_t *bin) +{ + arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); + if (miscelm != NULL) + return (&miscelm->run); + + return (NULL); +} + +static void +arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + + assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); + + arena_run_tree_insert(&bin->runs, miscelm); +} + +static void +arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) +{ + arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); + + assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); + + arena_run_tree_remove(&bin->runs, miscelm); +} + +static arena_run_t * +arena_bin_nonfull_run_tryget(arena_bin_t *bin) +{ + arena_run_t *run = arena_bin_runs_first(bin); + if (run != NULL) { + 
arena_bin_runs_remove(bin, run); + if (config_stats) + bin->stats.reruns++; + } + return (run); +} + +static arena_run_t * +arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) +{ + arena_run_t *run; + szind_t binind; + arena_bin_info_t *bin_info; + + /* Look for a usable run. */ + run = arena_bin_nonfull_run_tryget(bin); + if (run != NULL) + return (run); + /* No existing runs have any space available. */ + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + + /* Allocate a new run. */ + malloc_mutex_unlock(&bin->lock); + /******************************/ + malloc_mutex_lock(&arena->lock); + run = arena_run_alloc_small(arena, bin_info->run_size, binind); + if (run != NULL) { + /* Initialize run internals. */ + run->binind = binind; + run->nfree = bin_info->nregs; + bitmap_init(run->bitmap, &bin_info->bitmap_info); + } + malloc_mutex_unlock(&arena->lock); + /********************************/ + malloc_mutex_lock(&bin->lock); + if (run != NULL) { + if (config_stats) { + bin->stats.nruns++; + bin->stats.curruns++; + } + return (run); + } + + /* + * arena_run_alloc_small() failed, but another thread may have made + * sufficient memory available while this one dropped bin->lock above, + * so search one more time. + */ + run = arena_bin_nonfull_run_tryget(bin); + if (run != NULL) + return (run); + + return (NULL); +} + +/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ +static void * +arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) +{ + szind_t binind; + arena_bin_info_t *bin_info; + arena_run_t *run; + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + bin->runcur = NULL; + run = arena_bin_nonfull_run_get(arena, bin); + if (bin->runcur != NULL && bin->runcur->nfree > 0) { + /* + * Another thread updated runcur while this one ran without the + * bin lock in arena_bin_nonfull_run_get(). + */ + void *ret; + assert(bin->runcur->nfree > 0); + ret = arena_run_reg_alloc(bin->runcur, bin_info); + if (run != NULL) { + arena_chunk_t *chunk; + + /* + * arena_run_alloc_small() may have allocated run, or + * it may have pulled run from the bin's run tree. + * Therefore it is unsafe to make any assumptions about + * how run has previously been used, and + * arena_bin_lower_run() must be called, as if a region + * were just deallocated from the run. + */ + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + if (run->nfree == bin_info->nregs) + arena_dalloc_bin_run(arena, chunk, run, bin); + else + arena_bin_lower_run(arena, chunk, run, bin); + } + return (ret); + } + + if (run == NULL) + return (NULL); + + bin->runcur = run; + + assert(bin->runcur->nfree > 0); + + return (arena_run_reg_alloc(bin->runcur, bin_info)); +} + +void +arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, + uint64_t prof_accumbytes) +{ + unsigned i, nfill; + arena_bin_t *bin; + + assert(tbin->ncached == 0); + + if (config_prof && arena_prof_accum(arena, prof_accumbytes)) + prof_idump(); + bin = &arena->bins[binind]; + malloc_mutex_lock(&bin->lock); + for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> + tbin->lg_fill_div); i < nfill; i++) { + arena_run_t *run; + void *ptr; + if ((run = bin->runcur) != NULL && run->nfree > 0) + ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); + else + ptr = arena_bin_malloc_hard(arena, bin); + if (ptr == NULL) { + /* + * OOM. 
tbin->avail isn't yet filled down to its first + * element, so the successful allocations (if any) must + * be moved to the base of tbin->avail before bailing + * out. + */ + if (i > 0) { + memmove(tbin->avail, &tbin->avail[nfill - i], + i * sizeof(void *)); + } + break; + } + if (config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ptr, &arena_bin_info[binind], + true); + } + /* Insert such that low regions get used first. */ + tbin->avail[nfill - 1 - i] = ptr; + } + if (config_stats) { + bin->stats.nmalloc += i; + bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.curregs += i; + bin->stats.nfills++; + tbin->tstats.nrequests = 0; + } + malloc_mutex_unlock(&bin->lock); + tbin->ncached = i; +} + +void +arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) +{ + + if (zero) { + size_t redzone_size = bin_info->redzone_size; + memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, + redzone_size); + memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, + redzone_size); + } else { + memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, + bin_info->reg_interval); + } +} + +#ifdef JEMALLOC_JET +#undef arena_redzone_corruption +#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) +#endif +static void +arena_redzone_corruption(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " + "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", + after ? "after" : "before", ptr, usize, byte); +} +#ifdef JEMALLOC_JET +#undef arena_redzone_corruption +#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) +arena_redzone_corruption_t *arena_redzone_corruption = + JEMALLOC_N(arena_redzone_corruption_impl); +#endif + +static void +arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) +{ + bool error = false; + + if (opt_junk_alloc) { + size_t size = bin_info->reg_size; + size_t redzone_size = bin_info->redzone_size; + size_t i; + + for (i = 1; i <= redzone_size; i++) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); + if (*byte != 0xa5) { + error = true; + arena_redzone_corruption(ptr, size, false, i, + *byte); + if (reset) + *byte = 0xa5; + } + } + for (i = 0; i < redzone_size; i++) { + uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); + if (*byte != 0xa5) { + error = true; + arena_redzone_corruption(ptr, size, true, i, + *byte); + if (reset) + *byte = 0xa5; + } + } + } + + if (opt_abort && error) + abort(); +} + +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_small +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) +#endif +void +arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) +{ + size_t redzone_size = bin_info->redzone_size; + + arena_redzones_validate(ptr, bin_info, false); + memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, + bin_info->reg_interval); +} +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_small +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) +arena_dalloc_junk_small_t *arena_dalloc_junk_small = + JEMALLOC_N(arena_dalloc_junk_small_impl); +#endif + +void +arena_quarantine_junk_small(void *ptr, size_t usize) +{ + szind_t binind; + arena_bin_info_t *bin_info; + cassert(config_fill); + assert(opt_junk_free); + assert(opt_quarantine); + assert(usize <= SMALL_MAXCLASS); + + binind = size2index(usize); + bin_info = &arena_bin_info[binind]; + arena_redzones_validate(ptr, bin_info, true); +} + +void * +arena_malloc_small(arena_t *arena, size_t 
size, bool zero) +{ + void *ret; + arena_bin_t *bin; + arena_run_t *run; + szind_t binind; + + binind = size2index(size); + assert(binind < NBINS); + bin = &arena->bins[binind]; + size = index2size(binind); + + malloc_mutex_lock(&bin->lock); + if ((run = bin->runcur) != NULL && run->nfree > 0) + ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); + else + ret = arena_bin_malloc_hard(arena, bin); + + if (ret == NULL) { + malloc_mutex_unlock(&bin->lock); + return (NULL); + } + + if (config_stats) { + bin->stats.nmalloc++; + bin->stats.nrequests++; + bin->stats.curregs++; + } + malloc_mutex_unlock(&bin->lock); + if (config_prof && !isthreaded && arena_prof_accum(arena, size)) + prof_idump(); + + if (!zero) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (unlikely(opt_zero)) + memset(ret, 0, size); + } + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + } else { + if (config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + memset(ret, 0, size); + } + + return (ret); +} + +void * +arena_malloc_large(arena_t *arena, size_t size, bool zero) +{ + void *ret; + size_t usize; + uintptr_t random_offset; + arena_run_t *run; + arena_chunk_map_misc_t *miscelm; + UNUSED bool idump; + + /* Large allocation. */ + usize = s2u(size); + malloc_mutex_lock(&arena->lock); + if (config_cache_oblivious) { + uint64_t r; + + /* + * Compute a uniformly distributed offset within the first page + * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 + * for 4 KiB pages and 64-byte cachelines. + */ + prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, + UINT64_C(6364136223846793009), + UINT64_C(1442695040888963409)); + random_offset = ((uintptr_t)r) << LG_CACHELINE; + } else + random_offset = 0; + run = arena_run_alloc_large(arena, usize + large_pad, zero); + if (run == NULL) { + malloc_mutex_unlock(&arena->lock); + return (NULL); + } + miscelm = arena_run_to_miscelm(run); + ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + + random_offset); + if (config_stats) { + szind_t index = size2index(usize) - NBINS; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += usize; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; + } + if (config_prof) + idump = arena_prof_accum_locked(arena, usize); + malloc_mutex_unlock(&arena->lock); + if (config_prof && idump) + prof_idump(); + + if (!zero) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) + memset(ret, 0xa5, usize); + else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + } + + return (ret); +} + +/* Only handles large allocations that require more than page alignment. 
*/ +static void * +arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, + bool zero) +{ + void *ret; + size_t alloc_size, leadsize, trailsize; + arena_run_t *run; + arena_chunk_t *chunk; + arena_chunk_map_misc_t *miscelm; + void *rpages; + + assert(usize == PAGE_CEILING(usize)); + + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) + return (NULL); + + alignment = PAGE_CEILING(alignment); + alloc_size = usize + large_pad + alignment - PAGE; + + malloc_mutex_lock(&arena->lock); + run = arena_run_alloc_large(arena, alloc_size, false); + if (run == NULL) { + malloc_mutex_unlock(&arena->lock); + return (NULL); + } + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + miscelm = arena_run_to_miscelm(run); + rpages = arena_miscelm_to_rpages(miscelm); + + leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - + (uintptr_t)rpages; + assert(alloc_size >= leadsize + usize); + trailsize = alloc_size - leadsize - usize - large_pad; + if (leadsize != 0) { + arena_chunk_map_misc_t *head_miscelm = miscelm; + arena_run_t *head_run = run; + + miscelm = arena_miscelm_get(chunk, + arena_miscelm_to_pageind(head_miscelm) + (leadsize >> + LG_PAGE)); + run = &miscelm->run; + + arena_run_trim_head(arena, chunk, head_run, alloc_size, + alloc_size - leadsize); + } + if (trailsize != 0) { + arena_run_trim_tail(arena, chunk, run, usize + large_pad + + trailsize, usize + large_pad, false); + } + if (arena_run_init_large(arena, run, usize + large_pad, zero)) { + size_t run_ind = + arena_miscelm_to_pageind(arena_run_to_miscelm(run)); + bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); + bool decommitted = (arena_mapbits_decommitted_get(chunk, + run_ind) != 0); + + assert(decommitted); /* Cause of OOM. */ + arena_run_dalloc(arena, run, dirty, false, decommitted); + malloc_mutex_unlock(&arena->lock); + return (NULL); + } + ret = arena_miscelm_to_rpages(miscelm); + + if (config_stats) { + szind_t index = size2index(usize) - NBINS; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += usize; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; + } + malloc_mutex_unlock(&arena->lock); + + if (config_fill && !zero) { + if (unlikely(opt_junk_alloc)) + memset(ret, 0xa5, usize); + else if (unlikely(opt_zero)) + memset(ret, 0, usize); + } + return (ret); +} + +void * +arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) +{ + void *ret; + + if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE + && (usize & PAGE_MASK) == 0))) { + /* Small; alignment doesn't require special run placement. */ + ret = arena_malloc(tsd, arena, usize, zero, tcache); + } else if (usize <= large_maxclass && alignment <= PAGE) { + /* + * Large; alignment doesn't require special run placement. + * However, the cached pointer may be at a random offset from + * the base of the run, so do some bit manipulation to retrieve + * the base. 
+ */ + ret = arena_malloc(tsd, arena, usize, zero, tcache); + if (config_cache_oblivious) + ret = (void *)((uintptr_t)ret & ~PAGE_MASK); + } else { + if (likely(usize <= large_maxclass)) { + ret = arena_palloc_large(tsd, arena, usize, alignment, + zero); + } else if (likely(alignment <= chunksize)) + ret = huge_malloc(tsd, arena, usize, zero, tcache); + else { + ret = huge_palloc(tsd, arena, usize, alignment, zero, + tcache); + } + } + return (ret); +} + +void +arena_prof_promoted(const void *ptr, size_t size) +{ + arena_chunk_t *chunk; + size_t pageind; + szind_t binind; + + cassert(config_prof); + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + assert(isalloc(ptr, false) == LARGE_MINCLASS); + assert(isalloc(ptr, true) == LARGE_MINCLASS); + assert(size <= SMALL_MAXCLASS); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + binind = size2index(size); + assert(binind < NBINS); + arena_mapbits_large_binind_set(chunk, pageind, binind); + + assert(isalloc(ptr, false) == LARGE_MINCLASS); + assert(isalloc(ptr, true) == size); +} + +static void +arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + /* Dissociate run from bin. */ + if (run == bin->runcur) + bin->runcur = NULL; + else { + szind_t binind = arena_bin_index(extent_node_arena_get( + &chunk->node), bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + if (bin_info->nregs != 1) { + /* + * This block's conditional is necessary because if the + * run only contains one region, then it never gets + * inserted into the non-full runs tree. + */ + arena_bin_runs_remove(bin, run); + } + } +} + +static void +arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + assert(run != bin->runcur); + assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == + NULL); + + malloc_mutex_unlock(&bin->lock); + /******************************/ + malloc_mutex_lock(&arena->lock); + arena_run_dalloc_decommit(arena, chunk, run); + malloc_mutex_unlock(&arena->lock); + /****************************/ + malloc_mutex_lock(&bin->lock); + if (config_stats) + bin->stats.curruns--; +} + +static void +arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + /* + * Make sure that if bin->runcur is non-NULL, it refers to the lowest + * non-full run. It is okay to NULL runcur out rather than proactively + * keeping it pointing at the lowest non-full run. + */ + if ((uintptr_t)run < (uintptr_t)bin->runcur) { + /* Switch runcur. 
*/ + if (bin->runcur->nfree > 0) + arena_bin_runs_insert(bin, bin->runcur); + bin->runcur = run; + if (config_stats) + bin->stats.reruns++; + } else + arena_bin_runs_insert(bin, run); +} + +static void +arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, + arena_chunk_map_bits_t *bitselm, bool junked) +{ + size_t pageind, rpages_ind; + arena_run_t *run; + arena_bin_t *bin; + arena_bin_info_t *bin_info; + szind_t binind; + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); + run = &arena_miscelm_get(chunk, rpages_ind)->run; + binind = run->binind; + bin = &arena->bins[binind]; + bin_info = &arena_bin_info[binind]; + + if (!junked && config_fill && unlikely(opt_junk_free)) + arena_dalloc_junk_small(ptr, bin_info); + + arena_run_reg_dalloc(run, ptr); + if (run->nfree == bin_info->nregs) { + arena_dissociate_bin_run(chunk, run, bin); + arena_dalloc_bin_run(arena, chunk, run, bin); + } else if (run->nfree == 1 && run != bin->runcur) + arena_bin_lower_run(arena, chunk, run, bin); + + if (config_stats) { + bin->stats.ndalloc++; + bin->stats.curregs--; + } +} + +void +arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, + arena_chunk_map_bits_t *bitselm) +{ + + arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); +} + +void +arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind, arena_chunk_map_bits_t *bitselm) +{ + arena_run_t *run; + arena_bin_t *bin; + size_t rpages_ind; + + rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); + run = &arena_miscelm_get(chunk, rpages_ind)->run; + bin = &arena->bins[run->binind]; + malloc_mutex_lock(&bin->lock); + arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); + malloc_mutex_unlock(&bin->lock); +} + +void +arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t pageind) +{ + arena_chunk_map_bits_t *bitselm; + + if (config_debug) { + /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ + assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, + pageind)) != BININD_INVALID); + } + bitselm = arena_bitselm_get(chunk, pageind); + arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); +} + +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_large +#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) +#endif +void +arena_dalloc_junk_large(void *ptr, size_t usize) +{ + + if (config_fill && unlikely(opt_junk_free)) + memset(ptr, 0x5a, usize); +} +#ifdef JEMALLOC_JET +#undef arena_dalloc_junk_large +#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) +arena_dalloc_junk_large_t *arena_dalloc_junk_large = + JEMALLOC_N(arena_dalloc_junk_large_impl); +#endif + +static void +arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, + void *ptr, bool junked) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_run_t *run = &miscelm->run; + + if (config_fill || config_stats) { + size_t usize = arena_mapbits_large_size_get(chunk, pageind) - + large_pad; + + if (!junked) + arena_dalloc_junk_large(ptr, usize); + if (config_stats) { + szind_t index = size2index(usize) - NBINS; + + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= usize; + arena->stats.lstats[index].ndalloc++; + arena->stats.lstats[index].curruns--; + } + } + + arena_run_dalloc_decommit(arena, chunk, run); +} + +void +arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, + void *ptr) +{ + + arena_dalloc_large_locked_impl(arena, chunk, ptr, true); +} + +void +arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) +{ + + malloc_mutex_lock(&arena->lock); + arena_dalloc_large_locked_impl(arena, chunk, ptr, false); + malloc_mutex_unlock(&arena->lock); +} + +static void +arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t oldsize, size_t size) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); + arena_run_t *run = &miscelm->run; + + assert(size < oldsize); + + /* + * Shrink the run, and make trailing pages available for other + * allocations. + */ + malloc_mutex_lock(&arena->lock); + arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + + large_pad, true); + if (config_stats) { + szind_t oldindex = size2index(oldsize) - NBINS; + szind_t index = size2index(size) - NBINS; + + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= oldsize; + arena->stats.lstats[oldindex].ndalloc++; + arena->stats.lstats[oldindex].curruns--; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; + } + malloc_mutex_unlock(&arena->lock); +} + +static bool +arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t oldsize, size_t usize_min, size_t usize_max, bool zero) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t npages = (oldsize + large_pad) >> LG_PAGE; + size_t followsize; + + assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - + large_pad); + + /* Try to extend the run. 
*/ + malloc_mutex_lock(&arena->lock); + if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, + pageind+npages) != 0) + goto label_fail; + followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); + if (oldsize + followsize >= usize_min) { + /* + * The next run is available and sufficiently large. Split the + * following run, then merge the first part with the existing + * allocation. + */ + arena_run_t *run; + size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; + + usize = usize_max; + while (oldsize + followsize < usize) + usize = index2size(size2index(usize)-1); + assert(usize >= usize_min); + assert(usize >= oldsize); + splitsize = usize - oldsize; + if (splitsize == 0) + goto label_fail; + + run = &arena_miscelm_get(chunk, pageind+npages)->run; + if (arena_run_split_large(arena, run, splitsize, zero)) + goto label_fail; + + if (config_cache_oblivious && zero) { + /* + * Zero the trailing bytes of the original allocation's + * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the run is a multiple of + * CACHELINE in [0 .. PAGE). + */ + void *zbase = (void *)((uintptr_t)ptr + oldsize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); + } + + size = oldsize + splitsize; + npages = (size + large_pad) >> LG_PAGE; + + /* + * Mark the extended run as dirty if either portion of the run + * was dirty before allocation. This is rather pedantic, + * because there's not actually any sequence of events that + * could cause the resulting run to be passed to + * arena_run_dalloc() with the dirty argument set to false + * (which is when dirty flag consistency would really matter). + */ + flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | + arena_mapbits_dirty_get(chunk, pageind+npages-1); + flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; + arena_mapbits_large_set(chunk, pageind, size + large_pad, + flag_dirty | (flag_unzeroed_mask & + arena_mapbits_unzeroed_get(chunk, pageind))); + arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | + (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, + pageind+npages-1))); + + if (config_stats) { + szind_t oldindex = size2index(oldsize) - NBINS; + szind_t index = size2index(size) - NBINS; + + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= oldsize; + arena->stats.lstats[oldindex].ndalloc++; + arena->stats.lstats[oldindex].curruns--; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[index].nmalloc++; + arena->stats.lstats[index].nrequests++; + arena->stats.lstats[index].curruns++; + } + malloc_mutex_unlock(&arena->lock); + return (false); + } +label_fail: + malloc_mutex_unlock(&arena->lock); + return (true); +} + +#ifdef JEMALLOC_JET +#undef arena_ralloc_junk_large +#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) +#endif +static void +arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) +{ + + if (config_fill && unlikely(opt_junk_free)) { + memset((void *)((uintptr_t)ptr + usize), 0x5a, + old_usize - usize); + } +} +#ifdef JEMALLOC_JET +#undef arena_ralloc_junk_large +#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) +arena_ralloc_junk_large_t *arena_ralloc_junk_large = + JEMALLOC_N(arena_ralloc_junk_large_impl); +#endif + +/* + * Try to resize a large allocation, in order to avoid copying. This will + * always fail if growing an object, and the following run is already in use. + */ +static bool +arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, + size_t usize_max, bool zero) +{ + arena_chunk_t *chunk; + arena_t *arena; + + if (oldsize == usize_max) { + /* Current size class is compatible and maximal. */ + return (false); + } + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = extent_node_arena_get(&chunk->node); + + if (oldsize < usize_max) { + bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, + usize_min, usize_max, zero); + if (config_fill && !ret && !zero) { + if (unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)ptr + oldsize), 0xa5, + isalloc(ptr, config_prof) - oldsize); + } else if (unlikely(opt_zero)) { + memset((void *)((uintptr_t)ptr + oldsize), 0, + isalloc(ptr, config_prof) - oldsize); + } + } + return (ret); + } + + assert(oldsize > usize_max); + /* Fill before shrinking in order avoid a race. */ + arena_ralloc_junk_large(ptr, oldsize, usize_max); + arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); + return (false); +} + +bool +arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, + bool zero) +{ + size_t usize_min, usize_max; + + usize_min = s2u(size); + usize_max = s2u(size + extra); + if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { + /* + * Avoid moving the allocation if the size class can be left the + * same. + */ + if (oldsize <= SMALL_MAXCLASS) { + assert(arena_bin_info[size2index(oldsize)].reg_size == + oldsize); + if ((usize_max <= SMALL_MAXCLASS && + size2index(usize_max) == size2index(oldsize)) || + (size <= oldsize && usize_max >= oldsize)) + return (false); + } else { + if (usize_max > SMALL_MAXCLASS) { + if (!arena_ralloc_large(ptr, oldsize, usize_min, + usize_max, zero)) + return (false); + } + } + + /* Reallocation would require a move. 
*/ + return (true); + } else { + return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, + zero)); + } +} + +static void * +arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) +{ + + if (alignment == 0) + return (arena_malloc(tsd, arena, usize, zero, tcache)); + usize = sa2u(usize, alignment); + if (usize == 0) + return (NULL); + return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); +} + +void * +arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, + size_t alignment, bool zero, tcache_t *tcache) +{ + void *ret; + size_t usize; + + usize = s2u(size); + if (usize == 0) + return (NULL); + + if (likely(usize <= large_maxclass)) { + size_t copysize; + + /* Try to avoid moving the allocation. */ + if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) + return (ptr); + + /* + * size and oldsize are different enough that we need to move + * the object. In that case, fall back to allocating new space + * and copying. + */ + ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, + zero, tcache); + if (ret == NULL) + return (NULL); + + /* + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). + */ + + copysize = (usize < oldsize) ? usize : oldsize; + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); + memcpy(ret, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache); + } else { + ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, + zero, tcache); + } + return (ret); +} + +dss_prec_t +arena_dss_prec_get(arena_t *arena) +{ + dss_prec_t ret; + + malloc_mutex_lock(&arena->lock); + ret = arena->dss_prec; + malloc_mutex_unlock(&arena->lock); + return (ret); +} + +bool +arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) +{ + + if (!have_dss) + return (dss_prec != dss_prec_disabled); + malloc_mutex_lock(&arena->lock); + arena->dss_prec = dss_prec; + malloc_mutex_unlock(&arena->lock); + return (false); +} + +ssize_t +arena_lg_dirty_mult_default_get(void) +{ + + return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); +} + +bool +arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) +{ + + if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) + return (true); + atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); + return (false); +} + +void +arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, + size_t *nactive, size_t *ndirty, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, + malloc_huge_stats_t *hstats) +{ + unsigned i; + + malloc_mutex_lock(&arena->lock); + *dss = dss_prec_names[arena->dss_prec]; + *lg_dirty_mult = arena->lg_dirty_mult; + *nactive += arena->nactive; + *ndirty += arena->ndirty; + + astats->mapped += arena->stats.mapped; + astats->npurge += arena->stats.npurge; + astats->nmadvise += arena->stats.nmadvise; + astats->purged += arena->stats.purged; + astats->metadata_mapped += arena->stats.metadata_mapped; + astats->metadata_allocated += arena_metadata_allocated_get(arena); + astats->allocated_large += arena->stats.allocated_large; + astats->nmalloc_large += arena->stats.nmalloc_large; + astats->ndalloc_large += arena->stats.ndalloc_large; + astats->nrequests_large += arena->stats.nrequests_large; + astats->allocated_huge += arena->stats.allocated_huge; + astats->nmalloc_huge += arena->stats.nmalloc_huge; + astats->ndalloc_huge += arena->stats.ndalloc_huge; + + for (i = 0; i < nlclasses; i++) { + lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; + 
lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; + lstats[i].nrequests += arena->stats.lstats[i].nrequests; + lstats[i].curruns += arena->stats.lstats[i].curruns; + } + + for (i = 0; i < nhclasses; i++) { + hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; + hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; + hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; + } + malloc_mutex_unlock(&arena->lock); + + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + + malloc_mutex_lock(&bin->lock); + bstats[i].nmalloc += bin->stats.nmalloc; + bstats[i].ndalloc += bin->stats.ndalloc; + bstats[i].nrequests += bin->stats.nrequests; + bstats[i].curregs += bin->stats.curregs; + if (config_tcache) { + bstats[i].nfills += bin->stats.nfills; + bstats[i].nflushes += bin->stats.nflushes; + } + bstats[i].nruns += bin->stats.nruns; + bstats[i].reruns += bin->stats.reruns; + bstats[i].curruns += bin->stats.curruns; + malloc_mutex_unlock(&bin->lock); + } +} + +arena_t * +arena_new(unsigned ind) +{ + arena_t *arena; + unsigned i; + arena_bin_t *bin; + + /* + * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly + * because there is no way to clean up if base_alloc() OOMs. + */ + if (config_stats) { + arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) + + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + + nhclasses) * sizeof(malloc_huge_stats_t)); + } else + arena = (arena_t *)base_alloc(sizeof(arena_t)); + if (arena == NULL) + return (NULL); + + arena->ind = ind; + arena->nthreads = 0; + if (malloc_mutex_init(&arena->lock)) + return (NULL); + + if (config_stats) { + memset(&arena->stats, 0, sizeof(arena_stats_t)); + arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena + + CACHELINE_CEILING(sizeof(arena_t))); + memset(arena->stats.lstats, 0, nlclasses * + sizeof(malloc_large_stats_t)); + arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena + + CACHELINE_CEILING(sizeof(arena_t)) + + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); + memset(arena->stats.hstats, 0, nhclasses * + sizeof(malloc_huge_stats_t)); + if (config_tcache) + ql_new(&arena->tcache_ql); + } + + if (config_prof) + arena->prof_accumbytes = 0; + + if (config_cache_oblivious) { + /* + * A nondeterministic seed based on the address of arena reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + arena->offset_state = config_debug ? ind : + (uint64_t)(uintptr_t)arena; + } + + arena->dss_prec = chunk_dss_prec_get(); + + arena->spare = NULL; + + arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); + arena->purging = false; + arena->nactive = 0; + arena->ndirty = 0; + + arena_avail_tree_new(&arena->runs_avail); + qr_new(&arena->runs_dirty, rd_link); + qr_new(&arena->chunks_cache, cc_link); + + ql_new(&arena->huge); + if (malloc_mutex_init(&arena->huge_mtx)) + return (NULL); + + extent_tree_szad_new(&arena->chunks_szad_cached); + extent_tree_ad_new(&arena->chunks_ad_cached); + extent_tree_szad_new(&arena->chunks_szad_retained); + extent_tree_ad_new(&arena->chunks_ad_retained); + if (malloc_mutex_init(&arena->chunks_mtx)) + return (NULL); + ql_new(&arena->node_cache); + if (malloc_mutex_init(&arena->node_cache_mtx)) + return (NULL); + + arena->chunk_hooks = chunk_hooks_default; + + /* Initialize bins. 
*/ + for (i = 0; i < NBINS; i++) { + bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock)) + return (NULL); + bin->runcur = NULL; + arena_run_tree_new(&bin->runs); + if (config_stats) + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + } + + return (arena); +} + +/* + * Calculate bin_info->run_size such that it meets the following constraints: + * + * *) bin_info->run_size <= arena_maxrun + * *) bin_info->nregs <= RUN_MAXREGS + * + * bin_info->nregs and bin_info->reg0_offset are also calculated here, since + * these settings are all interdependent. + */ +static void +bin_info_run_size_calc(arena_bin_info_t *bin_info) +{ + size_t pad_size; + size_t try_run_size, perfect_run_size, actual_run_size; + uint32_t try_nregs, perfect_nregs, actual_nregs; + + /* + * Determine redzone size based on minimum alignment and minimum + * redzone size. Add padding to the end of the run if it is needed to + * align the regions. The padding allows each redzone to be half the + * minimum alignment; without the padding, each redzone would have to + * be twice as large in order to maintain alignment. + */ + if (config_fill && unlikely(opt_redzone)) { + size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - + 1); + if (align_min <= REDZONE_MINSIZE) { + bin_info->redzone_size = REDZONE_MINSIZE; + pad_size = 0; + } else { + bin_info->redzone_size = align_min >> 1; + pad_size = bin_info->redzone_size; + } + } else { + bin_info->redzone_size = 0; + pad_size = 0; + } + bin_info->reg_interval = bin_info->reg_size + + (bin_info->redzone_size << 1); + + /* + * Compute run size under ideal conditions (no redzones, no limit on run + * size). + */ + try_run_size = PAGE; + try_nregs = try_run_size / bin_info->reg_size; + do { + perfect_run_size = try_run_size; + perfect_nregs = try_nregs; + + try_run_size += PAGE; + try_nregs = try_run_size / bin_info->reg_size; + } while (perfect_run_size != perfect_nregs * bin_info->reg_size); + assert(perfect_nregs <= RUN_MAXREGS); + + actual_run_size = perfect_run_size; + actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; + + /* + * Redzones can require enough padding that not even a single region can + * fit within the number of pages that would normally be dedicated to a + * run for this size class. Increase the run size until at least one + * region fits. + */ + while (actual_nregs == 0) { + assert(config_fill && unlikely(opt_redzone)); + + actual_run_size += PAGE; + actual_nregs = (actual_run_size - pad_size) / + bin_info->reg_interval; + } + + /* + * Make sure that the run will fit within an arena chunk. + */ + while (actual_run_size > arena_maxrun) { + actual_run_size -= PAGE; + actual_nregs = (actual_run_size - pad_size) / + bin_info->reg_interval; + } + assert(actual_nregs > 0); + assert(actual_run_size == s2u(actual_run_size)); + + /* Copy final settings. 
*/ + bin_info->run_size = actual_run_size; + bin_info->nregs = actual_nregs; + bin_info->reg0_offset = actual_run_size - (actual_nregs * + bin_info->reg_interval) - pad_size + bin_info->redzone_size; + + if (actual_run_size > small_maxrun) + small_maxrun = actual_run_size; + + assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs + * bin_info->reg_interval) + pad_size == bin_info->run_size); +} + +static void +bin_info_init(void) +{ + arena_bin_info_t *bin_info; + +#define BIN_INFO_INIT_bin_yes(index, size) \ + bin_info = &arena_bin_info[index]; \ + bin_info->reg_size = size; \ + bin_info_run_size_calc(bin_info); \ + bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); +#define BIN_INFO_INIT_bin_no(index, size) +#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ + BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) + SIZE_CLASSES +#undef BIN_INFO_INIT_bin_yes +#undef BIN_INFO_INIT_bin_no +#undef SC +} + +static bool +small_run_size_init(void) +{ + + assert(small_maxrun != 0); + + small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >> + LG_PAGE)); + if (small_run_tab == NULL) + return (true); + +#define TAB_INIT_bin_yes(index, size) { \ + arena_bin_info_t *bin_info = &arena_bin_info[index]; \ + small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ + } +#define TAB_INIT_bin_no(index, size) +#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ + TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) + SIZE_CLASSES +#undef TAB_INIT_bin_yes +#undef TAB_INIT_bin_no +#undef SC + + return (false); +} + +bool +arena_boot(void) +{ + unsigned i; + + arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); + + /* + * Compute the header size such that it is large enough to contain the + * page map. The page map is biased to omit entries for the header + * itself, so some iteration is necessary to compute the map bias. + * + * 1) Compute safe header_size and map_bias values that include enough + * space for an unbiased page map. + * 2) Refine map_bias based on (1) to omit the header pages in the page + * map. The resulting map_bias may be one too small. + * 3) Refine map_bias based on (2). The result will be >= the result + * from (2), and will always be correct. + */ + map_bias = 0; + for (i = 0; i < 3; i++) { + size_t header_size = offsetof(arena_chunk_t, map_bits) + + ((sizeof(arena_chunk_map_bits_t) + + sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); + map_bias = (header_size + PAGE_MASK) >> LG_PAGE; + } + assert(map_bias > 0); + + map_misc_offset = offsetof(arena_chunk_t, map_bits) + + sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); + + arena_maxrun = chunksize - (map_bias << LG_PAGE); + assert(arena_maxrun > 0); + large_maxclass = index2size(size2index(chunksize)-1); + if (large_maxclass > arena_maxrun) { + /* + * For small chunk sizes it's possible for there to be fewer + * non-header pages available than are necessary to serve the + * size classes just below chunksize. 
+ */ + large_maxclass = arena_maxrun; + } + assert(large_maxclass > 0); + nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); + nhclasses = NSIZES - nlclasses - NBINS; + + bin_info_init(); + return (small_run_size_init()); +} + +void +arena_prefork(arena_t *arena) +{ + unsigned i; + + malloc_mutex_prefork(&arena->lock); + malloc_mutex_prefork(&arena->huge_mtx); + malloc_mutex_prefork(&arena->chunks_mtx); + malloc_mutex_prefork(&arena->node_cache_mtx); + for (i = 0; i < NBINS; i++) + malloc_mutex_prefork(&arena->bins[i].lock); +} + +void +arena_postfork_parent(arena_t *arena) +{ + unsigned i; + + for (i = 0; i < NBINS; i++) + malloc_mutex_postfork_parent(&arena->bins[i].lock); + malloc_mutex_postfork_parent(&arena->node_cache_mtx); + malloc_mutex_postfork_parent(&arena->chunks_mtx); + malloc_mutex_postfork_parent(&arena->huge_mtx); + malloc_mutex_postfork_parent(&arena->lock); +} + +void +arena_postfork_child(arena_t *arena) +{ + unsigned i; + + for (i = 0; i < NBINS; i++) + malloc_mutex_postfork_child(&arena->bins[i].lock); + malloc_mutex_postfork_child(&arena->node_cache_mtx); + malloc_mutex_postfork_child(&arena->chunks_mtx); + malloc_mutex_postfork_child(&arena->huge_mtx); + malloc_mutex_postfork_child(&arena->lock); +} diff --git a/redis.submodule/jemalloc/src/atomic.c b/redis.submodule/jemalloc/src/atomic.c new file mode 100644 index 0000000..77ee313 --- /dev/null +++ b/redis.submodule/jemalloc/src/atomic.c @@ -0,0 +1,2 @@ +#define JEMALLOC_ATOMIC_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/redis.submodule/jemalloc/src/base.c b/redis.submodule/jemalloc/src/base.c new file mode 100644 index 0000000..7cdcfed --- /dev/null +++ b/redis.submodule/jemalloc/src/base.c @@ -0,0 +1,174 @@ +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +static malloc_mutex_t base_mtx; +static extent_tree_t base_avail_szad; +static extent_node_t *base_nodes; +static size_t base_allocated; +static size_t base_resident; +static size_t base_mapped; + +/******************************************************************************/ + +/* base_mtx must be held. */ +static extent_node_t * +base_node_try_alloc(void) +{ + extent_node_t *node; + + if (base_nodes == NULL) + return (NULL); + node = base_nodes; + base_nodes = *(extent_node_t **)node; + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); + return (node); +} + +/* base_mtx must be held. */ +static void +base_node_dalloc(extent_node_t *node) +{ + + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); + *(extent_node_t **)node = base_nodes; + base_nodes = node; +} + +/* base_mtx must be held. */ +static extent_node_t * +base_chunk_alloc(size_t minsize) +{ + extent_node_t *node; + size_t csize, nsize; + void *addr; + + assert(minsize != 0); + node = base_node_try_alloc(); + /* Allocate enough space to also carve a node out if necessary. */ + nsize = (node == NULL) ? 
CACHELINE_CEILING(sizeof(extent_node_t)) : 0; + csize = CHUNK_CEILING(minsize + nsize); + addr = chunk_alloc_base(csize); + if (addr == NULL) { + if (node != NULL) + base_node_dalloc(node); + return (NULL); + } + base_mapped += csize; + if (node == NULL) { + node = (extent_node_t *)addr; + addr = (void *)((uintptr_t)addr + nsize); + csize -= nsize; + if (config_stats) { + base_allocated += nsize; + base_resident += PAGE_CEILING(nsize); + } + } + extent_node_init(node, NULL, addr, csize, true, true); + return (node); +} + +/* + * base_alloc() guarantees demand-zeroed memory, in order to make multi-page + * sparse data structures such as radix tree nodes efficient with respect to + * physical memory usage. + */ +void * +base_alloc(size_t size) +{ + void *ret; + size_t csize, usize; + extent_node_t *node; + extent_node_t key; + + /* + * Round size up to nearest multiple of the cacheline size, so that + * there is no chance of false cache line sharing. + */ + csize = CACHELINE_CEILING(size); + + usize = s2u(csize); + extent_node_init(&key, NULL, NULL, usize, false, false); + malloc_mutex_lock(&base_mtx); + node = extent_tree_szad_nsearch(&base_avail_szad, &key); + if (node != NULL) { + /* Use existing space. */ + extent_tree_szad_remove(&base_avail_szad, node); + } else { + /* Try to allocate more space. */ + node = base_chunk_alloc(csize); + } + if (node == NULL) { + ret = NULL; + goto label_return; + } + + ret = extent_node_addr_get(node); + if (extent_node_size_get(node) > csize) { + extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); + extent_node_size_set(node, extent_node_size_get(node) - csize); + extent_tree_szad_insert(&base_avail_szad, node); + } else + base_node_dalloc(node); + if (config_stats) { + base_allocated += csize; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. + */ + base_resident += PAGE_CEILING((uintptr_t)ret + csize) - + PAGE_CEILING((uintptr_t)ret); + } + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); +label_return: + malloc_mutex_unlock(&base_mtx); + return (ret); +} + +void +base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) +{ + + malloc_mutex_lock(&base_mtx); + assert(base_allocated <= base_resident); + assert(base_resident <= base_mapped); + *allocated = base_allocated; + *resident = base_resident; + *mapped = base_mapped; + malloc_mutex_unlock(&base_mtx); +} + +bool +base_boot(void) +{ + + if (malloc_mutex_init(&base_mtx)) + return (true); + extent_tree_szad_new(&base_avail_szad); + base_nodes = NULL; + + return (false); +} + +void +base_prefork(void) +{ + + malloc_mutex_prefork(&base_mtx); +} + +void +base_postfork_parent(void) +{ + + malloc_mutex_postfork_parent(&base_mtx); +} + +void +base_postfork_child(void) +{ + + malloc_mutex_postfork_child(&base_mtx); +} diff --git a/redis.submodule/jemalloc/src/bitmap.c b/redis.submodule/jemalloc/src/bitmap.c new file mode 100644 index 0000000..c733372 --- /dev/null +++ b/redis.submodule/jemalloc/src/bitmap.c @@ -0,0 +1,78 @@ +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) +{ + unsigned i; + size_t group_count; + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + /* + * Compute the number of groups necessary to store nbits bits, and + * progressively work upward through the levels until reaching a level + * that requires only one group. 
+ */ + binfo->levels[0].group_offset = 0; + group_count = BITMAP_BITS2GROUPS(nbits); + for (i = 1; group_count > 1; i++) { + assert(i < BITMAP_MAX_LEVELS); + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + group_count = BITMAP_BITS2GROUPS(group_count); + } + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); + binfo->nlevels = i; + binfo->nbits = nbits; +} + +size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) +{ + + return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); +} + +size_t +bitmap_size(size_t nbits) +{ + bitmap_info_t binfo; + + bitmap_info_init(&binfo, nbits); + return (bitmap_info_ngroups(&binfo)); +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t extra; + unsigned i; + + /* + * Bits are actually inverted with regard to the external bitmap + * interface, so the bitmap starts out with all 1 bits, except for + * trailing unused bits (if any). Note that each group uses bit 0 to + * correspond to the first logical bit in the group, so extra bits + * are the most significant bits of the last group. + */ + memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << + LG_SIZEOF_BITMAP); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[1].group_offset - 1] >>= extra; + for (i = 1; i < binfo->nlevels; i++) { + size_t group_count = binfo->levels[i].group_offset - + binfo->levels[i-1].group_offset; + extra = (BITMAP_GROUP_NBITS - (group_count & + BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } +} diff --git a/redis.submodule/jemalloc/src/chunk.c b/redis.submodule/jemalloc/src/chunk.c new file mode 100644 index 0000000..6ba1ca7 --- /dev/null +++ b/redis.submodule/jemalloc/src/chunk.c @@ -0,0 +1,761 @@ +#define JEMALLOC_CHUNK_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +const char *opt_dss = DSS_DEFAULT; +size_t opt_lg_chunk = 0; + +/* Used exclusively for gdump triggering. */ +static size_t curchunks; +static size_t highchunks; + +rtree_t chunks_rtree; + +/* Various chunk-related settings. */ +size_t chunksize; +size_t chunksize_mask; /* (chunksize - 1). 
*/ +size_t chunk_npages; + +static void *chunk_alloc_default(void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind); +static bool chunk_dalloc_default(void *chunk, size_t size, bool committed, + unsigned arena_ind); +static bool chunk_commit_default(void *chunk, size_t size, size_t offset, + size_t length, unsigned arena_ind); +static bool chunk_decommit_default(void *chunk, size_t size, size_t offset, + size_t length, unsigned arena_ind); +static bool chunk_purge_default(void *chunk, size_t size, size_t offset, + size_t length, unsigned arena_ind); +static bool chunk_split_default(void *chunk, size_t size, size_t size_a, + size_t size_b, bool committed, unsigned arena_ind); +static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, + size_t size_b, bool committed, unsigned arena_ind); + +const chunk_hooks_t chunk_hooks_default = { + chunk_alloc_default, + chunk_dalloc_default, + chunk_commit_default, + chunk_decommit_default, + chunk_purge_default, + chunk_split_default, + chunk_merge_default +}; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, + extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, + void *chunk, size_t size, bool zeroed, bool committed); + +/******************************************************************************/ + +static chunk_hooks_t +chunk_hooks_get_locked(arena_t *arena) +{ + + return (arena->chunk_hooks); +} + +chunk_hooks_t +chunk_hooks_get(arena_t *arena) +{ + chunk_hooks_t chunk_hooks; + + malloc_mutex_lock(&arena->chunks_mtx); + chunk_hooks = chunk_hooks_get_locked(arena); + malloc_mutex_unlock(&arena->chunks_mtx); + + return (chunk_hooks); +} + +chunk_hooks_t +chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks) +{ + chunk_hooks_t old_chunk_hooks; + + malloc_mutex_lock(&arena->chunks_mtx); + old_chunk_hooks = arena->chunk_hooks; + /* + * Copy each field atomically so that it is impossible for readers to + * see partially updated pointers. There are places where readers only + * need one hook function pointer (therefore no need to copy the + * entirety of arena->chunk_hooks), and stale reads do not affect + * correctness, so they perform unlocked reads. + */ +#define ATOMIC_COPY_HOOK(n) do { \ + union { \ + chunk_##n##_t **n; \ + void **v; \ + } u; \ + u.n = &arena->chunk_hooks.n; \ + atomic_write_p(u.v, chunk_hooks->n); \ +} while (0) + ATOMIC_COPY_HOOK(alloc); + ATOMIC_COPY_HOOK(dalloc); + ATOMIC_COPY_HOOK(commit); + ATOMIC_COPY_HOOK(decommit); + ATOMIC_COPY_HOOK(purge); + ATOMIC_COPY_HOOK(split); + ATOMIC_COPY_HOOK(merge); +#undef ATOMIC_COPY_HOOK + malloc_mutex_unlock(&arena->chunks_mtx); + + return (old_chunk_hooks); +} + +static void +chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks, + bool locked) +{ + static const chunk_hooks_t uninitialized_hooks = + CHUNK_HOOKS_INITIALIZER; + + if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) == + 0) { + *chunk_hooks = locked ? 
chunk_hooks_get_locked(arena) : + chunk_hooks_get(arena); + } +} + +static void +chunk_hooks_assure_initialized_locked(arena_t *arena, + chunk_hooks_t *chunk_hooks) +{ + + chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true); +} + +static void +chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks) +{ + + chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false); +} + +bool +chunk_register(const void *chunk, const extent_node_t *node) +{ + + assert(extent_node_addr_get(node) == chunk); + + if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node)) + return (true); + if (config_prof && opt_prof) { + size_t size = extent_node_size_get(node); + size_t nadd = (size == 0) ? 1 : size / chunksize; + size_t cur = atomic_add_z(&curchunks, nadd); + size_t high = atomic_read_z(&highchunks); + while (cur > high && atomic_cas_z(&highchunks, high, cur)) { + /* + * Don't refresh cur, because it may have decreased + * since this thread lost the highchunks update race. + */ + high = atomic_read_z(&highchunks); + } + if (cur > high && prof_gdump_get_unlocked()) + prof_gdump(); + } + + return (false); +} + +void +chunk_deregister(const void *chunk, const extent_node_t *node) +{ + bool err; + + err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL); + assert(!err); + if (config_prof && opt_prof) { + size_t size = extent_node_size_get(node); + size_t nsub = (size == 0) ? 1 : size / chunksize; + assert(atomic_read_z(&curchunks) >= nsub); + atomic_sub_z(&curchunks, nsub); + } +} + +/* + * Do first-best-fit chunk selection, i.e. select the lowest chunk that best + * fits. + */ +static extent_node_t * +chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad, + extent_tree_t *chunks_ad, size_t size) +{ + extent_node_t key; + + assert(size == CHUNK_CEILING(size)); + + extent_node_init(&key, arena, NULL, size, false, false); + return (extent_tree_szad_nsearch(chunks_szad, &key)); +} + +static void * +chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks, + extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, + void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, + bool dalloc_node) +{ + void *ret; + extent_node_t *node; + size_t alloc_size, leadsize, trailsize; + bool zeroed, committed; + + assert(new_addr == NULL || alignment == chunksize); + /* + * Cached chunks use the node linkage embedded in their headers, in + * which case dalloc_node is true, and new_addr is non-NULL because + * we're operating on a specific chunk. + */ + assert(dalloc_node || new_addr != NULL); + + alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize)); + /* Beware size_t wrap-around. 
*/ + if (alloc_size < size) + return (NULL); + malloc_mutex_lock(&arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(arena, chunk_hooks); + if (new_addr != NULL) { + extent_node_t key; + extent_node_init(&key, arena, new_addr, alloc_size, false, + false); + node = extent_tree_ad_search(chunks_ad, &key); + } else { + node = chunk_first_best_fit(arena, chunks_szad, chunks_ad, + alloc_size); + } + if (node == NULL || (new_addr != NULL && extent_node_size_get(node) < + size)) { + malloc_mutex_unlock(&arena->chunks_mtx); + return (NULL); + } + leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node), + alignment) - (uintptr_t)extent_node_addr_get(node); + assert(new_addr == NULL || leadsize == 0); + assert(extent_node_size_get(node) >= leadsize + size); + trailsize = extent_node_size_get(node) - leadsize - size; + ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize); + zeroed = extent_node_zeroed_get(node); + if (zeroed) + *zero = true; + committed = extent_node_committed_get(node); + if (committed) + *commit = true; + /* Split the lead. */ + if (leadsize != 0 && + chunk_hooks->split(extent_node_addr_get(node), + extent_node_size_get(node), leadsize, size, false, arena->ind)) { + malloc_mutex_unlock(&arena->chunks_mtx); + return (NULL); + } + /* Remove node from the tree. */ + extent_tree_szad_remove(chunks_szad, node); + extent_tree_ad_remove(chunks_ad, node); + arena_chunk_cache_maybe_remove(arena, node, cache); + if (leadsize != 0) { + /* Insert the leading space as a smaller chunk. */ + extent_node_size_set(node, leadsize); + extent_tree_szad_insert(chunks_szad, node); + extent_tree_ad_insert(chunks_ad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + node = NULL; + } + if (trailsize != 0) { + /* Split the trail. */ + if (chunk_hooks->split(ret, size + trailsize, size, + trailsize, false, arena->ind)) { + if (dalloc_node && node != NULL) + arena_node_dalloc(arena, node); + malloc_mutex_unlock(&arena->chunks_mtx); + chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, + cache, ret, size + trailsize, zeroed, committed); + return (NULL); + } + /* Insert the trailing space as a smaller chunk. */ + if (node == NULL) { + node = arena_node_alloc(arena); + if (node == NULL) { + malloc_mutex_unlock(&arena->chunks_mtx); + chunk_record(arena, chunk_hooks, chunks_szad, + chunks_ad, cache, ret, size + trailsize, + zeroed, committed); + return (NULL); + } + } + extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size), + trailsize, zeroed, committed); + extent_tree_szad_insert(chunks_szad, node); + extent_tree_ad_insert(chunks_ad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + node = NULL; + } + if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) { + malloc_mutex_unlock(&arena->chunks_mtx); + chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache, + ret, size, zeroed, committed); + return (NULL); + } + malloc_mutex_unlock(&arena->chunks_mtx); + + assert(dalloc_node || node != NULL); + if (dalloc_node && node != NULL) + arena_node_dalloc(arena, node); + if (*zero) { + if (!zeroed) + memset(ret, 0, size); + else if (config_debug) { + size_t i; + size_t *p = (size_t *)(uintptr_t)ret; + + JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size); + for (i = 0; i < size / sizeof(size_t); i++) + assert(p[i] == 0); + } + } + return (ret); +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. 
arena_chunk_alloc() takes + * advantage of this to avoid demanding zeroed chunks, but taking advantage of + * them if they are returned. + */ +static void * +chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit, dss_prec_t dss_prec) +{ + void *ret; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + /* Retained. */ + if ((ret = chunk_recycle(arena, &chunk_hooks, + &arena->chunks_szad_retained, &arena->chunks_ad_retained, false, + new_addr, size, alignment, zero, commit, true)) != NULL) + return (ret); + + /* "primary" dss. */ + if (have_dss && dss_prec == dss_prec_primary && (ret = + chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != + NULL) + return (ret); + /* + * mmap. Requesting an address is not implemented for + * chunk_alloc_mmap(), so only call it if (new_addr == NULL). + */ + if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero, + commit)) != NULL) + return (ret); + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) != + NULL) + return (ret); + + /* All strategies for allocation failed. */ + return (NULL); +} + +void * +chunk_alloc_base(size_t size) +{ + void *ret; + bool zero, commit; + + /* + * Directly call chunk_alloc_mmap() rather than chunk_alloc_core() + * because it's critical that chunk_alloc_base() return untouched + * demand-zeroed virtual memory. + */ + zero = true; + commit = true; + ret = chunk_alloc_mmap(size, chunksize, &zero, &commit); + if (ret == NULL) + return (NULL); + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + + return (ret); +} + +void * +chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool dalloc_node) +{ + void *ret; + bool commit; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + commit = true; + ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached, + &arena->chunks_ad_cached, true, new_addr, size, alignment, zero, + &commit, dalloc_node); + if (ret == NULL) + return (NULL); + assert(commit); + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + return (ret); +} + +static arena_t * +chunk_arena_get(unsigned arena_ind) +{ + arena_t *arena; + + /* Dodge tsd for a0 in order to avoid bootstrapping issues. */ + arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind, + false, true); + /* + * The arena we're allocating on behalf of must have been initialized + * already. 
+ */ + assert(arena != NULL); + return (arena); +} + +static void * +chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit, unsigned arena_ind) +{ + void *ret; + arena_t *arena; + + arena = chunk_arena_get(arena_ind); + ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, + commit, arena->dss_prec); + if (ret == NULL) + return (NULL); + if (config_valgrind) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + + return (ret); +} + +void * +chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) +{ + void *ret; + + chunk_hooks_assure_initialized(arena, chunk_hooks); + ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit, + arena->ind); + if (ret == NULL) + return (NULL); + if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default) + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize); + return (ret); +} + +static void +chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks, + extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache, + void *chunk, size_t size, bool zeroed, bool committed) +{ + bool unzeroed; + extent_node_t *node, *prev; + extent_node_t key; + + assert(!cache || !zeroed); + unzeroed = cache || !zeroed; + JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); + + malloc_mutex_lock(&arena->chunks_mtx); + chunk_hooks_assure_initialized_locked(arena, chunk_hooks); + extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, + false, false); + node = extent_tree_ad_nsearch(chunks_ad, &key); + /* Try to coalesce forward. */ + if (node != NULL && extent_node_addr_get(node) == + extent_node_addr_get(&key) && extent_node_committed_get(node) == + committed && !chunk_hooks->merge(chunk, size, + extent_node_addr_get(node), extent_node_size_get(node), false, + arena->ind)) { + /* + * Coalesce chunk with the following address range. This does + * not change the position within chunks_ad, so only + * remove/insert from/into chunks_szad. + */ + extent_tree_szad_remove(chunks_szad, node); + arena_chunk_cache_maybe_remove(arena, node, cache); + extent_node_addr_set(node, chunk); + extent_node_size_set(node, size + extent_node_size_get(node)); + extent_node_zeroed_set(node, extent_node_zeroed_get(node) && + !unzeroed); + extent_tree_szad_insert(chunks_szad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + } else { + /* Coalescing forward failed, so insert a new node. */ + node = arena_node_alloc(arena); + if (node == NULL) { + /* + * Node allocation failed, which is an exceedingly + * unlikely failure. Leak chunk after making sure its + * pages have already been purged, so that this is only + * a virtual memory leak. + */ + if (cache) { + chunk_purge_wrapper(arena, chunk_hooks, chunk, + size, 0, size); + } + goto label_return; + } + extent_node_init(node, arena, chunk, size, !unzeroed, + committed); + extent_tree_ad_insert(chunks_ad, node); + extent_tree_szad_insert(chunks_szad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + } + + /* Try to coalesce backward. */ + prev = extent_tree_ad_prev(chunks_ad, node); + if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) + + extent_node_size_get(prev)) == chunk && + extent_node_committed_get(prev) == committed && + !chunk_hooks->merge(extent_node_addr_get(prev), + extent_node_size_get(prev), chunk, size, false, arena->ind)) { + /* + * Coalesce chunk with the previous address range. 
This does + * not change the position within chunks_ad, so only + * remove/insert node from/into chunks_szad. + */ + extent_tree_szad_remove(chunks_szad, prev); + extent_tree_ad_remove(chunks_ad, prev); + arena_chunk_cache_maybe_remove(arena, prev, cache); + extent_tree_szad_remove(chunks_szad, node); + arena_chunk_cache_maybe_remove(arena, node, cache); + extent_node_addr_set(node, extent_node_addr_get(prev)); + extent_node_size_set(node, extent_node_size_get(prev) + + extent_node_size_get(node)); + extent_node_zeroed_set(node, extent_node_zeroed_get(prev) && + extent_node_zeroed_get(node)); + extent_tree_szad_insert(chunks_szad, node); + arena_chunk_cache_maybe_insert(arena, node, cache); + + arena_node_dalloc(arena, prev); + } + +label_return: + malloc_mutex_unlock(&arena->chunks_mtx); +} + +void +chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, + size_t size, bool committed) +{ + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert(size != 0); + assert((size & chunksize_mask) == 0); + + chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached, + &arena->chunks_ad_cached, true, chunk, size, false, committed); + arena_maybe_purge(arena); +} + +void +chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, + size_t size, bool zeroed, bool committed) +{ + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert(size != 0); + assert((size & chunksize_mask) == 0); + + chunk_hooks_assure_initialized(arena, chunk_hooks); + /* Try to deallocate. */ + if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind)) + return; + /* Try to decommit; purge if that fails. */ + if (committed) { + committed = chunk_hooks->decommit(chunk, size, 0, size, + arena->ind); + } + zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size, + arena->ind); + chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained, + &arena->chunks_ad_retained, false, chunk, size, zeroed, committed); +} + +static bool +chunk_dalloc_default(void *chunk, size_t size, bool committed, + unsigned arena_ind) +{ + + if (!have_dss || !chunk_in_dss(chunk)) + return (chunk_dalloc_mmap(chunk, size)); + return (true); +} + +void +chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, + size_t size, bool committed) +{ + + chunk_hooks_assure_initialized(arena, chunk_hooks); + chunk_hooks->dalloc(chunk, size, committed, arena->ind); + if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default) + JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size); +} + +static bool +chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset), + length)); +} + +static bool +chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset), + length)); +} + +bool +chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length) +{ + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset), + length)); +} + +static bool +chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset, + length)); +} + +bool 
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, + size_t size, size_t offset, size_t length) +{ + + chunk_hooks_assure_initialized(arena, chunk_hooks); + return (chunk_hooks->purge(chunk, size, offset, length, arena->ind)); +} + +static bool +chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b, + bool committed, unsigned arena_ind) +{ + + if (!maps_coalesce) + return (true); + return (false); +} + +static bool +chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + bool committed, unsigned arena_ind) +{ + + if (!maps_coalesce) + return (true); + if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b)) + return (true); + + return (false); +} + +static rtree_node_elm_t * +chunks_rtree_node_alloc(size_t nelms) +{ + + return ((rtree_node_elm_t *)base_alloc(nelms * + sizeof(rtree_node_elm_t))); +} + +bool +chunk_boot(void) +{ +#ifdef _WIN32 + SYSTEM_INFO info; + GetSystemInfo(&info); + + /* + * Verify actual page size is equal to or an integral multiple of + * configured page size. + */ + if (info.dwPageSize & ((1U << LG_PAGE) - 1)) + return (true); + + /* + * Configure chunksize (if not set) to match granularity (usually 64K), + * so pages_map will always take fast path. + */ + if (!opt_lg_chunk) { + opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity) + - 1; + } +#else + if (!opt_lg_chunk) + opt_lg_chunk = LG_CHUNK_DEFAULT; +#endif + + /* Set variables according to the value of opt_lg_chunk. */ + chunksize = (ZU(1) << opt_lg_chunk); + assert(chunksize >= PAGE); + chunksize_mask = chunksize - 1; + chunk_npages = (chunksize >> LG_PAGE); + + if (have_dss && chunk_dss_boot()) + return (true); + if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) - + opt_lg_chunk, chunks_rtree_node_alloc, NULL)) + return (true); + + return (false); +} + +void +chunk_prefork(void) +{ + + chunk_dss_prefork(); +} + +void +chunk_postfork_parent(void) +{ + + chunk_dss_postfork_parent(); +} + +void +chunk_postfork_child(void) +{ + + chunk_dss_postfork_child(); +} diff --git a/redis.submodule/jemalloc/src/chunk_dss.c b/redis.submodule/jemalloc/src/chunk_dss.c new file mode 100644 index 0000000..61fc916 --- /dev/null +++ b/redis.submodule/jemalloc/src/chunk_dss.c @@ -0,0 +1,214 @@ +#define JEMALLOC_CHUNK_DSS_C_ +#include "jemalloc/internal/jemalloc_internal.h" +/******************************************************************************/ +/* Data. */ + +const char *dss_prec_names[] = { + "disabled", + "primary", + "secondary", + "N/A" +}; + +/* Current dss precedence default, used when creating new arenas. */ +static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; + +/* + * Protects sbrk() calls. This avoids malloc races among threads, though it + * does not protect against races with threads that call sbrk() directly. + */ +static malloc_mutex_t dss_mtx; + +/* Base address of the DSS. */ +static void *dss_base; +/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ +static void *dss_prev; +/* Current upper limit on DSS addresses. 
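Refreshed from sbrk(0) inside + * chunk_alloc_dss(), since code outside jemalloc may move the break directly.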
*/ +static void *dss_max; + +/******************************************************************************/ + +static void * +chunk_dss_sbrk(intptr_t increment) +{ + +#ifdef JEMALLOC_DSS + return (sbrk(increment)); +#else + not_implemented(); + return (NULL); +#endif +} + +dss_prec_t +chunk_dss_prec_get(void) +{ + dss_prec_t ret; + + if (!have_dss) + return (dss_prec_disabled); + malloc_mutex_lock(&dss_mtx); + ret = dss_prec_default; + malloc_mutex_unlock(&dss_mtx); + return (ret); +} + +bool +chunk_dss_prec_set(dss_prec_t dss_prec) +{ + + if (!have_dss) + return (dss_prec != dss_prec_disabled); + malloc_mutex_lock(&dss_mtx); + dss_prec_default = dss_prec; + malloc_mutex_unlock(&dss_mtx); + return (false); +} + +void * +chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit) +{ + cassert(have_dss); + assert(size > 0 && (size & chunksize_mask) == 0); + assert(alignment > 0 && (alignment & chunksize_mask) == 0); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a huge allocation request as a negative increment. + */ + if ((intptr_t)size < 0) + return (NULL); + + malloc_mutex_lock(&dss_mtx); + if (dss_prev != (void *)-1) { + + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. + */ + do { + void *ret, *cpad, *dss_next; + size_t gap_size, cpad_size; + intptr_t incr; + /* Avoid an unnecessary system call. */ + if (new_addr != NULL && dss_max != new_addr) + break; + + /* Get the current end of the DSS. */ + dss_max = chunk_dss_sbrk(0); + + /* Make sure the earlier condition still holds. */ + if (new_addr != NULL && dss_max != new_addr) + break; + + /* + * Calculate how much padding is necessary to + * chunk-align the end of the DSS. + */ + gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & + chunksize_mask; + /* + * Compute how much chunk-aligned pad space (if any) is + * necessary to satisfy alignment. This space can be + * recycled for later use. + */ + cpad = (void *)((uintptr_t)dss_max + gap_size); + ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, + alignment); + cpad_size = (uintptr_t)ret - (uintptr_t)cpad; + dss_next = (void *)((uintptr_t)ret + size); + if ((uintptr_t)ret < (uintptr_t)dss_max || + (uintptr_t)dss_next < (uintptr_t)dss_max) { + /* Wrap-around. */ + malloc_mutex_unlock(&dss_mtx); + return (NULL); + } + incr = gap_size + cpad_size + size; + dss_prev = chunk_dss_sbrk(incr); + if (dss_prev == dss_max) { + /* Success. 
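sbrk() returned the break we computed + * against (dss_max), so no other caller moved it in the + * meantime and [ret, ret + size) now belongs to us.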
*/ + dss_max = dss_next; + malloc_mutex_unlock(&dss_mtx); + if (cpad_size != 0) { + chunk_hooks_t chunk_hooks = + CHUNK_HOOKS_INITIALIZER; + chunk_dalloc_wrapper(arena, + &chunk_hooks, cpad, cpad_size, + true); + } + if (*zero) { + JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( + ret, size); + memset(ret, 0, size); + } + if (!*commit) + *commit = pages_decommit(ret, size); + return (ret); + } + } while (dss_prev != (void *)-1); + } + malloc_mutex_unlock(&dss_mtx); + + return (NULL); +} + +bool +chunk_in_dss(void *chunk) +{ + bool ret; + + cassert(have_dss); + + malloc_mutex_lock(&dss_mtx); + if ((uintptr_t)chunk >= (uintptr_t)dss_base + && (uintptr_t)chunk < (uintptr_t)dss_max) + ret = true; + else + ret = false; + malloc_mutex_unlock(&dss_mtx); + + return (ret); +} + +bool +chunk_dss_boot(void) +{ + + cassert(have_dss); + + if (malloc_mutex_init(&dss_mtx)) + return (true); + dss_base = chunk_dss_sbrk(0); + dss_prev = dss_base; + dss_max = dss_base; + + return (false); +} + +void +chunk_dss_prefork(void) +{ + + if (have_dss) + malloc_mutex_prefork(&dss_mtx); +} + +void +chunk_dss_postfork_parent(void) +{ + + if (have_dss) + malloc_mutex_postfork_parent(&dss_mtx); +} + +void +chunk_dss_postfork_child(void) +{ + + if (have_dss) + malloc_mutex_postfork_child(&dss_mtx); +} + +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/src/chunk_mmap.c b/redis.submodule/jemalloc/src/chunk_mmap.c new file mode 100644 index 0000000..b9ba741 --- /dev/null +++ b/redis.submodule/jemalloc/src/chunk_mmap.c @@ -0,0 +1,80 @@ +#define JEMALLOC_CHUNK_MMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +static void * +chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit) +{ + void *ret; + size_t alloc_size; + + alloc_size = size + alignment - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size < size) + return (NULL); + do { + void *pages; + size_t leadsize; + pages = pages_map(NULL, alloc_size); + if (pages == NULL) + return (NULL); + leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - + (uintptr_t)pages; + ret = pages_trim(pages, alloc_size, leadsize, size); + } while (ret == NULL); + + assert(ret != NULL); + *zero = true; + if (!*commit) + *commit = pages_decommit(ret, size); + return (ret); +} + +void * +chunk_alloc_mmap(size_t size, size_t alignment, bool *zero, bool *commit) +{ + void *ret; + size_t offset; + + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in one or two calls to + * pages_unmap(). + * + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. 
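+ * + * For example, a request for a single chunk first maps exactly size bytes; + * only when ALIGNMENT_ADDR2OFFSET() reports a misaligned result is that + * mapping released and chunk_alloc_mmap_slow() used, which over-maps by + * (alignment - PAGE) bytes and trims the excess with pages_trim().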
+ */ + + assert(alignment != 0); + assert((alignment & chunksize_mask) == 0); + + ret = pages_map(NULL, size); + if (ret == NULL) + return (NULL); + offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); + if (offset != 0) { + pages_unmap(ret, size); + return (chunk_alloc_mmap_slow(size, alignment, zero, commit)); + } + + assert(ret != NULL); + *zero = true; + if (!*commit) + *commit = pages_decommit(ret, size); + return (ret); +} + +bool +chunk_dalloc_mmap(void *chunk, size_t size) +{ + + if (config_munmap) + pages_unmap(chunk, size); + + return (!config_munmap); +} diff --git a/redis.submodule/jemalloc/src/ckh.c b/redis.submodule/jemalloc/src/ckh.c new file mode 100644 index 0000000..53a1c1e --- /dev/null +++ b/redis.submodule/jemalloc/src/ckh.c @@ -0,0 +1,568 @@ +/* + ******************************************************************************* + * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each + * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash + * functions are employed. The original cuckoo hashing algorithm was described + * in: + * + * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms + * 51(2):122-144. + * + * Generalization of cuckoo hashing was discussed in: + * + * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical + * alternative to traditional hash tables. In Proceedings of the 7th + * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, + * January 2006. + * + * This implementation uses precisely two hash functions because that is the + * fewest that can work, and supporting multiple hashes is an implementation + * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) + * that shows approximate expected maximum load factors for various + * configurations: + * + * | #cells/bucket | + * #hashes | 1 | 2 | 4 | 8 | + * --------+-------+-------+-------+-------+ + * 1 | 0.006 | 0.006 | 0.03 | 0.12 | + * 2 | 0.49 | 0.86 |>0.93< |>0.96< | + * 3 | 0.91 | 0.97 | 0.98 | 0.999 | + * 4 | 0.97 | 0.99 | 0.999 | | + * + * The number of cells per bucket is chosen such that a bucket fits in one cache + * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, + * respectively. + * + ******************************************************************************/ +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); +static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); + +/******************************************************************************/ + +/* + * Search bucket for key and return the cell number if found; SIZE_T_MAX + * otherwise. + */ +JEMALLOC_INLINE_C size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) +{ + ckhc_t *cell; + unsigned i; + + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + if (cell->key != NULL && ckh->keycomp(key, cell->key)) + return ((bucket << LG_CKH_BUCKET_CELLS) + i); + } + + return (SIZE_T_MAX); +} + +/* + * Search table for key and return cell number if found; SIZE_T_MAX otherwise. + */ +JEMALLOC_INLINE_C size_t +ckh_isearch(ckh_t *ckh, const void *key) +{ + size_t hashes[2], bucket, cell; + + assert(ckh != NULL); + + ckh->hash(key, hashes); + + /* Search primary bucket. 
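The low lg_curbuckets bits of hashes[0] select + * it; hashes[1] selects the secondary bucket checked below.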
*/ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + if (cell != SIZE_T_MAX) + return (cell); + + /* Search secondary bucket. */ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + return (cell); +} + +JEMALLOC_INLINE_C bool +ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, + const void *data) +{ + ckhc_t *cell; + unsigned offset, i; + + /* + * Cycle through the cells in the bucket, starting at a random position. + * The randomness avoids worst-case search overhead as buckets fill up. + */ + prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; + if (cell->key == NULL) { + cell->key = key; + cell->data = data; + ckh->count++; + return (false); + } + } + + return (true); +} + +/* + * No space is available in bucket. Randomly evict an item, then try to find an + * alternate location for that item. Iteratively repeat this + * eviction/relocation procedure until either success or detection of an + * eviction/relocation bucket cycle. + */ +JEMALLOC_INLINE_C bool +ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, + void const **argdata) +{ + const void *key, *data, *tkey, *tdata; + ckhc_t *cell; + size_t hashes[2], bucket, tbucket; + unsigned i; + + bucket = argbucket; + key = *argkey; + data = *argdata; + while (true) { + /* + * Choose a random item within the bucket to evict. This is + * critical to correct function, because without (eventually) + * evicting all items within a bucket during iteration, it + * would be possible to get stuck in an infinite loop if there + * were an item for which both hashes indicated the same + * bucket. + */ + prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + assert(cell->key != NULL); + + /* Swap cell->{key,data} and {key,data} (evict). */ + tkey = cell->key; tdata = cell->data; + cell->key = key; cell->data = data; + key = tkey; data = tdata; + +#ifdef CKH_COUNT + ckh->nrelocs++; +#endif + + /* Find the alternate bucket for the evicted item. */ + ckh->hash(key, hashes); + tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (tbucket == bucket) { + tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) + - 1); + /* + * It may be that (tbucket == bucket) still, if the + * item's hashes both indicate this bucket. However, + * we are guaranteed to eventually escape this bucket + * during iteration, assuming pseudo-random item + * selection (true randomness would make infinite + * looping a remote possibility). The reason we can + * never get trapped forever is that there are two + * cases: + * + * 1) This bucket == argbucket, so we will quickly + * detect an eviction cycle and terminate. + * 2) An item was evicted to this bucket from another, + * which means that at least one item in this bucket + * has hashes that indicate distinct buckets. + */ + } + /* Check for a cycle. 
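Arriving back at argbucket signals an eviction + * cycle; the insertion fails and ckh_insert() reacts by growing the + * table via ckh_grow() and retrying.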
*/ + if (tbucket == argbucket) { + *argkey = key; + *argdata = data; + return (true); + } + + bucket = tbucket; + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + return (false); + } +} + +JEMALLOC_INLINE_C bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) +{ + size_t hashes[2], bucket; + const void *key = *argkey; + const void *data = *argdata; + + ckh->hash(key, hashes); + + /* Try to insert in primary bucket. */ + bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + return (false); + + /* Try to insert in secondary bucket. */ + bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) + return (false); + + /* + * Try to find a place for this item via iterative eviction/relocation. + */ + return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); +} + +/* + * Try to rebuild the hash table from scratch by inserting all items from the + * old table into the new. + */ +JEMALLOC_INLINE_C bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) +{ + size_t count, i, nins; + const void *key, *data; + + count = ckh->count; + ckh->count = 0; + for (i = nins = 0; nins < count; i++) { + if (aTab[i].key != NULL) { + key = aTab[i].key; + data = aTab[i].data; + if (ckh_try_insert(ckh, &key, &data)) { + ckh->count = count; + return (true); + } + nins++; + } + } + + return (false); +} + +static bool +ckh_grow(tsd_t *tsd, ckh_t *ckh) +{ + bool ret; + ckhc_t *tab, *ttab; + size_t lg_curcells; + unsigned lg_prevbuckets; + +#ifdef CKH_COUNT + ckh->ngrows++; +#endif + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table will have to be doubled more than once in order to create a + * usable table. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; + while (true) { + size_t usize; + + lg_curcells++; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (usize == 0) { + ret = true; + goto label_return; + } + tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, + true, NULL); + if (tab == NULL) { + ret = true; + goto label_return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd, tab, tcache_get(tsd, false), true); + break; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; + } + + ret = false; +label_return: + return (ret); +} + +static void +ckh_shrink(tsd_t *tsd, ckh_t *ckh) +{ + ckhc_t *tab, *ttab; + size_t lg_curcells, usize; + unsigned lg_prevbuckets; + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table rebuild will fail. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (usize == 0) + return; + tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, + NULL); + if (tab == NULL) { + /* + * An OOM error isn't worth propagating, since it doesn't + * prevent this or future operations from proceeding. + */ + return; + } + /* Swap in new table. 
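Mirrors ckh_grow(): install the half-sized + * table, attempt a rebuild, and restore the old table below if the + * rebuild fails.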
*/ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd, tab, tcache_get(tsd, false), true); +#ifdef CKH_COUNT + ckh->nshrinks++; +#endif + return; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; +#ifdef CKH_COUNT + ckh->nshrinkfails++; +#endif +} + +bool +ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp) +{ + bool ret; + size_t mincells, usize; + unsigned lg_mincells; + + assert(minitems > 0); + assert(hash != NULL); + assert(keycomp != NULL); + +#ifdef CKH_COUNT + ckh->ngrows = 0; + ckh->nshrinks = 0; + ckh->nshrinkfails = 0; + ckh->ninserts = 0; + ckh->nrelocs = 0; +#endif + ckh->prng_state = 42; /* Value doesn't really matter. */ + ckh->count = 0; + + /* + * Find the minimum power of 2 that is large enough to fit minitems + * entries. We are using (2+,2) cuckoo hashing, which has an expected + * maximum load factor of at least ~0.86, so 0.75 is a conservative load + * factor that will typically allow mincells items to fit without ever + * growing the table. + */ + assert(LG_CKH_BUCKET_CELLS > 0); + mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; + for (lg_mincells = LG_CKH_BUCKET_CELLS; + (ZU(1) << lg_mincells) < mincells; + lg_mincells++) + ; /* Do nothing. */ + ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->hash = hash; + ckh->keycomp = keycomp; + + usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (usize == 0) { + ret = true; + goto label_return; + } + ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, + NULL); + if (ckh->tab == NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return (ret); +} + +void +ckh_delete(tsd_t *tsd, ckh_t *ckh) +{ + + assert(ckh != NULL); + +#ifdef CKH_VERBOSE + malloc_printf( + "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," + " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," + " nrelocs: %"FMTu64"\n", __func__, ckh, + (unsigned long long)ckh->ngrows, + (unsigned long long)ckh->nshrinks, + (unsigned long long)ckh->nshrinkfails, + (unsigned long long)ckh->ninserts, + (unsigned long long)ckh->nrelocs); +#endif + + idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + if (config_debug) + memset(ckh, 0x5a, sizeof(ckh_t)); +} + +size_t +ckh_count(ckh_t *ckh) +{ + + assert(ckh != NULL); + + return (ckh->count); +} + +bool +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) +{ + size_t i, ncells; + + for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { + if (ckh->tab[i].key != NULL) { + if (key != NULL) + *key = (void *)ckh->tab[i].key; + if (data != NULL) + *data = (void *)ckh->tab[i].data; + *tabind = i + 1; + return (false); + } + } + + return (true); +} + +bool +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) +{ + bool ret; + + assert(ckh != NULL); + assert(ckh_search(ckh, key, NULL, NULL)); + +#ifdef CKH_COUNT + ckh->ninserts++; +#endif + + while (ckh_try_insert(ckh, &key, &data)) { + if (ckh_grow(tsd, ckh)) { + ret = true; + goto label_return; + } + } + + ret = false; +label_return: + return (ret); +} + +bool +ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data) +{ + size_t cell; + + assert(ckh != NULL); + + cell 
= ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + ckh->tab[cell].key = NULL; + ckh->tab[cell].data = NULL; /* Not necessary. */ + + ckh->count--; + /* Try to halve the table if it is less than 1/4 full. */ + if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets + > ckh->lg_minbuckets) { + /* Ignore error due to OOM. */ + ckh_shrink(tsd, ckh); + } + + return (false); + } + + return (true); +} + +bool +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) +{ + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + return (false); + } + + return (true); +} + +void +ckh_string_hash(const void *key, size_t r_hash[2]) +{ + + hash(key, strlen((const char *)key), 0x94122f33U, r_hash); +} + +bool +ckh_string_keycomp(const void *k1, const void *k2) +{ + + assert(k1 != NULL); + assert(k2 != NULL); + + return (strcmp((char *)k1, (char *)k2) ? false : true); +} + +void +ckh_pointer_hash(const void *key, size_t r_hash[2]) +{ + union { + const void *v; + size_t i; + } u; + + assert(sizeof(u.v) == sizeof(u.i)); + u.v = key; + hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash); +} + +bool +ckh_pointer_keycomp(const void *k1, const void *k2) +{ + + return ((k1 == k2) ? true : false); +} diff --git a/redis.submodule/jemalloc/src/ctl.c b/redis.submodule/jemalloc/src/ctl.c new file mode 100644 index 0000000..3de8e60 --- /dev/null +++ b/redis.submodule/jemalloc/src/ctl.c @@ -0,0 +1,2123 @@ +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* + * ctl_mtx protects the following: + * - ctl_stats.* + */ +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static uint64_t ctl_epoch; +static ctl_stats_t ctl_stats; + +/******************************************************************************/ +/* Helpers for named and indexed nodes. */ + +JEMALLOC_INLINE_C const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) +{ + + return ((node->named) ? (const ctl_named_node_t *)node : NULL); +} + +JEMALLOC_INLINE_C const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, int index) +{ + const ctl_named_node_t *children = ctl_named_node(node->children); + + return (children ? &children[index] : NULL); +} + +JEMALLOC_INLINE_C const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) +{ + + return (!node->named ? (const ctl_indexed_node_t *)node : NULL); +} + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +#define CTL_PROTO(n) \ +static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +static const ctl_named_node_t *n##_index(const size_t *mib, \ + size_t miblen, size_t i); + +static bool ctl_arena_init(ctl_arena_stats_t *astats); +static void ctl_arena_clear(ctl_arena_stats_t *astats); +static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, + arena_t *arena); +static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, + ctl_arena_stats_t *astats); +static void ctl_arena_refresh(arena_t *arena, unsigned i); +static bool ctl_grow(void); +static void ctl_refresh(void); +static bool ctl_init(void); +static int ctl_lookup(const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp); + +CTL_PROTO(version) +CTL_PROTO(epoch) +CTL_PROTO(thread_tcache_enabled) +CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_prof_name) +CTL_PROTO(thread_prof_active) +CTL_PROTO(thread_arena) +CTL_PROTO(thread_allocated) +CTL_PROTO(thread_allocatedp) +CTL_PROTO(thread_deallocated) +CTL_PROTO(thread_deallocatedp) +CTL_PROTO(config_cache_oblivious) +CTL_PROTO(config_debug) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_munmap) +CTL_PROTO(config_prof) +CTL_PROTO(config_prof_libgcc) +CTL_PROTO(config_prof_libunwind) +CTL_PROTO(config_stats) +CTL_PROTO(config_tcache) +CTL_PROTO(config_tls) +CTL_PROTO(config_utrace) +CTL_PROTO(config_valgrind) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) +CTL_PROTO(opt_dss) +CTL_PROTO(opt_lg_chunk) +CTL_PROTO(opt_narenas) +CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_junk) +CTL_PROTO(opt_zero) +CTL_PROTO(opt_quarantine) +CTL_PROTO(opt_redzone) +CTL_PROTO(opt_utrace) +CTL_PROTO(opt_xmalloc) +CTL_PROTO(opt_tcache) +CTL_PROTO(opt_lg_tcache_max) +CTL_PROTO(opt_prof) +CTL_PROTO(opt_prof_prefix) +CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_prof_thread_active_init) +CTL_PROTO(opt_lg_prof_sample) +CTL_PROTO(opt_lg_prof_interval) +CTL_PROTO(opt_prof_gdump) +CTL_PROTO(opt_prof_final) +CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_accum) +CTL_PROTO(tcache_create) +CTL_PROTO(tcache_flush) +CTL_PROTO(tcache_destroy) +CTL_PROTO(arena_i_purge) +static void arena_purge(unsigned arena_ind); +CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_lg_dirty_mult) +CTL_PROTO(arena_i_chunk_hooks) +INDEX_PROTO(arena_i) +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) +CTL_PROTO(arenas_bin_i_run_size) +INDEX_PROTO(arenas_bin_i) +CTL_PROTO(arenas_lrun_i_size) +INDEX_PROTO(arenas_lrun_i) +CTL_PROTO(arenas_hchunk_i_size) +INDEX_PROTO(arenas_hchunk_i) +CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_initialized) +CTL_PROTO(arenas_lg_dirty_mult) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_page) +CTL_PROTO(arenas_tcache_max) +CTL_PROTO(arenas_nbins) +CTL_PROTO(arenas_nhbins) +CTL_PROTO(arenas_nlruns) +CTL_PROTO(arenas_nhchunks) +CTL_PROTO(arenas_extend) +CTL_PROTO(prof_thread_active_init) +CTL_PROTO(prof_active) +CTL_PROTO(prof_dump) +CTL_PROTO(prof_gdump) +CTL_PROTO(prof_reset) +CTL_PROTO(prof_interval) +CTL_PROTO(lg_prof_sample) +CTL_PROTO(stats_arenas_i_small_allocated) +CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) +CTL_PROTO(stats_arenas_i_huge_allocated) +CTL_PROTO(stats_arenas_i_huge_nmalloc) 
+CTL_PROTO(stats_arenas_i_huge_ndalloc) +CTL_PROTO(stats_arenas_i_huge_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_nmalloc) +CTL_PROTO(stats_arenas_i_bins_j_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_curregs) +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) +CTL_PROTO(stats_arenas_i_bins_j_nruns) +CTL_PROTO(stats_arenas_i_bins_j_nreruns) +CTL_PROTO(stats_arenas_i_bins_j_curruns) +INDEX_PROTO(stats_arenas_i_bins_j) +CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) +CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) +CTL_PROTO(stats_arenas_i_lruns_j_nrequests) +CTL_PROTO(stats_arenas_i_lruns_j_curruns) +INDEX_PROTO(stats_arenas_i_lruns_j) +CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc) +CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc) +CTL_PROTO(stats_arenas_i_hchunks_j_nrequests) +CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks) +INDEX_PROTO(stats_arenas_i_hchunks_j) +CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_dss) +CTL_PROTO(stats_arenas_i_lg_dirty_mult) +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_npurge) +CTL_PROTO(stats_arenas_i_nmadvise) +CTL_PROTO(stats_arenas_i_purged) +CTL_PROTO(stats_arenas_i_metadata_mapped) +CTL_PROTO(stats_arenas_i_metadata_allocated) +INDEX_PROTO(stats_arenas_i) +CTL_PROTO(stats_cactive) +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) +CTL_PROTO(stats_metadata) +CTL_PROTO(stats_resident) +CTL_PROTO(stats_mapped) + +/******************************************************************************/ +/* mallctl tree. */ + +/* Maximum tree depth. */ +#define CTL_MAX_DEPTH 6 + +#define NAME(n) {true}, n +#define CHILD(t, c) \ + sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ + (ctl_node_t *)c##_node, \ + NULL +#define CTL(c) 0, NULL, c##_ctl + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. 
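+ * An indexed node resolves a numeric path component at lookup time: e.g. the + * <i> in "arena.<i>.purge" or "stats.arenas.<i>.mapped" is handed to the + * node's *_index() callback instead of being matched by name.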
+ */ +#define INDEX(i) {false}, i##_index + +static const ctl_named_node_t thread_tcache_node[] = { + {NAME("enabled"), CTL(thread_tcache_enabled)}, + {NAME("flush"), CTL(thread_tcache_flush)} +}; + +static const ctl_named_node_t thread_prof_node[] = { + {NAME("name"), CTL(thread_prof_name)}, + {NAME("active"), CTL(thread_prof_active)} +}; + +static const ctl_named_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)}, + {NAME("allocated"), CTL(thread_allocated)}, + {NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)}, + {NAME("tcache"), CHILD(named, thread_tcache)}, + {NAME("prof"), CHILD(named, thread_prof)} +}; + +static const ctl_named_node_t config_node[] = { + {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, + {NAME("debug"), CTL(config_debug)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("munmap"), CTL(config_munmap)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("tcache"), CTL(config_tcache)}, + {NAME("tls"), CTL(config_tls)}, + {NAME("utrace"), CTL(config_utrace)}, + {NAME("valgrind"), CTL(config_valgrind)}, + {NAME("xmalloc"), CTL(config_xmalloc)} +}; + +static const ctl_named_node_t opt_node[] = { + {NAME("abort"), CTL(opt_abort)}, + {NAME("dss"), CTL(opt_dss)}, + {NAME("lg_chunk"), CTL(opt_lg_chunk)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)}, + {NAME("quarantine"), CTL(opt_quarantine)}, + {NAME("redzone"), CTL(opt_redzone)}, + {NAME("utrace"), CTL(opt_utrace)}, + {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_final"), CTL(opt_prof_final)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)} +}; + +static const ctl_named_node_t tcache_node[] = { + {NAME("create"), CTL(tcache_create)}, + {NAME("flush"), CTL(tcache_flush)}, + {NAME("destroy"), CTL(tcache_destroy)} +}; + +static const ctl_named_node_t arena_i_node[] = { + {NAME("purge"), CTL(arena_i_purge)}, + {NAME("dss"), CTL(arena_i_dss)}, + {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, + {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} +}; +static const ctl_named_node_t super_arena_i_node[] = { + {NAME(""), CHILD(named, arena_i)} +}; + +static const ctl_indexed_node_t arena_node[] = { + {INDEX(arena_i)} +}; + +static const ctl_named_node_t arenas_bin_i_node[] = { + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("run_size"), CTL(arenas_bin_i_run_size)} +}; +static const ctl_named_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(named, arenas_bin_i)} +}; + +static const ctl_indexed_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + +static const ctl_named_node_t arenas_lrun_i_node[] = { + {NAME("size"), CTL(arenas_lrun_i_size)} +}; +static 
const ctl_named_node_t super_arenas_lrun_i_node[] = { + {NAME(""), CHILD(named, arenas_lrun_i)} +}; + +static const ctl_indexed_node_t arenas_lrun_node[] = { + {INDEX(arenas_lrun_i)} +}; + +static const ctl_named_node_t arenas_hchunk_i_node[] = { + {NAME("size"), CTL(arenas_hchunk_i_size)} +}; +static const ctl_named_node_t super_arenas_hchunk_i_node[] = { + {NAME(""), CHILD(named, arenas_hchunk_i)} +}; + +static const ctl_indexed_node_t arenas_hchunk_node[] = { + {INDEX(arenas_hchunk_i)} +}; + +static const ctl_named_node_t arenas_node[] = { + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("initialized"), CTL(arenas_initialized)}, + {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("page"), CTL(arenas_page)}, + {NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, + {NAME("nlruns"), CTL(arenas_nlruns)}, + {NAME("lrun"), CHILD(indexed, arenas_lrun)}, + {NAME("nhchunks"), CTL(arenas_nhchunks)}, + {NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, + {NAME("extend"), CTL(arenas_extend)} +}; + +static const ctl_named_node_t prof_node[] = { + {NAME("thread_active_init"), CTL(prof_thread_active_init)}, + {NAME("active"), CTL(prof_active)}, + {NAME("dump"), CTL(prof_dump)}, + {NAME("gdump"), CTL(prof_gdump)}, + {NAME("reset"), CTL(prof_reset)}, + {NAME("interval"), CTL(prof_interval)}, + {NAME("lg_sample"), CTL(lg_prof_sample)} +}; + +static const ctl_named_node_t stats_arenas_i_metadata_node[] = { + {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)}, + {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)} +}; + +static const ctl_named_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} +}; + +static const ctl_named_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} +}; + +static const ctl_named_node_t stats_arenas_i_huge_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} +}; + +static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, + {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, + {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} +}; +static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_bins_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + +static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, + {NAME("ndalloc"), 
CTL(stats_arenas_i_lruns_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, + {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +}; +static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { + {INDEX(stats_arenas_i_lruns_j)} +}; + +static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)}, + {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)} +}; +static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { + {INDEX(stats_arenas_i_hchunks_j)} +}; + +static const ctl_named_node_t stats_arenas_i_node[] = { + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("dss"), CTL(stats_arenas_i_dss)}, + {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("npurge"), CTL(stats_arenas_i_npurge)}, + {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, + {NAME("purged"), CTL(stats_arenas_i_purged)}, + {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}, + {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} +}; +static const ctl_named_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(named, stats_arenas_i)} +}; + +static const ctl_indexed_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + +static const ctl_named_node_t stats_node[] = { + {NAME("cactive"), CTL(stats_cactive)}, + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("metadata"), CTL(stats_metadata)}, + {NAME("resident"), CTL(stats_resident)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)} +}; + +static const ctl_named_node_t root_node[] = { + {NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, + {NAME("thread"), CHILD(named, thread)}, + {NAME("config"), CHILD(named, config)}, + {NAME("opt"), CHILD(named, opt)}, + {NAME("tcache"), CHILD(named, tcache)}, + {NAME("arena"), CHILD(indexed, arena)}, + {NAME("arenas"), CHILD(named, arenas)}, + {NAME("prof"), CHILD(named, prof)}, + {NAME("stats"), CHILD(named, stats)} +}; +static const ctl_named_node_t super_root_node[] = { + {NAME(""), CHILD(named, root)} +}; + +#undef NAME +#undef CHILD +#undef CTL +#undef INDEX + +/******************************************************************************/ + +static bool +ctl_arena_init(ctl_arena_stats_t *astats) +{ + + if (astats->lstats == NULL) { + astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses * + sizeof(malloc_large_stats_t)); + if (astats->lstats == NULL) + return (true); + } + + if (astats->hstats == NULL) { + astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses * + sizeof(malloc_huge_stats_t)); + if (astats->hstats == NULL) + return (true); + } + + return (false); +} + +static void 
+ctl_arena_clear(ctl_arena_stats_t *astats) +{ + + astats->dss = dss_prec_names[dss_prec_limit]; + astats->lg_dirty_mult = -1; + astats->pactive = 0; + astats->pdirty = 0; + if (config_stats) { + memset(&astats->astats, 0, sizeof(arena_stats_t)); + astats->allocated_small = 0; + astats->nmalloc_small = 0; + astats->ndalloc_small = 0; + astats->nrequests_small = 0; + memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); + memset(astats->lstats, 0, nlclasses * + sizeof(malloc_large_stats_t)); + memset(astats->hstats, 0, nhclasses * + sizeof(malloc_huge_stats_t)); + } +} + +static void +ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) +{ + unsigned i; + + arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult, + &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, + cstats->lstats, cstats->hstats); + + for (i = 0; i < NBINS; i++) { + cstats->allocated_small += cstats->bstats[i].curregs * + index2size(i); + cstats->nmalloc_small += cstats->bstats[i].nmalloc; + cstats->ndalloc_small += cstats->bstats[i].ndalloc; + cstats->nrequests_small += cstats->bstats[i].nrequests; + } +} + +static void +ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) +{ + unsigned i; + + sstats->pactive += astats->pactive; + sstats->pdirty += astats->pdirty; + + sstats->astats.mapped += astats->astats.mapped; + sstats->astats.npurge += astats->astats.npurge; + sstats->astats.nmadvise += astats->astats.nmadvise; + sstats->astats.purged += astats->astats.purged; + + sstats->astats.metadata_mapped += astats->astats.metadata_mapped; + sstats->astats.metadata_allocated += astats->astats.metadata_allocated; + + sstats->allocated_small += astats->allocated_small; + sstats->nmalloc_small += astats->nmalloc_small; + sstats->ndalloc_small += astats->ndalloc_small; + sstats->nrequests_small += astats->nrequests_small; + + sstats->astats.allocated_large += astats->astats.allocated_large; + sstats->astats.nmalloc_large += astats->astats.nmalloc_large; + sstats->astats.ndalloc_large += astats->astats.ndalloc_large; + sstats->astats.nrequests_large += astats->astats.nrequests_large; + + sstats->astats.allocated_huge += astats->astats.allocated_huge; + sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; + sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; + + for (i = 0; i < NBINS; i++) { + sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sstats->bstats[i].nrequests += astats->bstats[i].nrequests; + sstats->bstats[i].curregs += astats->bstats[i].curregs; + if (config_tcache) { + sstats->bstats[i].nfills += astats->bstats[i].nfills; + sstats->bstats[i].nflushes += + astats->bstats[i].nflushes; + } + sstats->bstats[i].nruns += astats->bstats[i].nruns; + sstats->bstats[i].reruns += astats->bstats[i].reruns; + sstats->bstats[i].curruns += astats->bstats[i].curruns; + } + + for (i = 0; i < nlclasses; i++) { + sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; + sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; + sstats->lstats[i].nrequests += astats->lstats[i].nrequests; + sstats->lstats[i].curruns += astats->lstats[i].curruns; + } + + for (i = 0; i < nhclasses; i++) { + sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; + sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; + sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks; + } +} + +static void +ctl_arena_refresh(arena_t *arena, unsigned i) +{ + ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; + 
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; + + ctl_arena_clear(astats); + + sstats->nthreads += astats->nthreads; + if (config_stats) { + ctl_arena_stats_amerge(astats, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_smerge(sstats, astats); + } else { + astats->pactive += arena->nactive; + astats->pdirty += arena->ndirty; + /* Merge into sum stats as well. */ + sstats->pactive += arena->nactive; + sstats->pdirty += arena->ndirty; + } +} + +static bool +ctl_grow(void) +{ + ctl_arena_stats_t *astats; + + /* Initialize new arena. */ + if (arena_init(ctl_stats.narenas) == NULL) + return (true); + + /* Allocate extended arena stats. */ + astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) * + sizeof(ctl_arena_stats_t)); + if (astats == NULL) + return (true); + + /* Initialize the new astats element. */ + memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * + sizeof(ctl_arena_stats_t)); + memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); + if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { + a0dalloc(astats); + return (true); + } + /* Swap merged stats to their new location. */ + { + ctl_arena_stats_t tstats; + memcpy(&tstats, &astats[ctl_stats.narenas], + sizeof(ctl_arena_stats_t)); + memcpy(&astats[ctl_stats.narenas], + &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); + memcpy(&astats[ctl_stats.narenas + 1], &tstats, + sizeof(ctl_arena_stats_t)); + } + a0dalloc(ctl_stats.arenas); + ctl_stats.arenas = astats; + ctl_stats.narenas++; + + return (false); +} + +static void +ctl_refresh(void) +{ + tsd_t *tsd; + unsigned i; + bool refreshed; + VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); + + /* + * Clear sum stats, since they will be merged into by + * ctl_arena_refresh(). 
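+ * The element at index narenas is the extra slot allocated in ctl_init(); + * ctl_arena_stats_smerge() folds each per-arena snapshot into it, so it + * ends up holding totals summed across all arenas.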
+ */ + ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; + ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); + + tsd = tsd_fetch(); + for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { + tarenas[i] = arena_get(tsd, i, false, false); + if (tarenas[i] == NULL && !refreshed) { + tarenas[i] = arena_get(tsd, i, false, true); + refreshed = true; + } + } + + for (i = 0; i < ctl_stats.narenas; i++) { + if (tarenas[i] != NULL) + ctl_stats.arenas[i].nthreads = arena_nbound(i); + else + ctl_stats.arenas[i].nthreads = 0; + } + + for (i = 0; i < ctl_stats.narenas; i++) { + bool initialized = (tarenas[i] != NULL); + + ctl_stats.arenas[i].initialized = initialized; + if (initialized) + ctl_arena_refresh(tarenas[i], i); + } + + if (config_stats) { + size_t base_allocated, base_resident, base_mapped; + base_stats_get(&base_allocated, &base_resident, &base_mapped); + ctl_stats.allocated = + ctl_stats.arenas[ctl_stats.narenas].allocated_small + + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; + ctl_stats.active = + (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); + ctl_stats.metadata = base_allocated + + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + + ctl_stats.arenas[ctl_stats.narenas].astats + .metadata_allocated; + ctl_stats.resident = base_resident + + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + + ((ctl_stats.arenas[ctl_stats.narenas].pactive + + ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); + ctl_stats.mapped = base_mapped + + ctl_stats.arenas[ctl_stats.narenas].astats.mapped; + } + + ctl_epoch++; +} + +static bool +ctl_init(void) +{ + bool ret; + + malloc_mutex_lock(&ctl_mtx); + if (!ctl_initialized) { + /* + * Allocate space for one extra arena stats element, which + * contains summed stats across all arenas. + */ + ctl_stats.narenas = narenas_total_get(); + ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc( + (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); + if (ctl_stats.arenas == NULL) { + ret = true; + goto label_return; + } + memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * + sizeof(ctl_arena_stats_t)); + + /* + * Initialize all stats structures, regardless of whether they + * ever get used. Lazy initialization would allow errors to + * cause inconsistent state to be viewable by the application. + */ + if (config_stats) { + unsigned i; + for (i = 0; i <= ctl_stats.narenas; i++) { + if (ctl_arena_init(&ctl_stats.arenas[i])) { + unsigned j; + for (j = 0; j < i; j++) { + a0dalloc( + ctl_stats.arenas[j].lstats); + a0dalloc( + ctl_stats.arenas[j].hstats); + } + a0dalloc(ctl_stats.arenas); + ctl_stats.arenas = NULL; + ret = true; + goto label_return; + } + } + } + ctl_stats.arenas[ctl_stats.narenas].initialized = true; + + ctl_epoch = 0; + ctl_refresh(); + ctl_initialized = true; + } + + ret = false; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, + size_t *depthp) +{ + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; + const ctl_named_node_t *node; + + elm = name; + /* Equivalent to strchrnul(). */ + dot = ((tdot = strchr(elm, '.')) != NULL) ? 
tdot : strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + if (elen == 0) { + ret = ENOENT; + goto label_return; + } + node = super_root_node; + for (i = 0; i < *depthp; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + const ctl_named_node_t *pnode = node; + + /* Children are named. */ + for (j = 0; j < node->nchildren; j++) { + const ctl_named_node_t *child = + ctl_named_children(node, j); + if (strlen(child->name) == elen && + strncmp(elm, child->name, elen) == 0) { + node = child; + if (nodesp != NULL) + nodesp[i] = + (const ctl_node_t *)node; + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto label_return; + } + } else { + uintmax_t index; + const ctl_indexed_node_t *inode; + + /* Children are indexed. */ + index = malloc_strtoumax(elm, NULL, 10); + if (index == UINTMAX_MAX || index > SIZE_T_MAX) { + ret = ENOENT; + goto label_return; + } + + inode = ctl_indexed_node(node->children); + node = inode->index(mibp, *depthp, (size_t)index); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + + if (nodesp != NULL) + nodesp[i] = (const ctl_node_t *)node; + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto label_return; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto label_return; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +label_return: + return (ret); +} + +int +ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + const ctl_named_node_t *node; + + if (!ctl_initialized && ctl_init()) { + ret = EAGAIN; + goto label_return; + } + + depth = CTL_MAX_DEPTH; + ret = ctl_lookup(name, nodes, mib, &depth); + if (ret != 0) + goto label_return; + + node = ctl_named_node(nodes[depth-1]); + if (node != NULL && node->ctl) + ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); + else { + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + +int +ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + int ret; + + if (!ctl_initialized && ctl_init()) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookup(name, NULL, mibp, miblenp); +label_return: + return(ret); +} + +int +ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const ctl_named_node_t *node; + size_t i; + + if (!ctl_initialized && ctl_init()) { + ret = EAGAIN; + goto label_return; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + /* Children are named. */ + if (node->nchildren <= mib[i]) { + ret = ENOENT; + goto label_return; + } + node = ctl_named_children(node, mib[i]); + } else { + const ctl_indexed_node_t *inode; + + /* Indexed element. 
*/ + inode = ctl_indexed_node(node->children); + node = inode->index(mib, miblen, mib[i]); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + } + } + + /* Call the ctl function. */ + if (node && node->ctl) + ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); + else { + /* Partial MIB. */ + ret = ENOENT; + } + +label_return: + return(ret); +} + +bool +ctl_boot(void) +{ + + if (malloc_mutex_init(&ctl_mtx)) + return (true); + + ctl_initialized = false; + + return (false); +} + +void +ctl_prefork(void) +{ + + malloc_mutex_prefork(&ctl_mtx); +} + +void +ctl_postfork_parent(void) +{ + + malloc_mutex_postfork_parent(&ctl_mtx); +} + +void +ctl_postfork_child(void) +{ + + malloc_mutex_postfork_child(&ctl_mtx); +} + +/******************************************************************************/ +/* *_ctl() functions. */ + +#define READONLY() do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define WRITEONLY() do { \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ_XOR_WRITE() do { \ + if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ + newlen != 0)) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ(v, t) do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&(v), copylen); \ + ret = EINVAL; \ + goto label_return; \ + } \ + *(t *)oldp = (v); \ + } \ +} while (0) + +#define WRITE(v, t) do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ + } \ +} while (0) + +/* + * There's a lot of code duplication in the following macros due to limitations + * in how nested cpp macros are expanded. + */ +#define CTL_RO_CLGEN(c, l, n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if (!(c)) \ + return (ENOENT); \ + if (l) \ + malloc_mutex_lock(&ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + if (l) \ + malloc_mutex_unlock(&ctl_mtx); \ + return (ret); \ +} + +#define CTL_RO_CGEN(c, n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if (!(c)) \ + return (ENOENT); \ + malloc_mutex_lock(&ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(&ctl_mtx); \ + return (ret); \ +} + +#define CTL_RO_GEN(n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + malloc_mutex_lock(&ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(&ctl_mtx); \ + return (ret); \ +} + +/* + * ctl_mtx is not acquired, under the assumption that no pertinent data will + * mutate during the call. 
+ */ +#define CTL_RO_NL_CGEN(c, n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if (!(c)) \ + return (ENOENT); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_RO_NL_GEN(n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + tsd_t *tsd; \ + \ + if (!(c)) \ + return (ENOENT); \ + READONLY(); \ + tsd = tsd_fetch(); \ + oldval = (m(tsd)); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_RO_BOOL_CONFIG_GEN(n) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + bool oldval; \ + \ + READONLY(); \ + oldval = n; \ + READ(oldval, bool); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +/******************************************************************************/ + +CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) + +static int +epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + UNUSED uint64_t newval; + + malloc_mutex_lock(&ctl_mtx); + WRITE(newval, uint64_t); + if (newp != NULL) + ctl_refresh(); + READ(ctl_epoch, uint64_t); + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +/******************************************************************************/ + +CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious) +CTL_RO_BOOL_CONFIG_GEN(config_debug) +CTL_RO_BOOL_CONFIG_GEN(config_fill) +CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) +CTL_RO_BOOL_CONFIG_GEN(config_munmap) +CTL_RO_BOOL_CONFIG_GEN(config_prof) +CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) +CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) +CTL_RO_BOOL_CONFIG_GEN(config_stats) +CTL_RO_BOOL_CONFIG_GEN(config_tcache) +CTL_RO_BOOL_CONFIG_GEN(config_tls) +CTL_RO_BOOL_CONFIG_GEN(config_utrace) +CTL_RO_BOOL_CONFIG_GEN(config_valgrind) +CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) + +/******************************************************************************/ + +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) +CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) +CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) +CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) +CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) +CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) +CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) +CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) +CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) +CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) 
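For orientation only (not part of this patch): the read-only nodes generated by the macros above are normally exercised through jemalloc's public mallctl() entry point, which resolves the dotted name via ctl_byname()/ctl_lookup() and then invokes the generated *_ctl handler. A minimal sketch, assuming a standard jemalloc install with unprefixed public symbols (the copy bundled with Redis is typically built with the je_ prefix, e.g. je_mallctl):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool abort_on_err;
	const char *version;
	size_t sz;

	/* READ(): oldp/oldlenp must be sized exactly to the node's type. */
	sz = sizeof(abort_on_err);
	if (mallctl("opt.abort", &abort_on_err, &sz, NULL, 0) == 0)
		printf("opt.abort: %s\n", abort_on_err ? "true" : "false");

	/* "version" is CTL_RO_NL_GEN(version, ...), i.e. a const char * value. */
	sz = sizeof(version);
	if (mallctl("version", &version, &sz, NULL, 0) == 0)
		printf("version: %s\n", version);

	/*
	 * READONLY() nodes such as opt.abort reject any new value with EPERM,
	 * so newp/newlen are only supplied to writable nodes.
	 */
	return (0);
}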
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, + opt_prof_thread_active_init, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) + +/******************************************************************************/ + +static int +thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + tsd_t *tsd; + arena_t *oldarena; + unsigned newind, oldind; + + tsd = tsd_fetch(); + oldarena = arena_choose(tsd, NULL); + if (oldarena == NULL) + return (EAGAIN); + + malloc_mutex_lock(&ctl_mtx); + newind = oldind = oldarena->ind; + WRITE(newind, unsigned); + READ(oldind, unsigned); + if (newind != oldind) { + arena_t *newarena; + + if (newind >= ctl_stats.narenas) { + /* New arena index is out of range. */ + ret = EFAULT; + goto label_return; + } + + /* Initialize arena if necessary. */ + newarena = arena_get(tsd, newind, true, true); + if (newarena == NULL) { + ret = EAGAIN; + goto label_return; + } + /* Set new arena/tcache associations. */ + arena_migrate(tsd, oldind, newind); + if (config_tcache) { + tcache_t *tcache = tsd_tcache_get(tsd); + if (tcache != NULL) { + tcache_arena_reassociate(tcache, oldarena, + newarena); + } + } + } + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, + uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, + tsd_thread_deallocatedp_get, uint64_t *) + +static int +thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_tcache) + return (ENOENT); + + oldval = tcache_enabled_get(); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + tcache_enabled_set(*(bool *)newp); + } + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (!config_tcache) + return (ENOENT); + + READONLY(); + WRITEONLY(); + + tcache_flush(); + + ret = 0; +label_return: + return (ret); +} + +static int +thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (!config_prof) + return (ENOENT); + + READ_XOR_WRITE(); + + if (newp != NULL) { + tsd_t *tsd; + + if (newlen != sizeof(const char *)) { + ret = EINVAL; + goto label_return; + } + + tsd = tsd_fetch(); + + if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != + 0) + goto label_return; + } else { + const char *oldname = prof_thread_name_get(); + READ(oldname, const char *); + } + + ret = 0; +label_return: + return (ret); +} + +static int +thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, 
size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + oldval = prof_thread_active_get(); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + if (prof_thread_active_set(*(bool *)newp)) { + ret = EAGAIN; + goto label_return; + } + } + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +/******************************************************************************/ + +static int +tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + tsd_t *tsd; + unsigned tcache_ind; + + if (!config_tcache) + return (ENOENT); + + tsd = tsd_fetch(); + + malloc_mutex_lock(&ctl_mtx); + READONLY(); + if (tcaches_create(tsd, &tcache_ind)) { + ret = EFAULT; + goto label_return; + } + READ(tcache_ind, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + tsd_t *tsd; + unsigned tcache_ind; + + if (!config_tcache) + return (ENOENT); + + tsd = tsd_fetch(); + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_flush(tsd, tcache_ind); + + ret = 0; +label_return: + return (ret); +} + +static int +tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + tsd_t *tsd; + unsigned tcache_ind; + + if (!config_tcache) + return (ENOENT); + + tsd = tsd_fetch(); + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_destroy(tsd, tcache_ind); + + ret = 0; +label_return: + return (ret); +} + +/******************************************************************************/ + +/* ctl_mutex must be held during execution of this function. 
*/ +static void +arena_purge(unsigned arena_ind) +{ + tsd_t *tsd; + unsigned i; + bool refreshed; + VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); + + tsd = tsd_fetch(); + for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { + tarenas[i] = arena_get(tsd, i, false, false); + if (tarenas[i] == NULL && !refreshed) { + tarenas[i] = arena_get(tsd, i, false, true); + refreshed = true; + } + } + + if (arena_ind == ctl_stats.narenas) { + unsigned i; + for (i = 0; i < ctl_stats.narenas; i++) { + if (tarenas[i] != NULL) + arena_purge_all(tarenas[i]); + } + } else { + assert(arena_ind < ctl_stats.narenas); + if (tarenas[arena_ind] != NULL) + arena_purge_all(tarenas[arena_ind]); + } +} + +static int +arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + + READONLY(); + WRITEONLY(); + malloc_mutex_lock(&ctl_mtx); + arena_purge(mib[1]); + malloc_mutex_unlock(&ctl_mtx); + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const char *dss = NULL; + unsigned arena_ind = mib[1]; + dss_prec_t dss_prec_old = dss_prec_limit; + dss_prec_t dss_prec = dss_prec_limit; + + malloc_mutex_lock(&ctl_mtx); + WRITE(dss, const char *); + if (dss != NULL) { + int i; + bool match = false; + + for (i = 0; i < dss_prec_limit; i++) { + if (strcmp(dss_prec_names[i], dss) == 0) { + dss_prec = i; + match = true; + break; + } + } + + if (!match) { + ret = EINVAL; + goto label_return; + } + } + + if (arena_ind < ctl_stats.narenas) { + arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true); + if (arena == NULL || (dss_prec != dss_prec_limit && + arena_dss_prec_set(arena, dss_prec))) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = arena_dss_prec_get(arena); + } else { + if (dss_prec != dss_prec_limit && + chunk_dss_prec_set(dss_prec)) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = chunk_dss_prec_get(); + } + + dss = dss_prec_names[dss_prec_old]; + READ(dss, const char *); + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind = mib[1]; + arena_t *arena; + + arena = arena_get(tsd_fetch(), arena_ind, false, true); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_lg_dirty_mult_get(arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + +static int +arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned arena_ind = mib[1]; + arena_t *arena; + + malloc_mutex_lock(&ctl_mtx); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { + if (newp != NULL) { + chunk_hooks_t old_chunk_hooks, new_chunk_hooks; + WRITE(new_chunk_hooks, chunk_hooks_t); + old_chunk_hooks = chunk_hooks_set(arena, + &new_chunk_hooks); + READ(old_chunk_hooks, chunk_hooks_t); + } else { + chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); + READ(old_chunk_hooks, chunk_hooks_t); + } + 
} else { + ret = EFAULT; + goto label_return; + } + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static const ctl_named_node_t * +arena_i_index(const size_t *mib, size_t miblen, size_t i) +{ + const ctl_named_node_t * ret; + + malloc_mutex_lock(&ctl_mtx); + if (i > ctl_stats.narenas) { + ret = NULL; + goto label_return; + } + + ret = super_arena_i_node; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +/******************************************************************************/ + +static int +arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned narenas; + + malloc_mutex_lock(&ctl_mtx); + READONLY(); + if (*oldlenp != sizeof(unsigned)) { + ret = EINVAL; + goto label_return; + } + narenas = ctl_stats.narenas; + READ(narenas, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned nread, i; + + malloc_mutex_lock(&ctl_mtx); + READONLY(); + if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { + ret = EINVAL; + nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) + ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; + } else { + ret = 0; + nread = ctl_stats.narenas; + } + + for (i = 0; i < nread; i++) + ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; + +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = arena_lg_dirty_mult_default_get(); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return (ret); +} + +CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_NL_GEN(arenas_page, PAGE, size_t) +CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) +CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +static const ctl_named_node_t * +arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > NBINS) + return (NULL); + return (super_arenas_bin_i_node); +} + +CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) +CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) +static const ctl_named_node_t * +arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nlclasses) + return (NULL); + return (super_arenas_lrun_i_node); +} + +CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) +CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t) +static const ctl_named_node_t * +arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nhclasses) + return (NULL); + return (super_arenas_hchunk_i_node); +} + +static int +arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + unsigned narenas; + + 
malloc_mutex_lock(&ctl_mtx); + READONLY(); + if (ctl_grow()) { + ret = EAGAIN; + goto label_return; + } + narenas = ctl_stats.narenas - 1; + READ(narenas, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +/******************************************************************************/ + +static int +prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_thread_active_init_set(*(bool *)newp); + } else + oldval = prof_thread_active_init_get(); + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_active_set(*(bool *)newp); + } else + oldval = prof_active_get(); + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const char *filename = NULL; + + if (!config_prof) + return (ENOENT); + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_mdump(filename)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return (ret); +} + +static int +prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (!config_prof) + return (ENOENT); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_gdump_set(*(bool *)newp); + } else + oldval = prof_gdump_get(); + READ(oldval, bool); + + ret = 0; +label_return: + return (ret); +} + +static int +prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + size_t lg_sample = lg_prof_sample; + tsd_t *tsd; + + if (!config_prof) + return (ENOENT); + + WRITEONLY(); + WRITE(lg_sample, size_t); + if (lg_sample >= (sizeof(uint64_t) << 3)) + lg_sample = (sizeof(uint64_t) << 3) - 1; + + tsd = tsd_fetch(); + + prof_reset(tsd, lg_sample); + + ret = 0; +label_return: + return (ret); +} + +CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) +CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) + +/******************************************************************************/ + +CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) +CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) + +CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) +CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, + ssize_t) +CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, 
ctl_stats.arenas[mib[2]].pdirty, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + ctl_stats.arenas[mib[2]].astats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, + ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, + ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_purged, + ctl_stats.arenas[mib[2]].astats.purged, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, + ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated, + ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, + ctl_stats.arenas[mib[2]].allocated_small, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, + ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, + ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, + ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, + ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, + ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. 
*/ + +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, + ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t) +CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) +CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + +static const ctl_named_node_t * +stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) +{ + + if (j > NBINS) + return (NULL); + return (super_stats_arenas_i_bins_j_node); +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, + ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) + +static const ctl_named_node_t * +stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) +{ + + if (j > nlclasses) + return (NULL); + return (super_stats_arenas_i_lruns_j_node); +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, + ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, + ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, + ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. 
*/ + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, + ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) + +static const ctl_named_node_t * +stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) +{ + + if (j > nhclasses) + return (NULL); + return (super_stats_arenas_i_hchunks_j_node); +} + +static const ctl_named_node_t * +stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) +{ + const ctl_named_node_t * ret; + + malloc_mutex_lock(&ctl_mtx); + if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { + ret = NULL; + goto label_return; + } + + ret = super_stats_arenas_i_node; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} diff --git a/redis.submodule/jemalloc/src/extent.c b/redis.submodule/jemalloc/src/extent.c new file mode 100644 index 0000000..13f9441 --- /dev/null +++ b/redis.submodule/jemalloc/src/extent.c @@ -0,0 +1,53 @@ +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +JEMALLOC_INLINE_C size_t +extent_quantize(size_t size) +{ + + /* + * Round down to the nearest chunk size that can actually be requested + * during normal huge allocation. + */ + return (index2size(size2index(size + 1) - 1)); +} + +JEMALLOC_INLINE_C int +extent_szad_comp(extent_node_t *a, extent_node_t *b) +{ + int ret; + size_t a_qsize = extent_quantize(extent_node_size_get(a)); + size_t b_qsize = extent_quantize(extent_node_size_get(b)); + + /* + * Compare based on quantized size rather than size, in order to sort + * equally useful extents only by address. + */ + ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); + if (ret == 0) { + uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); + uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); + + ret = (a_addr > b_addr) - (a_addr < b_addr); + } + + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link, + extent_szad_comp) + +JEMALLOC_INLINE_C int +extent_ad_comp(extent_node_t *a, extent_node_t *b) +{ + uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); + uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); + + return ((a_addr > b_addr) - (a_addr < b_addr)); +} + +/* Generate red-black tree functions. 
*/ +rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) diff --git a/redis.submodule/jemalloc/src/hash.c b/redis.submodule/jemalloc/src/hash.c new file mode 100644 index 0000000..cfa4da0 --- /dev/null +++ b/redis.submodule/jemalloc/src/hash.c @@ -0,0 +1,2 @@ +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/redis.submodule/jemalloc/src/huge.c b/redis.submodule/jemalloc/src/huge.c new file mode 100644 index 0000000..1e9a665 --- /dev/null +++ b/redis.submodule/jemalloc/src/huge.c @@ -0,0 +1,435 @@ +#define JEMALLOC_HUGE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +static extent_node_t * +huge_node_get(const void *ptr) +{ + extent_node_t *node; + + node = chunk_lookup(ptr, true); + assert(!extent_node_achunk_get(node)); + + return (node); +} + +static bool +huge_node_set(const void *ptr, extent_node_t *node) +{ + + assert(extent_node_addr_get(node) == ptr); + assert(!extent_node_achunk_get(node)); + return (chunk_register(ptr, node)); +} + +static void +huge_node_unset(const void *ptr, const extent_node_t *node) +{ + + chunk_deregister(ptr, node); +} + +void * +huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, + tcache_t *tcache) +{ + size_t usize; + + usize = s2u(size); + if (usize == 0) { + /* size_t overflow. */ + return (NULL); + } + + return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache)); +} + +void * +huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment, + bool zero, tcache_t *tcache) +{ + void *ret; + size_t usize; + extent_node_t *node; + bool is_zeroed; + + /* Allocate one or more contiguous chunks for this request. */ + + usize = sa2u(size, alignment); + if (unlikely(usize == 0)) + return (NULL); + assert(usize >= chunksize); + + /* Allocate an extent node with which to track the chunk. */ + node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)), + CACHELINE, false, tcache, true, arena); + if (node == NULL) + return (NULL); + + /* + * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that + * it is possible to make correct junk/zero fill decisions below. + */ + is_zeroed = zero; + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena, + size, alignment, &is_zeroed)) == NULL) { + idalloctm(tsd, node, tcache, true); + return (NULL); + } + + extent_node_init(node, arena, ret, size, is_zeroed, true); + + if (huge_node_set(ret, node)) { + arena_chunk_dalloc_huge(arena, ret, size); + idalloctm(tsd, node, tcache, true); + return (NULL); + } + + /* Insert node into huge. */ + malloc_mutex_lock(&arena->huge_mtx); + ql_elm_new(node, ql_link); + ql_tail_insert(&arena->huge, node, ql_link); + malloc_mutex_unlock(&arena->huge_mtx); + + if (zero || (config_fill && unlikely(opt_zero))) { + if (!is_zeroed) + memset(ret, 0, size); + } else if (config_fill && unlikely(opt_junk_alloc)) + memset(ret, 0xa5, size); + + return (ret); +} + +#ifdef JEMALLOC_JET +#undef huge_dalloc_junk +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) +#endif +static void +huge_dalloc_junk(void *ptr, size_t usize) +{ + + if (config_fill && have_dss && unlikely(opt_junk_free)) { + /* + * Only bother junk filling if the chunk isn't about to be + * unmapped. 
+ */ + if (!config_munmap || (have_dss && chunk_in_dss(ptr))) + memset(ptr, 0x5a, usize); + } +} +#ifdef JEMALLOC_JET +#undef huge_dalloc_junk +#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) +huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); +#endif + +static void +huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min, + size_t usize_max, bool zero) +{ + size_t usize, usize_next; + extent_node_t *node; + arena_t *arena; + chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; + bool pre_zeroed, post_zeroed; + + /* Increase usize to incorporate extra. */ + for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1)) + <= oldsize; usize = usize_next) + ; /* Do nothing. */ + + if (oldsize == usize) + return; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + pre_zeroed = extent_node_zeroed_get(node); + + /* Fill if necessary (shrinking). */ + if (oldsize > usize) { + size_t sdiff = oldsize - usize; + if (config_fill && unlikely(opt_junk_free)) { + memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff); + post_zeroed = false; + } else { + post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, + ptr, CHUNK_CEILING(oldsize), usize, sdiff); + } + } else + post_zeroed = pre_zeroed; + + malloc_mutex_lock(&arena->huge_mtx); + /* Update the size of the huge allocation. */ + assert(extent_node_size_get(node) != usize); + extent_node_size_set(node, usize); + /* Update zeroed. */ + extent_node_zeroed_set(node, post_zeroed); + malloc_mutex_unlock(&arena->huge_mtx); + + arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize); + + /* Fill if necessary (growing). */ + if (oldsize < usize) { + if (zero || (config_fill && unlikely(opt_zero))) { + if (!pre_zeroed) { + memset((void *)((uintptr_t)ptr + oldsize), 0, + usize - oldsize); + } + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - + oldsize); + } + } +} + +static bool +huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) +{ + extent_node_t *node; + arena_t *arena; + chunk_hooks_t chunk_hooks; + size_t cdiff; + bool pre_zeroed, post_zeroed; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + pre_zeroed = extent_node_zeroed_get(node); + chunk_hooks = chunk_hooks_get(arena); + + assert(oldsize > usize); + + /* Split excess chunks. */ + cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); + if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize), + CHUNK_CEILING(usize), cdiff, true, arena->ind)) + return (true); + + if (oldsize > usize) { + size_t sdiff = oldsize - usize; + if (config_fill && unlikely(opt_junk_free)) { + huge_dalloc_junk((void *)((uintptr_t)ptr + usize), + sdiff); + post_zeroed = false; + } else { + post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, + CHUNK_ADDR2BASE((uintptr_t)ptr + usize), + CHUNK_CEILING(oldsize), + CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); + } + } else + post_zeroed = pre_zeroed; + + malloc_mutex_lock(&arena->huge_mtx); + /* Update the size of the huge allocation. */ + extent_node_size_set(node, usize); + /* Update zeroed. */ + extent_node_zeroed_set(node, post_zeroed); + malloc_mutex_unlock(&arena->huge_mtx); + + /* Zap the excess chunks. 
*/ + arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize); + + return (false); +} + +static bool +huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) { + extent_node_t *node; + arena_t *arena; + bool is_zeroed_subchunk, is_zeroed_chunk; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(&arena->huge_mtx); + is_zeroed_subchunk = extent_node_zeroed_get(node); + malloc_mutex_unlock(&arena->huge_mtx); + + /* + * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so + * that it is possible to make correct junk/zero fill decisions below. + */ + is_zeroed_chunk = zero; + + if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize, + &is_zeroed_chunk)) + return (true); + + malloc_mutex_lock(&arena->huge_mtx); + /* Update the size of the huge allocation. */ + extent_node_size_set(node, usize); + malloc_mutex_unlock(&arena->huge_mtx); + + if (zero || (config_fill && unlikely(opt_zero))) { + if (!is_zeroed_subchunk) { + memset((void *)((uintptr_t)ptr + oldsize), 0, + CHUNK_CEILING(oldsize) - oldsize); + } + if (!is_zeroed_chunk) { + memset((void *)((uintptr_t)ptr + + CHUNK_CEILING(oldsize)), 0, usize - + CHUNK_CEILING(oldsize)); + } + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - + oldsize); + } + + return (false); +} + +bool +huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min, + size_t usize_max, bool zero) +{ + + assert(s2u(oldsize) == oldsize); + + /* Both allocations must be huge to avoid a move. */ + if (oldsize < chunksize || usize_max < chunksize) + return (true); + + if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { + /* Attempt to expand the allocation in-place. */ + if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero)) + return (false); + /* Try again, this time with usize_min. */ + if (usize_min < usize_max && CHUNK_CEILING(usize_min) > + CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr, + oldsize, usize_min, zero)) + return (false); + } + + /* + * Avoid moving the allocation if the existing chunk size accommodates + * the new size. + */ + if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) + && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { + huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max, + zero); + return (false); + } + + /* Attempt to shrink the allocation in-place. */ + if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) + return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)); + return (true); +} + +static void * +huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) +{ + + if (alignment <= chunksize) + return (huge_malloc(tsd, arena, usize, zero, tcache)); + return (huge_palloc(tsd, arena, usize, alignment, zero, tcache)); +} + +void * +huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) +{ + void *ret; + size_t copysize; + + /* Try to avoid moving the allocation. */ + if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero)) + return (ptr); + + /* + * usize and oldsize are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero, + tcache); + if (ret == NULL) + return (NULL); + + copysize = (usize < oldsize) ? 
usize : oldsize; + memcpy(ret, ptr, copysize); + isqalloc(tsd, ptr, oldsize, tcache); + return (ret); +} + +void +huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache) +{ + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + huge_node_unset(ptr, node); + malloc_mutex_lock(&arena->huge_mtx); + ql_remove(&arena->huge, node, ql_link); + malloc_mutex_unlock(&arena->huge_mtx); + + huge_dalloc_junk(extent_node_addr_get(node), + extent_node_size_get(node)); + arena_chunk_dalloc_huge(extent_node_arena_get(node), + extent_node_addr_get(node), extent_node_size_get(node)); + idalloctm(tsd, node, tcache, true); +} + +arena_t * +huge_aalloc(const void *ptr) +{ + + return (extent_node_arena_get(huge_node_get(ptr))); +} + +size_t +huge_salloc(const void *ptr) +{ + size_t size; + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(&arena->huge_mtx); + size = extent_node_size_get(node); + malloc_mutex_unlock(&arena->huge_mtx); + + return (size); +} + +prof_tctx_t * +huge_prof_tctx_get(const void *ptr) +{ + prof_tctx_t *tctx; + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(&arena->huge_mtx); + tctx = extent_node_prof_tctx_get(node); + malloc_mutex_unlock(&arena->huge_mtx); + + return (tctx); +} + +void +huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx) +{ + extent_node_t *node; + arena_t *arena; + + node = huge_node_get(ptr); + arena = extent_node_arena_get(node); + malloc_mutex_lock(&arena->huge_mtx); + extent_node_prof_tctx_set(node, tctx); + malloc_mutex_unlock(&arena->huge_mtx); +} + +void +huge_prof_tctx_reset(const void *ptr) +{ + + huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U); +} diff --git a/redis.submodule/jemalloc/src/jemalloc.c b/redis.submodule/jemalloc/src/jemalloc.c new file mode 100644 index 0000000..5a2d324 --- /dev/null +++ b/redis.submodule/jemalloc/src/jemalloc.c @@ -0,0 +1,2593 @@ +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* Runtime configuration options. */ +const char *je_malloc_conf JEMALLOC_ATTR(weak); +bool opt_abort = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +const char *opt_junk = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + "true" +#else + "false" +#endif + ; +bool opt_junk_alloc = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; +bool opt_junk_free = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; + +size_t opt_quarantine = ZU(0); +bool opt_redzone = false; +bool opt_utrace = false; +bool opt_xmalloc = false; +bool opt_zero = false; +size_t opt_narenas = 0; + +/* Initialized to true if the process is running inside Valgrind. */ +bool in_valgrind; + +unsigned ncpus; + +/* Protects arenas initialization (arenas, narenas_total). */ +static malloc_mutex_t arenas_lock; +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + * + * arenas[0..narenas_auto) are used for automatic multiplexing of threads and + * arenas. arenas[narenas_auto..narenas_total) are only used if the application + * takes some action to create them and allocate from them. 
+ */ +static arena_t **arenas; +static unsigned narenas_total; +static arena_t *a0; /* arenas[0]; read-only after initialization. */ +static unsigned narenas_auto; /* Read-only after initialization. */ + +typedef enum { + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. */ +} malloc_init_t; +static malloc_init_t malloc_init_state = malloc_init_uninitialized; + +JEMALLOC_ALIGNED(CACHELINE) +const size_t index2size_tab[NSIZES] = { +#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ + ((ZU(1)<= 0x0600 +static malloc_mutex_t init_lock = SRWLOCK_INIT; +#else +static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; + +JEMALLOC_ATTR(constructor) +static void WINAPI +_init_init_lock(void) +{ + + /* If another constructor in the same binary is using mallctl to + * e.g. setup chunk hooks, it may end up running before this one, + * and malloc_init_hard will crash trying to lock the uninitialized + * lock. So we force an initialization of the lock in + * malloc_init_hard as well. We don't try to care about atomicity + * of the accessed to the init_lock_initialized boolean, since it + * really only matters early in the process creation, before any + * separate thread normally starts doing anything. */ + if (!init_lock_initialized) + malloc_mutex_init(&init_lock); + init_lock_initialized = true; +} + +#ifdef _MSC_VER +# pragma section(".CRT$XCU", read) +JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) +static const void (WINAPI *init_init_lock)(void) = _init_init_lock; +#endif +#endif +#else +static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; +#endif + +typedef struct { + void *p; /* Input pointer (as in realloc(p, s)). */ + size_t s; /* Request size. */ + void *r; /* Result pointer. */ +} malloc_utrace_t; + +#ifdef JEMALLOC_UTRACE +# define UTRACE(a, b, c) do { \ + if (unlikely(opt_utrace)) { \ + int utrace_serrno = errno; \ + malloc_utrace_t ut; \ + ut.p = (a); \ + ut.s = (b); \ + ut.r = (c); \ + utrace(&ut, sizeof(ut)); \ + errno = utrace_serrno; \ + } \ +} while (0) +#else +# define UTRACE(a, b, c) +#endif + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static bool malloc_init_hard_a0(void); +static bool malloc_init_hard(void); + +/******************************************************************************/ +/* + * Begin miscellaneous support functions. + */ + +JEMALLOC_ALWAYS_INLINE_C bool +malloc_initialized(void) +{ + + return (malloc_init_state == malloc_init_initialized); +} + +JEMALLOC_ALWAYS_INLINE_C void +malloc_thread_init(void) +{ + + /* + * TSD initialization can't be safely done as a side effect of + * deallocation, because it is possible for a thread to do nothing but + * deallocate its TLS data via free(), in which case writing to TLS + * would cause write-after-free memory corruption. The quarantine + * facility *only* gets used as a side effect of deallocation, so make + * a best effort attempt at initializing its TSD by hooking all + * allocation events. 
+ */ + if (config_fill && unlikely(opt_quarantine)) + quarantine_alloc_hook(); +} + +JEMALLOC_ALWAYS_INLINE_C bool +malloc_init_a0(void) +{ + + if (unlikely(malloc_init_state == malloc_init_uninitialized)) + return (malloc_init_hard_a0()); + return (false); +} + +JEMALLOC_ALWAYS_INLINE_C bool +malloc_init(void) +{ + + if (unlikely(!malloc_initialized()) && malloc_init_hard()) + return (true); + malloc_thread_init(); + + return (false); +} + +/* + * The a0*() functions are used instead of i[mcd]alloc() in situations that + * cannot tolerate TLS variable access. + */ + +arena_t * +a0get(void) +{ + + assert(a0 != NULL); + return (a0); +} + +static void * +a0ialloc(size_t size, bool zero, bool is_metadata) +{ + + if (unlikely(malloc_init_a0())) + return (NULL); + + return (iallocztm(NULL, size, zero, false, is_metadata, a0get())); +} + +static void +a0idalloc(void *ptr, bool is_metadata) +{ + + idalloctm(NULL, ptr, false, is_metadata); +} + +void * +a0malloc(size_t size) +{ + + return (a0ialloc(size, false, true)); +} + +void +a0dalloc(void *ptr) +{ + + a0idalloc(ptr, true); +} + +/* + * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive + * situations that cannot tolerate TLS variable access (TLS allocation and very + * early internal data structure initialization). + */ + +void * +bootstrap_malloc(size_t size) +{ + + if (unlikely(size == 0)) + size = 1; + + return (a0ialloc(size, false, false)); +} + +void * +bootstrap_calloc(size_t num, size_t size) +{ + size_t num_size; + + num_size = num * size; + if (unlikely(num_size == 0)) { + assert(num == 0 || size == 0); + num_size = 1; + } + + return (a0ialloc(num_size, true, false)); +} + +void +bootstrap_free(void *ptr) +{ + + if (unlikely(ptr == NULL)) + return; + + a0idalloc(ptr, false); +} + +/* Create a new arena and insert it into the arenas array at index ind. */ +static arena_t * +arena_init_locked(unsigned ind) +{ + arena_t *arena; + + /* Expand arenas if necessary. */ + assert(ind <= narenas_total); + if (ind > MALLOCX_ARENA_MAX) + return (NULL); + if (ind == narenas_total) { + unsigned narenas_new = narenas_total + 1; + arena_t **arenas_new = + (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new * + sizeof(arena_t *))); + if (arenas_new == NULL) + return (NULL); + memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *)); + arenas_new[ind] = NULL; + /* + * Deallocate only if arenas came from a0malloc() (not + * base_alloc()). + */ + if (narenas_total != narenas_auto) + a0dalloc(arenas); + arenas = arenas_new; + narenas_total = narenas_new; + } + + /* + * Another thread may have already initialized arenas[ind] if it's an + * auto arena. + */ + arena = arenas[ind]; + if (arena != NULL) { + assert(ind < narenas_auto); + return (arena); + } + + /* Actually initialize the arena. 
*/ + arena = arenas[ind] = arena_new(ind); + return (arena); +} + +arena_t * +arena_init(unsigned ind) +{ + arena_t *arena; + + malloc_mutex_lock(&arenas_lock); + arena = arena_init_locked(ind); + malloc_mutex_unlock(&arenas_lock); + return (arena); +} + +unsigned +narenas_total_get(void) +{ + unsigned narenas; + + malloc_mutex_lock(&arenas_lock); + narenas = narenas_total; + malloc_mutex_unlock(&arenas_lock); + + return (narenas); +} + +static void +arena_bind_locked(tsd_t *tsd, unsigned ind) +{ + arena_t *arena; + + arena = arenas[ind]; + arena->nthreads++; + + if (tsd_nominal(tsd)) + tsd_arena_set(tsd, arena); +} + +static void +arena_bind(tsd_t *tsd, unsigned ind) +{ + + malloc_mutex_lock(&arenas_lock); + arena_bind_locked(tsd, ind); + malloc_mutex_unlock(&arenas_lock); +} + +void +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) +{ + arena_t *oldarena, *newarena; + + malloc_mutex_lock(&arenas_lock); + oldarena = arenas[oldind]; + newarena = arenas[newind]; + oldarena->nthreads--; + newarena->nthreads++; + malloc_mutex_unlock(&arenas_lock); + tsd_arena_set(tsd, newarena); +} + +unsigned +arena_nbound(unsigned ind) +{ + unsigned nthreads; + + malloc_mutex_lock(&arenas_lock); + nthreads = arenas[ind]->nthreads; + malloc_mutex_unlock(&arenas_lock); + return (nthreads); +} + +static void +arena_unbind(tsd_t *tsd, unsigned ind) +{ + arena_t *arena; + + malloc_mutex_lock(&arenas_lock); + arena = arenas[ind]; + arena->nthreads--; + malloc_mutex_unlock(&arenas_lock); + tsd_arena_set(tsd, NULL); +} + +arena_t * +arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing) +{ + arena_t *arena; + arena_t **arenas_cache = tsd_arenas_cache_get(tsd); + unsigned narenas_cache = tsd_narenas_cache_get(tsd); + unsigned narenas_actual = narenas_total_get(); + + /* Deallocate old cache if it's too small. */ + if (arenas_cache != NULL && narenas_cache < narenas_actual) { + a0dalloc(arenas_cache); + arenas_cache = NULL; + narenas_cache = 0; + tsd_arenas_cache_set(tsd, arenas_cache); + tsd_narenas_cache_set(tsd, narenas_cache); + } + + /* Allocate cache if it's missing. */ + if (arenas_cache == NULL) { + bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd); + assert(ind < narenas_actual || !init_if_missing); + narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1; + + if (tsd_nominal(tsd) && !*arenas_cache_bypassp) { + *arenas_cache_bypassp = true; + arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) * + narenas_cache); + *arenas_cache_bypassp = false; + } + if (arenas_cache == NULL) { + /* + * This function must always tell the truth, even if + * it's slow, so don't let OOM, thread cleanup (note + * tsd_nominal check), nor recursive allocation + * avoidance (note arenas_cache_bypass check) get in the + * way. + */ + if (ind >= narenas_actual) + return (NULL); + malloc_mutex_lock(&arenas_lock); + arena = arenas[ind]; + malloc_mutex_unlock(&arenas_lock); + return (arena); + } + assert(tsd_nominal(tsd) && !*arenas_cache_bypassp); + tsd_arenas_cache_set(tsd, arenas_cache); + tsd_narenas_cache_set(tsd, narenas_cache); + } + + /* + * Copy to cache. It's possible that the actual number of arenas has + * increased since narenas_total_get() was called above, but that causes + * no correctness issues unless two threads concurrently execute the + * arenas.extend mallctl, which we trust mallctl synchronization to + * prevent. 
+ */ + malloc_mutex_lock(&arenas_lock); + memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual); + malloc_mutex_unlock(&arenas_lock); + if (narenas_cache > narenas_actual) { + memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) * + (narenas_cache - narenas_actual)); + } + + /* Read the refreshed cache, and init the arena if necessary. */ + arena = arenas_cache[ind]; + if (init_if_missing && arena == NULL) + arena = arenas_cache[ind] = arena_init(ind); + return (arena); +} + +/* Slow path, called only by arena_choose(). */ +arena_t * +arena_choose_hard(tsd_t *tsd) +{ + arena_t *ret; + + if (narenas_auto > 1) { + unsigned i, choose, first_null; + + choose = 0; + first_null = narenas_auto; + malloc_mutex_lock(&arenas_lock); + assert(a0get() != NULL); + for (i = 1; i < narenas_auto; i++) { + if (arenas[i] != NULL) { + /* + * Choose the first arena that has the lowest + * number of threads assigned to it. + */ + if (arenas[i]->nthreads < + arenas[choose]->nthreads) + choose = i; + } else if (first_null == narenas_auto) { + /* + * Record the index of the first uninitialized + * arena, in case all extant arenas are in use. + * + * NB: It is possible for there to be + * discontinuities in terms of initialized + * versus uninitialized arenas, due to the + * "thread.arena" mallctl. + */ + first_null = i; + } + } + + if (arenas[choose]->nthreads == 0 + || first_null == narenas_auto) { + /* + * Use an unloaded arena, or the least loaded arena if + * all arenas are already initialized. + */ + ret = arenas[choose]; + } else { + /* Initialize a new arena. */ + choose = first_null; + ret = arena_init_locked(choose); + if (ret == NULL) { + malloc_mutex_unlock(&arenas_lock); + return (NULL); + } + } + arena_bind_locked(tsd, choose); + malloc_mutex_unlock(&arenas_lock); + } else { + ret = a0get(); + arena_bind(tsd, 0); + } + + return (ret); +} + +void +thread_allocated_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +thread_deallocated_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +arena_cleanup(tsd_t *tsd) +{ + arena_t *arena; + + arena = tsd_arena_get(tsd); + if (arena != NULL) + arena_unbind(tsd, arena->ind); +} + +void +arenas_cache_cleanup(tsd_t *tsd) +{ + arena_t **arenas_cache; + + arenas_cache = tsd_arenas_cache_get(tsd); + if (arenas_cache != NULL) { + tsd_arenas_cache_set(tsd, NULL); + a0dalloc(arenas_cache); + } +} + +void +narenas_cache_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +void +arenas_cache_bypass_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +static void +stats_print_atexit(void) +{ + + if (config_tcache && config_stats) { + unsigned narenas, i; + + /* + * Merge stats from extant threads. This is racy, since + * individual threads do not lock when recording tcache stats + * events. As a consequence, the final stats may be slightly + * out of date by the time they are reported, if other threads + * continue to allocate. + */ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena = arenas[i]; + if (arena != NULL) { + tcache_t *tcache; + + /* + * tcache_stats_merge() locks bins, so if any + * code is introduced that acquires both arena + * and bin locks in the opposite order, + * deadlocks may result. + */ + malloc_mutex_lock(&arena->lock); + ql_foreach(tcache, &arena->tcache_ql, link) { + tcache_stats_merge(tcache, arena); + } + malloc_mutex_unlock(&arena->lock); + } + } + } + je_malloc_stats_print(NULL, NULL, NULL); +} + +/* + * End miscellaneous support functions. 
+ */ +/******************************************************************************/ +/* + * Begin initialization functions. + */ + +#ifndef JEMALLOC_HAVE_SECURE_GETENV +static char * +secure_getenv(const char *name) +{ + +# ifdef JEMALLOC_HAVE_ISSETUGID + if (issetugid() != 0) + return (NULL); +# endif + return (getenv(name)); +} +#endif + +static unsigned +malloc_ncpus(void) +{ + long result; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwNumberOfProcessors; +#else + result = sysconf(_SC_NPROCESSORS_ONLN); +#endif + return ((result == -1) ? 1 : (unsigned)result); +} + +static bool +malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, + char const **v_p, size_t *vlen_p) +{ + bool accept; + const char *opts = *opts_p; + + *k_p = opts; + + for (accept = false; !accept;) { + switch (*opts) { + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': + case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': + case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': + case 'Y': case 'Z': + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': + case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': + case 's': case 't': case 'u': case 'v': case 'w': case 'x': + case 'y': case 'z': + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + case '_': + opts++; + break; + case ':': + opts++; + *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; + *v_p = opts; + accept = true; + break; + case '\0': + if (opts != *opts_p) { + malloc_write(": Conf string ends " + "with key\n"); + } + return (true); + default: + malloc_write(": Malformed conf string\n"); + return (true); + } + } + + for (accept = false; !accept;) { + switch (*opts) { + case ',': + opts++; + /* + * Look ahead one character here, because the next time + * this function is called, it will assume that end of + * input has been cleanly reached if no input remains, + * but we have optimistically already consumed the + * comma if one exists. + */ + if (*opts == '\0') { + malloc_write(": Conf string ends " + "with comma\n"); + } + *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; + accept = true; + break; + case '\0': + *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; + accept = true; + break; + default: + opts++; + break; + } + } + + *opts_p = opts; + return (false); +} + +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) +{ + + malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, + (int)vlen, v); +} + +static void +malloc_conf_init(void) +{ + unsigned i; + char buf[PATH_MAX + 1]; + const char *opts, *k, *v; + size_t klen, vlen; + + /* + * Automatically configure valgrind before processing options. The + * valgrind option remains in jemalloc 3.x for compatibility reasons. + */ + if (config_valgrind) { + in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; + if (config_fill && unlikely(in_valgrind)) { + opt_junk = "false"; + opt_junk_alloc = false; + opt_junk_free = false; + assert(!opt_zero); + opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; + opt_redzone = true; + } + if (config_tcache && unlikely(in_valgrind)) + opt_tcache = false; + } + + for (i = 0; i < 3; i++) { + /* Get runtime configuration. */ + switch (i) { + case 0: + if (je_malloc_conf != NULL) { + /* + * Use options that were compiled into the + * program. 
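+				 * For example, assuming the je_ symbol prefix
+				 * used throughout this file, an application
+				 * could embed its settings at build time
+				 * (option values illustrative):
+				 *
+				 *   const char *je_malloc_conf =
+				 *       "narenas:4,lg_chunk:22";
+				 *
+				 * The next two passes of this loop consult the
+				 * /etc/malloc.conf symlink name and the
+				 * MALLOC_CONF environment variable, which use
+				 * the same "key:value,key:value" syntax parsed
+				 * by malloc_conf_next().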
+ */ + opts = je_malloc_conf; + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + case 1: { + int linklen = 0; +#ifndef _WIN32 + int saved_errno = errno; + const char *linkname = +# ifdef JEMALLOC_PREFIX + "/etc/"JEMALLOC_PREFIX"malloc.conf" +# else + "/etc/malloc.conf" +# endif + ; + + /* + * Try to use the contents of the "/etc/malloc.conf" + * symbolic link's name. + */ + linklen = readlink(linkname, buf, sizeof(buf) - 1); + if (linklen == -1) { + /* No configuration specified. */ + linklen = 0; + /* Restore errno. */ + set_errno(saved_errno); + } +#endif + buf[linklen] = '\0'; + opts = buf; + break; + } case 2: { + const char *envname = +#ifdef JEMALLOC_PREFIX + JEMALLOC_CPREFIX"MALLOC_CONF" +#else + "MALLOC_CONF" +#endif + ; + + if ((opts = secure_getenv(envname)) != NULL) { + /* + * Do nothing; opts is already initialized to + * the value of the MALLOC_CONF environment + * variable. + */ + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + } default: + not_reached(); + buf[0] = '\0'; + opts = buf; + } + + while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, + &vlen)) { +#define CONF_MATCH(n) \ + (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) +#define CONF_MATCH_VALUE(n) \ + (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) +#define CONF_HANDLE_BOOL(o, n, cont) \ + if (CONF_MATCH(n)) { \ + if (CONF_MATCH_VALUE("true")) \ + o = true; \ + else if (CONF_MATCH_VALUE("false")) \ + o = false; \ + else { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } \ + if (cont) \ + continue; \ + } +#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ + if (CONF_MATCH(n)) { \ + uintmax_t um; \ + char *end; \ + \ + set_errno(0); \ + um = malloc_strtoumax(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (clip) { \ + if ((min) != 0 && um < (min)) \ + o = (min); \ + else if (um > (max)) \ + o = (max); \ + else \ + o = um; \ + } else { \ + if (((min) != 0 && um < (min)) \ + || um > (max)) { \ + malloc_conf_error( \ + "Out-of-range " \ + "conf value", \ + k, klen, v, vlen); \ + } else \ + o = um; \ + } \ + continue; \ + } +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + if (CONF_MATCH(n)) { \ + long l; \ + char *end; \ + \ + set_errno(0); \ + l = strtol(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (l < (ssize_t)(min) || l > \ + (ssize_t)(max)) { \ + malloc_conf_error( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else \ + o = l; \ + continue; \ + } +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (CONF_MATCH(n)) { \ + size_t cpylen = (vlen <= \ + sizeof(o)-1) ? vlen : \ + sizeof(o)-1; \ + strncpy(o, v, cpylen); \ + o[cpylen] = '\0'; \ + continue; \ + } + + CONF_HANDLE_BOOL(opt_abort, "abort", true) + /* + * Chunks always require at least one header page, + * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and + * possibly an additional page in the presence of + * redzones. In order to simplify options processing, + * use a conservative bound that accommodates all these + * constraints. + */ + CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + + LG_SIZE_CLASS_GROUP + (config_fill ? 
2 : 1), + (sizeof(size_t) << 3) - 1, true) + if (strncmp("dss", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < dss_prec_limit; i++) { + if (strncmp(dss_prec_names[i], v, vlen) + == 0) { + if (chunk_dss_prec_set(i)) { + malloc_conf_error( + "Error setting dss", + k, klen, v, vlen); + } else { + opt_dss = + dss_prec_names[i]; + match = true; + break; + } + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, + SIZE_T_MAX, false) + CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", + -1, (sizeof(size_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) + if (config_fill) { + if (CONF_MATCH("junk")) { + if (CONF_MATCH_VALUE("true")) { + opt_junk = "true"; + opt_junk_alloc = opt_junk_free = + true; + } else if (CONF_MATCH_VALUE("false")) { + opt_junk = "false"; + opt_junk_alloc = opt_junk_free = + false; + } else if (CONF_MATCH_VALUE("alloc")) { + opt_junk = "alloc"; + opt_junk_alloc = true; + opt_junk_free = false; + } else if (CONF_MATCH_VALUE("free")) { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } else { + malloc_conf_error( + "Invalid conf value", k, + klen, v, vlen); + } + continue; + } + CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", + 0, SIZE_T_MAX, false) + CONF_HANDLE_BOOL(opt_redzone, "redzone", true) + CONF_HANDLE_BOOL(opt_zero, "zero", true) + } + if (config_utrace) { + CONF_HANDLE_BOOL(opt_utrace, "utrace", true) + } + if (config_xmalloc) { + CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) + } + if (config_tcache) { + CONF_HANDLE_BOOL(opt_tcache, "tcache", + !config_valgrind || !in_valgrind) + if (CONF_MATCH("tcache")) { + assert(config_valgrind && in_valgrind); + if (opt_tcache) { + opt_tcache = false; + malloc_conf_error( + "tcache cannot be enabled " + "while running inside Valgrind", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, + "lg_tcache_max", -1, + (sizeof(size_t) << 3) - 1) + } + if (config_prof) { + CONF_HANDLE_BOOL(opt_prof, "prof", true) + CONF_HANDLE_CHAR_P(opt_prof_prefix, + "prof_prefix", "jeprof") + CONF_HANDLE_BOOL(opt_prof_active, "prof_active", + true) + CONF_HANDLE_BOOL(opt_prof_thread_active_init, + "prof_thread_active_init", true) + CONF_HANDLE_SIZE_T(opt_lg_prof_sample, + "lg_prof_sample", 0, + (sizeof(uint64_t) << 3) - 1, true) + CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", + true) + CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, + "lg_prof_interval", -1, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", + true) + CONF_HANDLE_BOOL(opt_prof_final, "prof_final", + true) + CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", + true) + } + malloc_conf_error("Invalid conf pair", k, klen, v, + vlen); +#undef CONF_MATCH +#undef CONF_HANDLE_BOOL +#undef CONF_HANDLE_SIZE_T +#undef CONF_HANDLE_SSIZE_T +#undef CONF_HANDLE_CHAR_P + } + } +} + +/* init_lock must be held. */ +static bool +malloc_init_hard_needed(void) +{ + + if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == + malloc_init_recursible)) { + /* + * Another thread initialized the allocator before this one + * acquired init_lock, or this thread is the initializing + * thread, and it is recursively allocating. + */ + return (false); + } +#ifdef JEMALLOC_THREADED_INIT + if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { + /* Busy-wait until the initializing thread completes. 
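+		 * init_lock is dropped on each iteration so that the
+		 * initializing thread can acquire it and finish, and
+		 * CPU_SPINWAIT issues a spin-wait hint to the processor.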
*/ + do { + malloc_mutex_unlock(&init_lock); + CPU_SPINWAIT; + malloc_mutex_lock(&init_lock); + } while (!malloc_initialized()); + return (false); + } +#endif + return (true); +} + +/* init_lock must be held. */ +static bool +malloc_init_hard_a0_locked(void) +{ + + malloc_initializer = INITIALIZER; + + if (config_prof) + prof_boot0(); + malloc_conf_init(); + if (opt_stats_print) { + /* Print statistics at exit. */ + if (atexit(stats_print_atexit) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) + abort(); + } + } + if (base_boot()) + return (true); + if (chunk_boot()) + return (true); + if (ctl_boot()) + return (true); + if (config_prof) + prof_boot1(); + if (arena_boot()) + return (true); + if (config_tcache && tcache_boot()) + return (true); + if (malloc_mutex_init(&arenas_lock)) + return (true); + /* + * Create enough scaffolding to allow recursive allocation in + * malloc_ncpus(). + */ + narenas_total = narenas_auto = 1; + arenas = &a0; + memset(arenas, 0, sizeof(arena_t *) * narenas_auto); + /* + * Initialize one arena here. The rest are lazily created in + * arena_choose_hard(). + */ + if (arena_init(0) == NULL) + return (true); + malloc_init_state = malloc_init_a0_initialized; + return (false); +} + +static bool +malloc_init_hard_a0(void) +{ + bool ret; + + malloc_mutex_lock(&init_lock); + ret = malloc_init_hard_a0_locked(); + malloc_mutex_unlock(&init_lock); + return (ret); +} + +/* + * Initialize data structures which may trigger recursive allocation. + * + * init_lock must be held. + */ +static void +malloc_init_hard_recursible(void) +{ + + malloc_init_state = malloc_init_recursible; + malloc_mutex_unlock(&init_lock); + + ncpus = malloc_ncpus(); + +#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ + && !defined(_WIN32) && !defined(__native_client__)) + /* LinuxThreads's pthread_atfork() allocates. */ + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, + jemalloc_postfork_child) != 0) { + malloc_write(": Error in pthread_atfork()\n"); + if (opt_abort) + abort(); + } +#endif + malloc_mutex_lock(&init_lock); +} + +/* init_lock must be held. */ +static bool +malloc_init_hard_finish(void) +{ + + if (mutex_boot()) + return (true); + + if (opt_narenas == 0) { + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) + opt_narenas = ncpus << 2; + else + opt_narenas = 1; + } + narenas_auto = opt_narenas; + /* + * Make sure that the arenas array can be allocated. In practice, this + * limit is enough to allow the allocator to function, but the ctl + * machinery will fail to allocate memory at far lower limits. + */ + if (narenas_auto > chunksize / sizeof(arena_t *)) { + narenas_auto = chunksize / sizeof(arena_t *); + malloc_printf(": Reducing narenas to limit (%d)\n", + narenas_auto); + } + narenas_total = narenas_auto; + + /* Allocate and initialize arenas. */ + arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); + if (arenas == NULL) + return (true); + /* + * Zero the array. In practice, this should always be pre-zeroed, + * since it was just mmap()ed, but let's be sure. + */ + memset(arenas, 0, sizeof(arena_t *) * narenas_total); + /* Copy the pointer to the one arena that was already initialized. 
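+	 * That arena (a0) was created by malloc_init_hard_a0_locked() so that
+	 * allocation already worked during bootstrapping, before this
+	 * full-sized arenas array could be allocated.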
*/ + arenas[0] = a0; + + malloc_init_state = malloc_init_initialized; + return (false); +} + +static bool +malloc_init_hard(void) +{ + +#if defined(_WIN32) && _WIN32_WINNT < 0x0600 + _init_init_lock(); +#endif + malloc_mutex_lock(&init_lock); + if (!malloc_init_hard_needed()) { + malloc_mutex_unlock(&init_lock); + return (false); + } + + if (malloc_init_state != malloc_init_a0_initialized && + malloc_init_hard_a0_locked()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + if (malloc_tsd_boot0()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + if (config_prof && prof_boot2()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + malloc_init_hard_recursible(); + + if (malloc_init_hard_finish()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + malloc_mutex_unlock(&init_lock); + malloc_tsd_boot1(); + return (false); +} + +/* + * End initialization functions. + */ +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. + */ + +static void * +imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + p = imalloc(tsd, LARGE_MINCLASS); + if (p == NULL) + return (NULL); + arena_prof_promoted(p, usize); + } else + p = imalloc(tsd, usize); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imalloc_prof(tsd_t *tsd, size_t usize) +{ + void *p; + prof_tctx_t *tctx; + + tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + p = imalloc_prof_sample(tsd, usize, tctx); + else + p = imalloc(tsd, usize); + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_malloc(p, usize, tctx); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imalloc_body(size_t size, tsd_t **tsd, size_t *usize) +{ + + if (unlikely(malloc_init())) + return (NULL); + *tsd = tsd_fetch(); + + if (config_prof && opt_prof) { + *usize = s2u(size); + if (unlikely(*usize == 0)) + return (NULL); + return (imalloc_prof(*tsd, *usize)); + } + + if (config_stats || (config_valgrind && unlikely(in_valgrind))) + *usize = s2u(size); + return (imalloc(*tsd, size)); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_malloc(size_t size) +{ + void *ret; + tsd_t *tsd; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + if (size == 0) + size = 1; + + ret = imalloc_body(size, &tsd, &usize); + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in malloc(): " + "out of memory\n"); + abort(); + } + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + assert(usize == isalloc(ret, config_prof)); + *tsd_thread_allocatedp_get(tsd) += usize; + } + UTRACE(0, size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); + return (ret); +} + +static void * +imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, + prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); + p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); + if (p == NULL) + return (NULL); + arena_prof_promoted(p, usize); + } else + p = ipalloc(tsd, usize, alignment, false); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) +{ + void *p; + 
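+	/*
+	 * prof_alloc_prep() returns the sentinel value (prof_tctx_t *)1 when
+	 * the allocation should not be sampled; any other non-NULL return is
+	 * the tctx that a sampled allocation will be attributed to.
+	 */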
prof_tctx_t *tctx; + + tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + p = imemalign_prof_sample(tsd, alignment, usize, tctx); + else + p = ipalloc(tsd, usize, alignment, false); + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_malloc(p, usize, tctx); + + return (p); +} + +JEMALLOC_ATTR(nonnull(1)) +static int +imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) +{ + int ret; + tsd_t *tsd; + size_t usize; + void *result; + + assert(min_alignment != 0); + + if (unlikely(malloc_init())) { + result = NULL; + goto label_oom; + } + tsd = tsd_fetch(); + if (size == 0) + size = 1; + + /* Make sure that alignment is a large enough power of 2. */ + if (unlikely(((alignment - 1) & alignment) != 0 + || (alignment < min_alignment))) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error allocating " + "aligned memory: invalid alignment\n"); + abort(); + } + result = NULL; + ret = EINVAL; + goto label_return; + } + + usize = sa2u(size, alignment); + if (unlikely(usize == 0)) { + result = NULL; + goto label_oom; + } + + if (config_prof && opt_prof) + result = imemalign_prof(tsd, alignment, usize); + else + result = ipalloc(tsd, usize, alignment, false); + if (unlikely(result == NULL)) + goto label_oom; + assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); + + *memptr = result; + ret = 0; +label_return: + if (config_stats && likely(result != NULL)) { + assert(usize == isalloc(result, config_prof)); + *tsd_thread_allocatedp_get(tsd) += usize; + } + UTRACE(0, size, result); + return (ret); +label_oom: + assert(result == NULL); + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error allocating aligned memory: " + "out of memory\n"); + abort(); + } + ret = ENOMEM; + goto label_return; +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +JEMALLOC_ATTR(nonnull(1)) +je_posix_memalign(void **memptr, size_t alignment, size_t size) +{ + int ret = imemalign(memptr, alignment, size, sizeof(void *)); + JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, + config_prof), false); + return (ret); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) +je_aligned_alloc(size_t alignment, size_t size) +{ + void *ret; + int err; + + if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { + ret = NULL; + set_errno(err); + } + JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), + false); + return (ret); +} + +static void * +icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + p = icalloc(tsd, LARGE_MINCLASS); + if (p == NULL) + return (NULL); + arena_prof_promoted(p, usize); + } else + p = icalloc(tsd, usize); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +icalloc_prof(tsd_t *tsd, size_t usize) +{ + void *p; + prof_tctx_t *tctx; + + tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + p = icalloc_prof_sample(tsd, usize, tctx); + else + p = icalloc(tsd, usize); + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_malloc(p, usize, tctx); + + return (p); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +je_calloc(size_t num, size_t 
size) +{ + void *ret; + tsd_t *tsd; + size_t num_size; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + if (unlikely(malloc_init())) { + num_size = 0; + ret = NULL; + goto label_return; + } + tsd = tsd_fetch(); + + num_size = num * size; + if (unlikely(num_size == 0)) { + if (num == 0 || size == 0) + num_size = 1; + else { + ret = NULL; + goto label_return; + } + /* + * Try to avoid division here. We know that it isn't possible to + * overflow during multiplication if neither operand uses any of the + * most significant half of the bits in a size_t. + */ + } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << + 2))) && (num_size / size != num))) { + /* size_t overflow. */ + ret = NULL; + goto label_return; + } + + if (config_prof && opt_prof) { + usize = s2u(num_size); + if (unlikely(usize == 0)) { + ret = NULL; + goto label_return; + } + ret = icalloc_prof(tsd, usize); + } else { + if (config_stats || (config_valgrind && unlikely(in_valgrind))) + usize = s2u(num_size); + ret = icalloc(tsd, num_size); + } + +label_return: + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in calloc(): out of " + "memory\n"); + abort(); + } + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + assert(usize == isalloc(ret, config_prof)); + *tsd_thread_allocatedp_get(tsd) += usize; + } + UTRACE(0, num_size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); + return (ret); +} + +static void * +irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); + if (p == NULL) + return (NULL); + arena_prof_promoted(p, usize); + } else + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) +{ + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(old_ptr); + tctx = prof_alloc_prep(tsd, usize, prof_active, true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) + p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); + else + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, + old_tctx); + + return (p); +} + +JEMALLOC_INLINE_C void +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache) +{ + size_t usize; + UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + if (config_prof && opt_prof) { + usize = isalloc(ptr, config_prof); + prof_free(tsd, ptr, usize); + } else if (config_stats || config_valgrind) + usize = isalloc(ptr, config_prof); + if (config_stats) + *tsd_thread_deallocatedp_get(tsd) += usize; + if (config_valgrind && unlikely(in_valgrind)) + rzsize = p2rz(ptr); + iqalloc(tsd, ptr, tcache); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); +} + +JEMALLOC_INLINE_C void +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache) +{ + UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + if (config_prof && opt_prof) + prof_free(tsd, ptr, usize); + if (config_stats) + *tsd_thread_deallocatedp_get(tsd) += usize; 
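+	/*
+	 * Unlike ifree() above, the caller passes usize in (e.g. computed via
+	 * inallocx() in je_sdallocx()), so no isalloc() lookup is needed on
+	 * this path.
+	 */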
+ if (config_valgrind && unlikely(in_valgrind)) + rzsize = p2rz(ptr); + isqalloc(tsd, ptr, usize, tcache); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t size) +{ + void *ret; + tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL); + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + size_t old_usize = 0; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + + if (unlikely(size == 0)) { + if (ptr != NULL) { + /* realloc(ptr, 0) is equivalent to free(ptr). */ + UTRACE(ptr, 0, 0); + tsd = tsd_fetch(); + ifree(tsd, ptr, tcache_get(tsd, false)); + return (NULL); + } + size = 1; + } + + if (likely(ptr != NULL)) { + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + tsd = tsd_fetch(); + + old_usize = isalloc(ptr, config_prof); + if (config_valgrind && unlikely(in_valgrind)) + old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); + + if (config_prof && opt_prof) { + usize = s2u(size); + ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd, + ptr, old_usize, usize); + } else { + if (config_stats || (config_valgrind && + unlikely(in_valgrind))) + usize = s2u(size); + ret = iralloc(tsd, ptr, old_usize, size, 0, false); + } + } else { + /* realloc(NULL, size) is equivalent to malloc(size). */ + ret = imalloc_body(size, &tsd, &usize); + } + + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in realloc(): " + "out of memory\n"); + abort(); + } + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + assert(usize == isalloc(ret, config_prof)); + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, ret); + JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize, + old_rzsize, true, false); + return (ret); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_free(void *ptr) +{ + + UTRACE(ptr, 0, 0); + if (likely(ptr != NULL)) { + tsd_t *tsd = tsd_fetch(); + ifree(tsd, ptr, tcache_get(tsd, false)); + } +} + +/* + * End malloc(3)-compatible functions. + */ +/******************************************************************************/ +/* + * Begin non-standard override functions. 
+ */ + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_memalign(size_t alignment, size_t size) +{ + void *ret JEMALLOC_CC_SILENCE_INIT(NULL); + if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) + ret = NULL; + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); + return (ret); +} +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_valloc(size_t size) +{ + void *ret JEMALLOC_CC_SILENCE_INIT(NULL); + if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) + ret = NULL; + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); + return (ret); +} +#endif + +/* + * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has + * #define je_malloc malloc + */ +#define malloc_is_malloc 1 +#define is_malloc_(a) malloc_is_ ## a +#define is_malloc(a) is_malloc_(a) + +#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) +/* + * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible + * to inconsistently reference libc's malloc(3)-compatible functions + * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). + * + * These definitions interpose hooks in glibc. The functions are actually + * passed an extra argument for the caller return address, which will be + * ignored. + */ +JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; +JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; +JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = + je_memalign; +# endif +#endif + +/* + * End non-standard override functions. + */ +/******************************************************************************/ +/* + * Begin non-standard functions. 
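+ * (mallocx, rallocx, xallocx, sallocx, dallocx, sdallocx, nallocx, the
+ * mallctl* entry points, malloc_stats_print, and malloc_usable_size.)
+ *
+ * Illustrative use of the flag encoding these functions decode (je_
+ * prefix omitted, values arbitrary):
+ *
+ *   void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
+ *   ...
+ *   dallocx(p, 0);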
+ */ + +JEMALLOC_ALWAYS_INLINE_C bool +imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, + size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) +{ + + if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { + *alignment = 0; + *usize = s2u(size); + } else { + *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + *usize = sa2u(size, *alignment); + } + assert(*usize != 0); + *zero = MALLOCX_ZERO_GET(flags); + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + *tcache = NULL; + else + *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + *tcache = tcache_get(tsd, true); + if ((flags & MALLOCX_ARENA_MASK) != 0) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + *arena = arena_get(tsd, arena_ind, true, true); + if (unlikely(*arena == NULL)) + return (true); + } else + *arena = NULL; + return (false); +} + +JEMALLOC_ALWAYS_INLINE_C bool +imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, + size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) +{ + + if (likely(flags == 0)) { + *usize = s2u(size); + assert(*usize != 0); + *alignment = 0; + *zero = false; + *tcache = tcache_get(tsd, true); + *arena = NULL; + return (false); + } else { + return (imallocx_flags_decode_hard(tsd, size, flags, usize, + alignment, zero, tcache, arena)); + } +} + +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) +{ + + if (unlikely(alignment != 0)) + return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); + if (unlikely(zero)) + return (icalloct(tsd, usize, tcache, arena)); + return (imalloct(tsd, usize, tcache, arena)); +} + +static void * +imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) +{ + void *p; + + if (usize <= SMALL_MAXCLASS) { + assert(((alignment == 0) ? 
s2u(LARGE_MINCLASS) : + sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); + p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache, + arena); + if (p == NULL) + return (NULL); + arena_prof_promoted(p, usize); + } else + p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena); + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) +{ + void *p; + size_t alignment; + bool zero; + tcache_t *tcache; + arena_t *arena; + prof_tctx_t *tctx; + + if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, + &zero, &tcache, &arena))) + return (NULL); + tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); + if (likely((uintptr_t)tctx == (uintptr_t)1U)) + p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); + else if ((uintptr_t)tctx > (uintptr_t)1U) { + p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache, + arena); + } else + p = NULL; + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + prof_malloc(p, *usize, tctx); + + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) +{ + void *p; + size_t alignment; + bool zero; + tcache_t *tcache; + arena_t *arena; + + if (likely(flags == 0)) { + if (config_stats || (config_valgrind && unlikely(in_valgrind))) + *usize = s2u(size); + return (imalloc(tsd, size)); + } + + if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize, + &alignment, &zero, &tcache, &arena))) + return (NULL); + p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + return (p); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_mallocx(size_t size, int flags) +{ + tsd_t *tsd; + void *p; + size_t usize; + + assert(size != 0); + + if (unlikely(malloc_init())) + goto label_oom; + tsd = tsd_fetch(); + + if (config_prof && opt_prof) + p = imallocx_prof(tsd, size, flags, &usize); + else + p = imallocx_no_prof(tsd, size, flags, &usize); + if (unlikely(p == NULL)) + goto label_oom; + + if (config_stats) { + assert(usize == isalloc(p, config_prof)); + *tsd_thread_allocatedp_get(tsd) += usize; + } + UTRACE(0, size, p); + JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags)); + return (p); +label_oom: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in mallocx(): out of memory\n"); + abort(); + } + UTRACE(0, size, 0); + return (NULL); +} + +static void * +irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, + prof_tctx_t *tctx) +{ + void *p; + + if (tctx == NULL) + return (NULL); + if (usize <= SMALL_MAXCLASS) { + p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, + zero, tcache, arena); + if (p == NULL) + return (NULL); + arena_prof_promoted(p, usize); + } else { + p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, + tcache, arena); + } + + return (p); +} + +JEMALLOC_ALWAYS_INLINE_C void * +irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, + size_t alignment, size_t *usize, bool zero, tcache_t *tcache, + arena_t *arena) +{ + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = 
prof_tctx_get(old_ptr); + tctx = prof_alloc_prep(tsd, *usize, prof_active, true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, + alignment, zero, tcache, arena, tctx); + } else { + p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero, + tcache, arena); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return (NULL); + } + + if (p == old_ptr && alignment != 0) { + /* + * The allocation did not move, so it is possible that the size + * class is smaller than would guarantee the requested + * alignment, and that the alignment constraint was + * serendipitously satisfied. Additionally, old_usize may not + * be the same as the current usize because of in-place large + * reallocation. Therefore, query the actual value of usize. + */ + *usize = isalloc(p, config_prof); + } + prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr, + old_usize, old_tctx); + + return (p); +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_rallocx(void *ptr, size_t size, int flags) +{ + void *p; + tsd_t *tsd; + size_t usize; + size_t old_usize; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + arena_t *arena; + tcache_t *tcache; + + assert(ptr != NULL); + assert(size != 0); + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + tsd = tsd_fetch(); + + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + arena = arena_get(tsd, arena_ind, true, true); + if (unlikely(arena == NULL)) + goto label_oom; + } else + arena = NULL; + + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + tcache = NULL; + else + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + tcache = tcache_get(tsd, true); + + old_usize = isalloc(ptr, config_prof); + if (config_valgrind && unlikely(in_valgrind)) + old_rzsize = u2rz(old_usize); + + if (config_prof && opt_prof) { + usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); + assert(usize != 0); + p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, + zero, tcache, arena); + if (unlikely(p == NULL)) + goto label_oom; + } else { + p = iralloct(tsd, ptr, old_usize, size, alignment, zero, + tcache, arena); + if (unlikely(p == NULL)) + goto label_oom; + if (config_stats || (config_valgrind && unlikely(in_valgrind))) + usize = isalloc(p, config_prof); + } + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, p); + JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize, + old_rzsize, false, zero); + return (p); +label_oom: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in rallocx(): out of memory\n"); + abort(); + } + UTRACE(ptr, size, 0); + return (NULL); +} + +JEMALLOC_ALWAYS_INLINE_C size_t +ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, + size_t alignment, bool zero) +{ + size_t usize; + + if (ixalloc(ptr, old_usize, size, extra, alignment, zero)) + return (old_usize); + usize = isalloc(ptr, config_prof); + + return (usize); +} + +static size_t +ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, + size_t alignment, bool zero, prof_tctx_t *tctx) +{ + size_t usize; + + if (tctx == NULL) + return (old_usize); + usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero); + + return (usize); +} + +JEMALLOC_ALWAYS_INLINE_C size_t +ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) +{ + size_t usize_max, usize; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(ptr); + /* + * usize isn't knowable before ixalloc() returns when extra is non-zero. + * Therefore, compute its maximum possible value and use that in + * prof_alloc_prep() to decide whether to capture a backtrace. + * prof_realloc() will use the actual usize to decide whether to sample. + */ + usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra, + alignment); + assert(usize_max != 0); + tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + usize = ixallocx_prof_sample(ptr, old_usize, size, extra, + alignment, zero, tctx); + } else { + usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, + zero); + } + if (usize == old_usize) { + prof_alloc_rollback(tsd, tctx, false); + return (usize); + } + prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, + old_tctx); + + return (usize); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_xallocx(void *ptr, size_t size, size_t extra, int flags) +{ + tsd_t *tsd; + size_t usize, old_usize; + UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + + assert(ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + tsd = tsd_fetch(); + + old_usize = isalloc(ptr, config_prof); + + /* Clamp extra if necessary to avoid (size + extra) overflow. */ + if (unlikely(size + extra > HUGE_MAXCLASS)) { + /* Check for size overflow. 
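+		 * If size alone already exceeds HUGE_MAXCLASS the resize
+		 * cannot succeed, so report the allocation as not resized;
+		 * otherwise shrink extra so that size + extra stays within
+		 * HUGE_MAXCLASS.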
*/ + if (unlikely(size > HUGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; + } + extra = HUGE_MAXCLASS - size; + } + + if (config_valgrind && unlikely(in_valgrind)) + old_rzsize = u2rz(old_usize); + + if (config_prof && opt_prof) { + usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, + alignment, zero); + } else { + usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, + zero); + } + if (unlikely(usize == old_usize)) + goto label_not_resized; + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize, + old_rzsize, false, zero); +label_not_resized: + UTRACE(ptr, size, ptr); + return (usize); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_sallocx(const void *ptr, int flags) +{ + size_t usize; + + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + + if (config_ivsalloc) + usize = ivsalloc(ptr, config_prof); + else + usize = isalloc(ptr, config_prof); + + return (usize); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_dallocx(void *ptr, int flags) +{ + tsd_t *tsd; + tcache_t *tcache; + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + tsd = tsd_fetch(); + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + tcache = NULL; + else + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + tcache = tcache_get(tsd, false); + + UTRACE(ptr, 0, 0); + ifree(tsd_fetch(), ptr, tcache); +} + +JEMALLOC_ALWAYS_INLINE_C size_t +inallocx(size_t size, int flags) +{ + size_t usize; + + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) + usize = s2u(size); + else + usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + assert(usize != 0); + return (usize); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_sdallocx(void *ptr, size_t size, int flags) +{ + tsd_t *tsd; + tcache_t *tcache; + size_t usize; + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + usize = inallocx(size, flags); + assert(usize == isalloc(ptr, config_prof)); + + tsd = tsd_fetch(); + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) + tcache = NULL; + else + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } else + tcache = tcache_get(tsd, false); + + UTRACE(ptr, 0, 0); + isfree(tsd, ptr, usize, tcache); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_nallocx(size_t size, int flags) +{ + + assert(size != 0); + + if (unlikely(malloc_init())) + return (0); + + return (inallocx(size, flags)); +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + + if (unlikely(malloc_init())) + return (EAGAIN); + + return (ctl_byname(name, oldp, oldlenp, newp, newlen)); +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + + if (unlikely(malloc_init())) + return (EAGAIN); + + return (ctl_nametomib(name, mibp, miblenp)); +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + + if (unlikely(malloc_init())) + return (EAGAIN); + + return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_malloc_stats_print(void (*write_cb)(void *, const char *), void 
*cbopaque, + const char *opts) +{ + + stats_print(write_cb, cbopaque, opts); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) +{ + size_t ret; + + assert(malloc_initialized() || IS_INITIALIZER); + malloc_thread_init(); + + if (config_ivsalloc) + ret = ivsalloc(ptr, config_prof); + else + ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof); + + return (ret); +} + +/* + * End non-standard functions. + */ +/******************************************************************************/ +/* + * The following functions are used by threading libraries for protection of + * malloc during fork(). + */ + +/* + * If an application creates a thread before doing any allocation in the main + * thread, then calls fork(2) in the main thread followed by memory allocation + * in the child process, a race can occur that results in deadlock within the + * child: the main thread may have forked while the created thread had + * partially initialized the allocator. Ordinarily jemalloc prevents + * fork/malloc races via the following functions it registers during + * initialization using pthread_atfork(), but of course that does no good if + * the allocator isn't fully initialized at fork time. The following library + * constructor is a partial solution to this problem. It may still be possible + * to trigger the deadlock described above, but doing so would involve forking + * via a library constructor that runs before jemalloc's runs. + */ +JEMALLOC_ATTR(constructor) +static void +jemalloc_constructor(void) +{ + + malloc_init(); +} + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_prefork(void) +#else +JEMALLOC_EXPORT void +_malloc_prefork(void) +#endif +{ + unsigned i; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) + return; +#endif + assert(malloc_initialized()); + + /* Acquire all mutexes in a safe order. */ + ctl_prefork(); + prof_prefork(); + malloc_mutex_prefork(&arenas_lock); + for (i = 0; i < narenas_total; i++) { + if (arenas[i] != NULL) + arena_prefork(arenas[i]); + } + chunk_prefork(); + base_prefork(); +} + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_postfork_parent(void) +#else +JEMALLOC_EXPORT void +_malloc_postfork(void) +#endif +{ + unsigned i; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) + return; +#endif + assert(malloc_initialized()); + + /* Release all mutexes, now that fork() has completed. */ + base_postfork_parent(); + chunk_postfork_parent(); + for (i = 0; i < narenas_total; i++) { + if (arenas[i] != NULL) + arena_postfork_parent(arenas[i]); + } + malloc_mutex_postfork_parent(&arenas_lock); + prof_postfork_parent(); + ctl_postfork_parent(); +} + +void +jemalloc_postfork_child(void) +{ + unsigned i; + + assert(malloc_initialized()); + + /* Release all mutexes, now that fork() has completed. 
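+	 * They are handled in the reverse of the order in which the prefork
+	 * handler above acquired them.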
*/ + base_postfork_child(); + chunk_postfork_child(); + for (i = 0; i < narenas_total; i++) { + if (arenas[i] != NULL) + arena_postfork_child(arenas[i]); + } + malloc_mutex_postfork_child(&arenas_lock); + prof_postfork_child(); + ctl_postfork_child(); +} + +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/src/mb.c b/redis.submodule/jemalloc/src/mb.c new file mode 100644 index 0000000..dc2c0a2 --- /dev/null +++ b/redis.submodule/jemalloc/src/mb.c @@ -0,0 +1,2 @@ +#define JEMALLOC_MB_C_ +#include "jemalloc/internal/jemalloc_internal.h" diff --git a/redis.submodule/jemalloc/src/mutex.c b/redis.submodule/jemalloc/src/mutex.c new file mode 100644 index 0000000..2d47af9 --- /dev/null +++ b/redis.submodule/jemalloc/src/mutex.c @@ -0,0 +1,153 @@ +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +#include +#endif + +#ifndef _CRT_SPINCOUNT +#define _CRT_SPINCOUNT 4000 +#endif + +/******************************************************************************/ +/* Data. */ + +#ifdef JEMALLOC_LAZY_LOCK +bool isthreaded = false; +#endif +#ifdef JEMALLOC_MUTEX_INIT_CB +static bool postpone_init = true; +static malloc_mutex_t *postponed_mutexes = NULL; +#endif + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +static void pthread_create_once(void); +#endif + +/******************************************************************************/ +/* + * We intercept pthread_create() calls in order to toggle isthreaded if the + * process goes multi-threaded. + */ + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); + +static void +pthread_create_once(void) +{ + + pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); + if (pthread_create_fptr == NULL) { + malloc_write(": Error in dlsym(RTLD_NEXT, " + "\"pthread_create\")\n"); + abort(); + } + + isthreaded = true; +} + +JEMALLOC_EXPORT int +pthread_create(pthread_t *__restrict thread, + const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), + void *__restrict arg) +{ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + pthread_once(&once_control, pthread_create_once); + + return (pthread_create_fptr(thread, attr, start_routine, arg)); +} +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_MUTEX_INIT_CB +JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); +#endif + +bool +malloc_mutex_init(malloc_mutex_t *mutex) +{ + +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + InitializeSRWLock(&mutex->lock); +# else + if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, + _CRT_SPINCOUNT)) + return (true); +# endif +#elif (defined(JEMALLOC_OSSPIN)) + mutex->lock = 0; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + if (postpone_init) { + mutex->postponed_next = postponed_mutexes; + postponed_mutexes = mutex; + } else { + if (_pthread_mutex_init_calloc_cb(&mutex->lock, + bootstrap_calloc) != 0) + return (true); + } +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) + return (true); + pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); + if (pthread_mutex_init(&mutex->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return (true); + } + pthread_mutexattr_destroy(&attr); +#endif + return (false); +} + +void 
+malloc_mutex_prefork(malloc_mutex_t *mutex) +{ + + malloc_mutex_lock(mutex); +} + +void +malloc_mutex_postfork_parent(malloc_mutex_t *mutex) +{ + + malloc_mutex_unlock(mutex); +} + +void +malloc_mutex_postfork_child(malloc_mutex_t *mutex) +{ + +#ifdef JEMALLOC_MUTEX_INIT_CB + malloc_mutex_unlock(mutex); +#else + if (malloc_mutex_init(mutex)) { + malloc_printf(": Error re-initializing mutex in " + "child\n"); + if (opt_abort) + abort(); + } +#endif +} + +bool +mutex_boot(void) +{ + +#ifdef JEMALLOC_MUTEX_INIT_CB + postpone_init = false; + while (postponed_mutexes != NULL) { + if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, + bootstrap_calloc) != 0) + return (true); + postponed_mutexes = postponed_mutexes->postponed_next; + } +#endif + return (false); +} diff --git a/redis.submodule/jemalloc/src/pages.c b/redis.submodule/jemalloc/src/pages.c new file mode 100644 index 0000000..83a167f --- /dev/null +++ b/redis.submodule/jemalloc/src/pages.c @@ -0,0 +1,173 @@ +#define JEMALLOC_PAGES_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +void * +pages_map(void *addr, size_t size) +{ + void *ret; + + assert(size != 0); + +#ifdef _WIN32 + /* + * If VirtualAlloc can't allocate at the given address when one is + * given, it fails and returns NULL. + */ + ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, + PAGE_READWRITE); +#else + /* + * We don't use MAP_FIXED here, because it can cause the *replacement* + * of existing mappings, and we only want to create new mappings. + */ + ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, + -1, 0); + assert(ret != NULL); + + if (ret == MAP_FAILED) + ret = NULL; + else if (addr != NULL && ret != addr) { + /* + * We succeeded in mapping memory, but not in the right place. + */ + pages_unmap(ret, size); + ret = NULL; + } +#endif + assert(ret == NULL || (addr == NULL && ret != addr) + || (addr != NULL && ret == addr)); + return (ret); +} + +void +pages_unmap(void *addr, size_t size) +{ + +#ifdef _WIN32 + if (VirtualFree(addr, 0, MEM_RELEASE) == 0) +#else + if (munmap(addr, size) == -1) +#endif + { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + malloc_printf(": Error in " +#ifdef _WIN32 + "VirtualFree" +#else + "munmap" +#endif + "(): %s\n", buf); + if (opt_abort) + abort(); + } +} + +void * +pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) +{ + void *ret = (void *)((uintptr_t)addr + leadsize); + + assert(alloc_size >= leadsize + size); +#ifdef _WIN32 + { + void *new_addr; + + pages_unmap(addr, alloc_size); + new_addr = pages_map(ret, size); + if (new_addr == ret) + return (ret); + if (new_addr) + pages_unmap(new_addr, size); + return (NULL); + } +#else + { + size_t trailsize = alloc_size - leadsize - size; + + if (leadsize != 0) + pages_unmap(addr, leadsize); + if (trailsize != 0) + pages_unmap((void *)((uintptr_t)ret + size), trailsize); + return (ret); + } +#endif +} + +static bool +pages_commit_impl(void *addr, size_t size, bool commit) +{ + +#ifndef _WIN32 + /* + * The following decommit/commit implementation is functional, but + * always disabled because it doesn't add value beyong improved + * debugging (at the cost of extra system calls) on systems that + * overcommit. + */ + if (false) { + int prot = commit ? 
(PROT_READ | PROT_WRITE) : PROT_NONE; + void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON | + MAP_FIXED, -1, 0); + if (result == MAP_FAILED) + return (true); + if (result != addr) { + /* + * We succeeded in mapping memory, but not in the right + * place. + */ + pages_unmap(result, size); + return (true); + } + return (false); + } +#endif + return (true); +} + +bool +pages_commit(void *addr, size_t size) +{ + + return (pages_commit_impl(addr, size, true)); +} + +bool +pages_decommit(void *addr, size_t size) +{ + + return (pages_commit_impl(addr, size, false)); +} + +bool +pages_purge(void *addr, size_t size) +{ + bool unzeroed; + +#ifdef _WIN32 + VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + unzeroed = true; +#elif defined(JEMALLOC_HAVE_MADVISE) +# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED +# define JEMALLOC_MADV_PURGE MADV_DONTNEED +# define JEMALLOC_MADV_ZEROS true +# elif defined(JEMALLOC_PURGE_MADVISE_FREE) +# define JEMALLOC_MADV_PURGE MADV_FREE +# define JEMALLOC_MADV_ZEROS false +# else +# error "No madvise(2) flag defined for purging unused dirty pages." +# endif + int err = madvise(addr, size, JEMALLOC_MADV_PURGE); + unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0); +# undef JEMALLOC_MADV_PURGE +# undef JEMALLOC_MADV_ZEROS +#else + /* Last resort no-op. */ + unzeroed = true; +#endif + return (unzeroed); +} + diff --git a/redis.submodule/jemalloc/src/prof.c b/redis.submodule/jemalloc/src/prof.c new file mode 100644 index 0000000..5d2b959 --- /dev/null +++ b/redis.submodule/jemalloc/src/prof.c @@ -0,0 +1,2237 @@ +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_internal.h" +/******************************************************************************/ + +#ifdef JEMALLOC_PROF_LIBUNWIND +#define UNW_LOCAL_ONLY +#include +#endif + +#ifdef JEMALLOC_PROF_LIBGCC +#include +#endif + +/******************************************************************************/ +/* Data. */ + +bool opt_prof = false; +bool opt_prof_active = true; +bool opt_prof_thread_active_init = true; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; +bool opt_prof_final = false; +bool opt_prof_leak = false; +bool opt_prof_accum = false; +char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* + * Initialized as opt_prof_active, and accessed via + * prof_active_[gs]et{_unlocked,}(). + */ +bool prof_active; +static malloc_mutex_t prof_active_mtx; + +/* + * Initialized as opt_prof_thread_active_init, and accessed via + * prof_thread_active_init_[gs]et(). + */ +static bool prof_thread_active_init; +static malloc_mutex_t prof_thread_active_init_mtx; + +/* + * Initialized as opt_prof_gdump, and accessed via + * prof_gdump_[gs]et{_unlocked,}(). + */ +bool prof_gdump_val; +static malloc_mutex_t prof_gdump_mtx; + +uint64_t prof_interval = 0; + +size_t lg_prof_sample; + +/* + * Table of mutexes that are shared among gctx's. These are leaf locks, so + * there is no problem with using them for more than one gctx at the same time. + * The primary motivation for this sharing though is that gctx's are ephemeral, + * and destroying mutexes causes complications for systems that allocate when + * creating/destroying mutexes. + */ +static malloc_mutex_t *gctx_locks; +static unsigned cum_gctxs; /* Atomic counter. */ + +/* + * Table of mutexes that are shared among tdata's. 
No operations require + * holding multiple tdata locks, so there is no problem with using them for more + * than one tdata at the same time, even though a gctx lock may be acquired + * while holding a tdata lock. + */ +static malloc_mutex_t *tdata_locks; + +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2gctx; +static malloc_mutex_t bt2gctx_mtx; + +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. + */ +static prof_tdata_tree_t tdatas; +static malloc_mutex_t tdatas_mtx; + +static uint64_t next_thr_uid; +static malloc_mutex_t next_thr_uid_mtx; + +static malloc_mutex_t prof_dump_seq_mtx; +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. + */ +static malloc_mutex_t prof_dump_mtx; +static char prof_dump_buf[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PROF_DUMP_BUFSIZE +#else + 1 +#endif +]; +static unsigned prof_dump_buf_end; +static int prof_dump_fd; + +/* Do not dump any profiles until bootstrapping is complete. */ +static bool prof_booted = false; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static bool prof_tctx_should_destroy(prof_tctx_t *tctx); +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); +static bool prof_tdata_should_destroy(prof_tdata_t *tdata, + bool even_if_attached); +static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached); +static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); + +/******************************************************************************/ +/* Red-black trees. */ + +JEMALLOC_INLINE_C int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) +{ + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } + } + return (ret); +} + +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) + +JEMALLOC_INLINE_C int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) +{ + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? 
a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) + ret = (a_len > b_len) - (a_len < b_len); + return (ret); +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +JEMALLOC_INLINE_C int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) +{ + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; + + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + } + return (ret); +} + +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + +void +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) +{ + prof_tdata_t *tdata; + + cassert(config_prof); + + if (updated) { + /* + * Compute a new sample threshold. This isn't very important in + * practice, because this function is rarely executed, so the + * potential for sample bias is minimal except in contrived + * programs. + */ + tdata = prof_tdata_get(tsd, true); + if (tdata != NULL) + prof_sample_threshold_update(tdata); + } + + if ((uintptr_t)tctx > (uintptr_t)1U) { + malloc_mutex_lock(tctx->tdata->lock); + tctx->prepared = false; + if (prof_tctx_should_destroy(tctx)) + prof_tctx_destroy(tsd, tctx); + else + malloc_mutex_unlock(tctx->tdata->lock); + } +} + +void +prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) +{ + + prof_tctx_set(ptr, usize, tctx); + + malloc_mutex_lock(tctx->tdata->lock); + tctx->cnts.curobjs++; + tctx->cnts.curbytes += usize; + if (opt_prof_accum) { + tctx->cnts.accumobjs++; + tctx->cnts.accumbytes += usize; + } + tctx->prepared = false; + malloc_mutex_unlock(tctx->tdata->lock); +} + +void +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) +{ + + malloc_mutex_lock(tctx->tdata->lock); + assert(tctx->cnts.curobjs > 0); + assert(tctx->cnts.curbytes >= usize); + tctx->cnts.curobjs--; + tctx->cnts.curbytes -= usize; + + if (prof_tctx_should_destroy(tctx)) + prof_tctx_destroy(tsd, tctx); + else + malloc_mutex_unlock(tctx->tdata->lock); +} + +void +bt_init(prof_bt_t *bt, void **vec) +{ + + cassert(config_prof); + + bt->vec = vec; + bt->len = 0; +} + +JEMALLOC_INLINE_C void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) +{ + + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + + malloc_mutex_lock(&bt2gctx_mtx); +} + +JEMALLOC_INLINE_C void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) +{ + + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + malloc_mutex_unlock(&bt2gctx_mtx); + + if (tdata != NULL) { + bool idump, gdump; + + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) + prof_idump(); + if (gdump) + prof_gdump(); + } +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +void +prof_backtrace(prof_bt_t *bt) +{ + int nframes; + + cassert(config_prof); + assert(bt->len == 0); + assert(bt->vec != NULL); + + nframes = unw_backtrace(bt->vec, PROF_BT_MAX); + if (nframes <= 0) + return; + bt->len = nframes; +} +#elif (defined(JEMALLOC_PROF_LIBGCC)) +static _Unwind_Reason_Code +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) +{ + + 
cassert(config_prof); + + return (_URC_NO_REASON); +} + +static _Unwind_Reason_Code +prof_unwind_callback(struct _Unwind_Context *context, void *arg) +{ + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + void *ip; + + cassert(config_prof); + + ip = (void *)_Unwind_GetIP(context); + if (ip == NULL) + return (_URC_END_OF_STACK); + data->bt->vec[data->bt->len] = ip; + data->bt->len++; + if (data->bt->len == data->max) + return (_URC_END_OF_STACK); + + return (_URC_NO_REASON); +} + +void +prof_backtrace(prof_bt_t *bt) +{ + prof_unwind_data_t data = {bt, PROF_BT_MAX}; + + cassert(config_prof); + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#elif (defined(JEMALLOC_PROF_GCC)) +void +prof_backtrace(prof_bt_t *bt) +{ +#define BT_FRAME(i) \ + if ((i) < PROF_BT_MAX) { \ + void *p; \ + if (__builtin_frame_address(i) == 0) \ + return; \ + p = __builtin_return_address(i); \ + if (p == NULL) \ + return; \ + bt->vec[(i)] = p; \ + bt->len = (i) + 1; \ + } else \ + return; + + cassert(config_prof); + + BT_FRAME(0) + BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) + BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) +#undef BT_FRAME +} +#else +void +prof_backtrace(prof_bt_t *bt) +{ + + cassert(config_prof); + not_reached(); +} +#endif + +static malloc_mutex_t * +prof_gctx_mutex_choose(void) +{ + unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); + + return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); +} + +static malloc_mutex_t * +prof_tdata_mutex_choose(uint64_t thr_uid) +{ + + return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); +} + +static prof_gctx_t * +prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) +{ + /* + * Create a single allocation that has space for vec of length bt->len. 
+ */ + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t, + vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true), + true, NULL); + if (gctx == NULL) + return (NULL); + gctx->lock = prof_gctx_mutex_choose(); + /* + * Set nlimbo to 1, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. */ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return (gctx); +} + +static void +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, + prof_tdata_t *tdata) +{ + + cassert(config_prof); + + /* + * Check that gctx is still unused by any thread cache before destroying + * it. prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry + * into this function. + */ + prof_enter(tsd, tdata_self); + malloc_mutex_lock(gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) + not_reached(); + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(gctx->lock); + idalloctm(tsd, gctx, tcache_get(tsd, false), true); + } else { + /* + * Compensate for increment in prof_tctx_destroy() or + * prof_lookup(). + */ + gctx->nlimbo--; + malloc_mutex_unlock(gctx->lock); + prof_leave(tsd, tdata_self); + } +} + +/* tctx->tdata->lock must be held. */ +static bool +prof_tctx_should_destroy(prof_tctx_t *tctx) +{ + + if (opt_prof_accum) + return (false); + if (tctx->cnts.curobjs != 0) + return (false); + if (tctx->prepared) + return (false); + return (true); +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) +{ + + if (opt_prof_accum) + return (false); + if (!tctx_tree_empty(&gctx->tctxs)) + return (false); + if (gctx->nlimbo != 0) + return (false); + return (true); +} + +/* tctx->tdata->lock is held upon entry, and released before return. */ +static void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) +{ + prof_tdata_t *tdata = tctx->tdata; + prof_gctx_t *gctx = tctx->gctx; + bool destroy_tdata, destroy_tctx, destroy_gctx; + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + destroy_tdata = prof_tdata_should_destroy(tdata, false); + malloc_mutex_unlock(tdata->lock); + + malloc_mutex_lock(gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). 
+ */ + gctx->nlimbo++; + destroy_gctx = true; + } else + destroy_gctx = false; + break; + case prof_tctx_state_dumping: + /* + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. + */ + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, + tdata); + } + + if (destroy_tdata) + prof_tdata_destroy(tsd, tdata, false); + + if (destroy_tctx) + idalloctm(tsd, tctx, tcache_get(tsd, false), true); +} + +static bool +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) +{ + union { + prof_gctx_t *p; + void *v; + } gctx; + union { + prof_bt_t *p; + void *v; + } btkey; + bool new_gctx; + + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + /* bt has never been seen before. Insert it. */ + gctx.p = prof_gctx_create(tsd, bt); + if (gctx.v == NULL) { + prof_leave(tsd, tdata); + return (true); + } + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd, gctx.v, tcache_get(tsd, false), true); + return (true); + } + new_gctx = true; + } else { + /* + * Increment nlimbo, in order to avoid a race condition with + * prof_tctx_destroy()/prof_gctx_try_destroy(). + */ + malloc_mutex_lock(gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(gctx.p->lock); + new_gctx = false; + } + prof_leave(tsd, tdata); + + *p_btkey = btkey.v; + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return (false); +} + +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) +{ + union { + prof_tctx_t *p; + void *v; + } ret; + prof_tdata_t *tdata; + bool not_found; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return (NULL); + + malloc_mutex_lock(tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) /* Note double negative! */ + ret.p->prepared = true; + malloc_mutex_unlock(tdata->lock); + if (not_found) { + tcache_t *tcache; + void *btkey; + prof_gctx_t *gctx; + bool new_gctx, error; + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) + return (NULL); + + /* Link a prof_tctx_t into gctx for this thread. 
*/ + tcache = tcache_get(tsd, true); + ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true, + NULL); + if (ret.p == NULL) { + if (new_gctx) + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + return (NULL); + } + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tdata->lock); + if (error) { + if (new_gctx) + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + idalloctm(tsd, ret.v, tcache, true); + return (NULL); + } + malloc_mutex_lock(gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(gctx->lock); + } + + return (ret.p); +} + +void +prof_sample_threshold_update(prof_tdata_t *tdata) +{ + /* + * The body of this function is compiled out unless heap profiling is + * enabled, so that it is possible to compile jemalloc with floating + * point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a + * workaround for versions of glibc that don't properly save/restore + * floating point registers during dynamic lazy symbol loading (which + * internally calls into whatever malloc implementation happens to be + * integrated into the application). Note that some compilers (e.g. + * gcc 4.8) may use floating point registers for fast memory moves, so + * jemalloc must be compiled with such optimizations disabled (e.g. + * -mno-sse) in order for the workaround to be complete. + */ +#ifdef JEMALLOC_PROF + uint64_t r; + double u; + + if (!config_prof) + return; + + if (lg_prof_sample == 0) { + tdata->bytes_until_sample = 0; + return; + } + + /* + * Compute sample interval as a geometrically distributed random + * variable with mean (2^lg_prof_sample). 
+ * + * __ __ + * | log(u) | 1 + * tdata->bytes_until_sample = | -------- |, where p = --------------- + * | log(1-p) | lg_prof_sample + * 2 + * + * For more information on the math, see: + * + * Non-Uniform Random Variate Generation + * Luc Devroye + * Springer-Verlag, New York, 1986 + * pp 500 + * (http://luc.devroye.org/rnbookindex.html) + */ + prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005), + UINT64_C(1442695040888963407)); + u = (double)r * (1.0/9007199254740992.0L); + tdata->bytes_until_sample = (uint64_t)(log(u) / + log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + + (uint64_t)1U; +#endif +} + +#ifdef JEMALLOC_JET +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +{ + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return (NULL); +} + +size_t +prof_tdata_count(void) +{ + size_t tdata_count = 0; + + malloc_mutex_lock(&tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(&tdatas_mtx); + + return (tdata_count); +} +#endif + +#ifdef JEMALLOC_JET +size_t +prof_bt_count(void) +{ + size_t bt_count; + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return (0); + + malloc_mutex_lock(&bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(&bt2gctx_mtx); + + return (bt_count); +} +#endif + +#ifdef JEMALLOC_JET +#undef prof_dump_open +#define prof_dump_open JEMALLOC_N(prof_dump_open_impl) +#endif +static int +prof_dump_open(bool propagate_err, const char *filename) +{ + int fd; + + fd = creat(filename, 0644); + if (fd == -1 && !propagate_err) { + malloc_printf(": creat(\"%s\"), 0644) failed\n", + filename); + if (opt_abort) + abort(); + } + + return (fd); +} +#ifdef JEMALLOC_JET +#undef prof_dump_open +#define prof_dump_open JEMALLOC_N(prof_dump_open) +prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); +#endif + +static bool +prof_dump_flush(bool propagate_err) +{ + bool ret = false; + ssize_t err; + + cassert(config_prof); + + err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + if (err == -1) { + if (!propagate_err) { + malloc_write(": write() failed during heap " + "profile flush\n"); + if (opt_abort) + abort(); + } + ret = true; + } + prof_dump_buf_end = 0; + + return (ret); +} + +static bool +prof_dump_close(bool propagate_err) +{ + bool ret; + + assert(prof_dump_fd != -1); + ret = prof_dump_flush(propagate_err); + close(prof_dump_fd); + prof_dump_fd = -1; + + return (ret); +} + +static bool +prof_dump_write(bool propagate_err, const char *s) +{ + unsigned i, slen, n; + + cassert(config_prof); + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) + if (prof_dump_flush(propagate_err) && propagate_err) + return (true); + + if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } + + return (false); +} + +JEMALLOC_FORMAT_PRINTF(2, 3) +static bool +prof_dump_printf(bool propagate_err, const char *format, ...) 
+{ + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_dump_write(propagate_err, buf); + + return (ret); +} + +/* tctx->tdata->lock is held. */ +static void +prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) +{ + + malloc_mutex_lock(tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } +} + +/* gctx->lock is held. */ +static void +prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) +{ + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + } +} + +/* tctx->gctx is held. */ +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) +{ + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return (NULL); +} + +/* gctx->lock is held. */ +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) +{ + bool propagate_err = *(bool *)arg; + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) + return (tctx); + break; + default: + not_reached(); + } + return (NULL); +} + +/* tctx->gctx is held. */ +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) +{ + prof_tctx_t *ret; + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return (ret); +} + +static void +prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) +{ + + cassert(config_prof); + + malloc_mutex_lock(gctx->lock); + + /* + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in + * prof_dump()'s second pass. 
+ */ + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); + + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); + + malloc_mutex_unlock(gctx->lock); +} + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) +{ + size_t *leak_ngctx = (size_t *)arg; + + malloc_mutex_lock(gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL); + if (gctx->cnt_summed.curobjs != 0) + (*leak_ngctx)++; + malloc_mutex_unlock(gctx->lock); + + return (NULL); +} + +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) +{ + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; + + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. + */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, NULL); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd, to_destroy, + tcache_get(tsd, false), true); + } else + next = NULL; + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } else + malloc_mutex_unlock(gctx->lock); + } +} + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +{ + prof_cnt_t *cnt_all = (prof_cnt_t *)arg; + + malloc_mutex_lock(tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) + prof_tctx_merge_tdata(tctx.p, tdata); + + cnt_all->curobjs += tdata->cnt_summed.curobjs; + cnt_all->curbytes += tdata->cnt_summed.curbytes; + if (opt_prof_accum) { + cnt_all->accumobjs += tdata->cnt_summed.accumobjs; + cnt_all->accumbytes += tdata->cnt_summed.accumbytes; + } + } else + tdata->dumping = false; + malloc_mutex_unlock(tdata->lock); + + return (NULL); +} + +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +{ + bool propagate_err = *(bool *)arg; + + if (!tdata->dumping) + return (NULL); + + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", + tdata->thr_uid, tdata->cnt_summed.curobjs, + tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, + tdata->cnt_summed.accumbytes, + (tdata->thread_name != NULL) ? " " : "", + (tdata->thread_name != NULL) ? 
tdata->thread_name : "")) + return (tdata); + return (NULL); +} + +#ifdef JEMALLOC_JET +#undef prof_dump_header +#define prof_dump_header JEMALLOC_N(prof_dump_header_impl) +#endif +static bool +prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) +{ + bool ret; + + if (prof_dump_printf(propagate_err, + "heap_v2/%"FMTu64"\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) + return (true); + + malloc_mutex_lock(&tdatas_mtx); + ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, + (void *)&propagate_err) != NULL); + malloc_mutex_unlock(&tdatas_mtx); + return (ret); +} +#ifdef JEMALLOC_JET +#undef prof_dump_header +#define prof_dump_header JEMALLOC_N(prof_dump_header) +prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); +#endif + +/* gctx->lock is held. */ +static bool +prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, + prof_gctx_tree_t *gctxs) +{ + bool ret; + unsigned i; + + cassert(config_prof); + + /* Avoid dumping such gctx's that have no useful data. */ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumbytes == 0); + ret = false; + goto label_return; + } + + if (prof_dump_printf(propagate_err, "@")) { + ret = true; + goto label_return; + } + for (i = 0; i < bt->len; i++) { + if (prof_dump_printf(propagate_err, " %#"FMTxPTR, + (uintptr_t)bt->vec[i])) { + ret = true; + goto label_return; + } + } + + if (prof_dump_printf(propagate_err, + "\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, + gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { + ret = true; + goto label_return; + } + + if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, + (void *)&propagate_err) != NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return (ret); +} + +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps(const char *format, ...) +{ + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + mfd = open(filename, O_RDONLY); + + return (mfd); +} + +static bool +prof_dump_maps(bool propagate_err) +{ + bool ret; + int mfd; + + cassert(config_prof); +#ifdef __FreeBSD__ + mfd = prof_open_maps("/proc/curproc/map"); +#else + { + int pid = getpid(); + + mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) + mfd = prof_open_maps("/proc/%d/maps", pid); + } +#endif + if (mfd != -1) { + ssize_t nread; + + if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) { + ret = true; + goto label_return; + } + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). 
*/ + if (prof_dump_flush(propagate_err) && + propagate_err) { + ret = true; + goto label_return; + } + } + nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], + PROF_DUMP_BUFSIZE - prof_dump_buf_end); + } while (nread > 0); + } else { + ret = true; + goto label_return; + } + + ret = false; +label_return: + if (mfd != -1) + close(mfd); + return (ret); +} + +static void +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, + const char *filename) +{ + + if (cnt_all->curbytes != 0) { + malloc_printf(": Leak summary: %"FMTu64" byte%s, %" + FMTu64" object%s, %zu context%s\n", + cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", + cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", + leak_ngctx, (leak_ngctx != 1) ? "s" : ""); + malloc_printf( + ": Run jeprof on \"%s\" for leak detail\n", + filename); + } +} + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) +{ + prof_gctx_t *ret; + bool propagate_err = *(bool *)arg; + + malloc_mutex_lock(gctx->lock); + + if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) { + ret = gctx; + goto label_return; + } + + ret = NULL; +label_return: + malloc_mutex_unlock(gctx->lock); + return (ret); +} + +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) +{ + prof_tdata_t *tdata; + prof_cnt_t cnt_all; + size_t tabind; + union { + prof_gctx_t *p; + void *v; + } gctx; + size_t leak_ngctx; + prof_gctx_tree_t gctxs; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (true); + + malloc_mutex_lock(&prof_dump_mtx); + prof_enter(tsd, tdata); + + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(&gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) + prof_dump_gctx_prep(gctx.p, &gctxs); + + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. + */ + memset(&cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(&tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all); + malloc_mutex_unlock(&tdatas_mtx); + + /* Merge tctx stats into gctx's. */ + leak_ngctx = 0; + gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx); + + prof_leave(tsd, tdata); + + /* Create dump file. */ + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) + goto label_open_close_error; + + /* Dump profile header. */ + if (prof_dump_header(propagate_err, &cnt_all)) + goto label_write_error; + + /* Dump per gctx profile stats. */ + if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, + (void *)&propagate_err) != NULL) + goto label_write_error; + + /* Dump /proc//maps if possible. 
*/ + if (prof_dump_maps(propagate_err)) + goto label_write_error; + + if (prof_dump_close(propagate_err)) + goto label_open_close_error; + + prof_gctx_finish(tsd, &gctxs); + malloc_mutex_unlock(&prof_dump_mtx); + + if (leakcheck) + prof_leakcheck(&cnt_all, leak_ngctx, filename); + + return (false); +label_write_error: + prof_dump_close(propagate_err); +label_open_close_error: + prof_gctx_finish(tsd, &gctxs); + malloc_mutex_unlock(&prof_dump_mtx); + return (true); +} + +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(char *filename, char v, uint64_t vseq) +{ + + cassert(config_prof); + + if (vseq != VSEQ_INVALID) { + /* "...v.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c%"FMTu64".heap", + opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); + } else { + /* "....heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"FMTu64".%c.heap", + opt_prof_prefix, (int)getpid(), prof_dump_seq, v); + } + prof_dump_seq++; +} + +static void +prof_fdump(void) +{ + tsd_t *tsd; + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + assert(opt_prof_final); + assert(opt_prof_prefix[0] != '\0'); + + if (!prof_booted) + return; + tsd = tsd_fetch(); + + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); +} + +void +prof_idump(void) +{ + tsd_t *tsd; + prof_tdata_t *tdata; + + cassert(config_prof); + + if (!prof_booted) + return; + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return; + if (tdata->enq) { + tdata->enq_idump = true; + return; + } + + if (opt_prof_prefix[0] != '\0') { + char filename[PATH_MAX + 1]; + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'i', prof_dump_iseq); + prof_dump_iseq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); + } +} + +bool +prof_mdump(const char *filename) +{ + tsd_t *tsd; + char filename_buf[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + + if (!opt_prof || !prof_booted) + return (true); + tsd = tsd_fetch(); + + if (filename == NULL) { + /* No filename specified, so automatically generate one. 
*/ + if (opt_prof_prefix[0] == '\0') + return (true); + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + filename = filename_buf; + } + return (prof_dump(tsd, true, filename, false)); +} + +void +prof_gdump(void) +{ + tsd_t *tsd; + prof_tdata_t *tdata; + + cassert(config_prof); + + if (!prof_booted) + return; + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) + return; + if (tdata->enq) { + tdata->enq_gdump = true; + return; + } + + if (opt_prof_prefix[0] != '\0') { + char filename[DUMP_FILENAME_BUFSIZE]; + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'u', prof_dump_useq); + prof_dump_useq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); + } +} + +static void +prof_bt_hash(const void *key, size_t r_hash[2]) +{ + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + + hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash); +} + +static bool +prof_bt_keycomp(const void *k1, const void *k2) +{ + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + + if (bt1->len != bt2->len) + return (false); + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + +JEMALLOC_INLINE_C uint64_t +prof_thr_uid_alloc(void) +{ + uint64_t thr_uid; + + malloc_mutex_lock(&next_thr_uid_mtx); + thr_uid = next_thr_uid; + next_thr_uid++; + malloc_mutex_unlock(&next_thr_uid_mtx); + + return (thr_uid); +} + +static prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) +{ + prof_tdata_t *tdata; + tcache_t *tcache; + + cassert(config_prof); + + /* Initialize an empty cache for this thread. */ + tcache = tcache_get(tsd, true); + tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false, + tcache, true, NULL); + if (tdata == NULL) + return (NULL); + + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, + prof_bt_hash, prof_bt_keycomp)) { + idalloctm(tsd, tdata, tcache, true); + return (NULL); + } + + tdata->prng_state = (uint64_t)(uintptr_t)tdata; + prof_sample_threshold_update(tdata); + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(&tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(&tdatas_mtx); + + return (tdata); +} + +prof_tdata_t * +prof_tdata_init(tsd_t *tsd) +{ + + return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL, + prof_thread_active_init_get())); +} + +/* tdata->lock must be held. */ +static bool +prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached) +{ + + if (tdata->attached && !even_if_attached) + return (false); + if (ckh_count(&tdata->bt2tctx) != 0) + return (false); + return (true); +} + +/* tdatas_mtx must be held. 
*/ +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) +{ + tcache_t *tcache; + + assert(prof_tdata_should_destroy(tdata, even_if_attached)); + assert(tsd_prof_tdata_get(tsd) != tdata); + + tdata_tree_remove(&tdatas, tdata); + + tcache = tcache_get(tsd, false); + if (tdata->thread_name != NULL) + idalloctm(tsd, tdata->thread_name, tcache, true); + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd, tdata, tcache, true); +} + +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) +{ + + malloc_mutex_lock(&tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(&tdatas_mtx); +} + +static void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) +{ + bool destroy_tdata; + + malloc_mutex_lock(tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tdata, true); + /* + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. + */ + if (!destroy_tdata) + tdata->attached = false; + tsd_prof_tdata_set(tsd, NULL); + } else + destroy_tdata = false; + malloc_mutex_unlock(tdata->lock); + if (destroy_tdata) + prof_tdata_destroy(tsd, tdata, true); +} + +prof_tdata_t * +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) +{ + uint64_t thr_uid = tdata->thr_uid; + uint64_t thr_discrim = tdata->thr_discrim + 1; + char *thread_name = (tdata->thread_name != NULL) ? + prof_thread_name_alloc(tsd, tdata->thread_name) : NULL; + bool active = tdata->active; + + prof_tdata_detach(tsd, tdata); + return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active)); +} + +static bool +prof_tdata_expire(prof_tdata_t *tdata) +{ + bool destroy_tdata; + + malloc_mutex_lock(tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = tdata->attached ? false : + prof_tdata_should_destroy(tdata, false); + } else + destroy_tdata = false; + malloc_mutex_unlock(tdata->lock); + + return (destroy_tdata); +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) +{ + + return (prof_tdata_expire(tdata) ? 
tdata : NULL); +} + +void +prof_reset(tsd_t *tsd, size_t lg_sample) +{ + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(&prof_dump_mtx); + malloc_mutex_lock(&tdatas_mtx); + + lg_prof_sample = lg_sample; + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, NULL); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else + next = NULL; + } while (next != NULL); + + malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(&prof_dump_mtx); +} + +void +prof_tdata_cleanup(tsd_t *tsd) +{ + prof_tdata_t *tdata; + + if (!config_prof) + return; + + tdata = tsd_prof_tdata_get(tsd); + if (tdata != NULL) + prof_tdata_detach(tsd, tdata); +} + +bool +prof_active_get(void) +{ + bool prof_active_current; + + malloc_mutex_lock(&prof_active_mtx); + prof_active_current = prof_active; + malloc_mutex_unlock(&prof_active_mtx); + return (prof_active_current); +} + +bool +prof_active_set(bool active) +{ + bool prof_active_old; + + malloc_mutex_lock(&prof_active_mtx); + prof_active_old = prof_active; + prof_active = active; + malloc_mutex_unlock(&prof_active_mtx); + return (prof_active_old); +} + +const char * +prof_thread_name_get(void) +{ + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (""); + return (tdata->thread_name != NULL ? tdata->thread_name : ""); +} + +static char * +prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) +{ + char *ret; + size_t size; + + if (thread_name == NULL) + return (NULL); + + size = strlen(thread_name) + 1; + if (size == 1) + return (""); + + ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL); + if (ret == NULL) + return (NULL); + memcpy(ret, thread_name, size); + return (ret); +} + +int +prof_thread_name_set(tsd_t *tsd, const char *thread_name) +{ + prof_tdata_t *tdata; + unsigned i; + char *s; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (EAGAIN); + + /* Validate input. 
*/ + if (thread_name == NULL) + return (EFAULT); + for (i = 0; thread_name[i] != '\0'; i++) { + char c = thread_name[i]; + if (!isgraph(c) && !isblank(c)) + return (EFAULT); + } + + s = prof_thread_name_alloc(tsd, thread_name); + if (s == NULL) + return (EAGAIN); + + if (tdata->thread_name != NULL) { + idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false), + true); + tdata->thread_name = NULL; + } + if (strlen(s) > 0) + tdata->thread_name = s; + return (0); +} + +bool +prof_thread_active_get(void) +{ + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (false); + return (tdata->active); +} + +bool +prof_thread_active_set(bool active) +{ + tsd_t *tsd; + prof_tdata_t *tdata; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) + return (true); + tdata->active = active; + return (false); +} + +bool +prof_thread_active_init_get(void) +{ + bool active_init; + + malloc_mutex_lock(&prof_thread_active_init_mtx); + active_init = prof_thread_active_init; + malloc_mutex_unlock(&prof_thread_active_init_mtx); + return (active_init); +} + +bool +prof_thread_active_init_set(bool active_init) +{ + bool active_init_old; + + malloc_mutex_lock(&prof_thread_active_init_mtx); + active_init_old = prof_thread_active_init; + prof_thread_active_init = active_init; + malloc_mutex_unlock(&prof_thread_active_init_mtx); + return (active_init_old); +} + +bool +prof_gdump_get(void) +{ + bool prof_gdump_current; + + malloc_mutex_lock(&prof_gdump_mtx); + prof_gdump_current = prof_gdump_val; + malloc_mutex_unlock(&prof_gdump_mtx); + return (prof_gdump_current); +} + +bool +prof_gdump_set(bool gdump) +{ + bool prof_gdump_old; + + malloc_mutex_lock(&prof_gdump_mtx); + prof_gdump_old = prof_gdump_val; + prof_gdump_val = gdump; + malloc_mutex_unlock(&prof_gdump_mtx); + return (prof_gdump_old); +} + +void +prof_boot0(void) +{ + + cassert(config_prof); + + memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, + sizeof(PROF_PREFIX_DEFAULT)); +} + +void +prof_boot1(void) +{ + + cassert(config_prof); + + /* + * opt_prof must be in its final state before any arenas are + * initialized, so this function must be executed early. + */ + + if (opt_prof_leak && !opt_prof) { + /* + * Enable opt_prof, but in such a way that profiles are never + * automatically dumped. 
+ */ + opt_prof = true; + opt_prof_gdump = false; + } else if (opt_prof) { + if (opt_lg_prof_interval >= 0) { + prof_interval = (((uint64_t)1U) << + opt_lg_prof_interval); + } + } +} + +bool +prof_boot2(void) +{ + + cassert(config_prof); + + if (opt_prof) { + tsd_t *tsd; + unsigned i; + + lg_prof_sample = opt_lg_prof_sample; + + prof_active = opt_prof_active; + if (malloc_mutex_init(&prof_active_mtx)) + return (true); + + prof_gdump_val = opt_prof_gdump; + if (malloc_mutex_init(&prof_gdump_mtx)) + return (true); + + prof_thread_active_init = opt_prof_thread_active_init; + if (malloc_mutex_init(&prof_thread_active_init_mtx)) + return (true); + + tsd = tsd_fetch(); + if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) + return (true); + if (malloc_mutex_init(&bt2gctx_mtx)) + return (true); + + tdata_tree_new(&tdatas); + if (malloc_mutex_init(&tdatas_mtx)) + return (true); + + next_thr_uid = 0; + if (malloc_mutex_init(&next_thr_uid_mtx)) + return (true); + + if (malloc_mutex_init(&prof_dump_seq_mtx)) + return (true); + if (malloc_mutex_init(&prof_dump_mtx)) + return (true); + + if (opt_prof_final && opt_prof_prefix[0] != '\0' && + atexit(prof_fdump) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) + abort(); + } + + gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * + sizeof(malloc_mutex_t)); + if (gctx_locks == NULL) + return (true); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + if (malloc_mutex_init(&gctx_locks[i])) + return (true); + } + + tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS * + sizeof(malloc_mutex_t)); + if (tdata_locks == NULL) + return (true); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + if (malloc_mutex_init(&tdata_locks[i])) + return (true); + } + } + +#ifdef JEMALLOC_PROF_LIBGCC + /* + * Cause the backtracing machinery to allocate its internal state + * before enabling profiling. 
+ */ + _Unwind_Backtrace(prof_unwind_init_callback, NULL); +#endif + + prof_booted = true; + + return (false); +} + +void +prof_prefork(void) +{ + + if (opt_prof) { + unsigned i; + + malloc_mutex_prefork(&tdatas_mtx); + malloc_mutex_prefork(&bt2gctx_mtx); + malloc_mutex_prefork(&next_thr_uid_mtx); + malloc_mutex_prefork(&prof_dump_seq_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_prefork(&gctx_locks[i]); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_prefork(&tdata_locks[i]); + } +} + +void +prof_postfork_parent(void) +{ + + if (opt_prof) { + unsigned i; + + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_postfork_parent(&tdata_locks[i]); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_postfork_parent(&gctx_locks[i]); + malloc_mutex_postfork_parent(&prof_dump_seq_mtx); + malloc_mutex_postfork_parent(&next_thr_uid_mtx); + malloc_mutex_postfork_parent(&bt2gctx_mtx); + malloc_mutex_postfork_parent(&tdatas_mtx); + } +} + +void +prof_postfork_child(void) +{ + + if (opt_prof) { + unsigned i; + + for (i = 0; i < PROF_NTDATA_LOCKS; i++) + malloc_mutex_postfork_child(&tdata_locks[i]); + for (i = 0; i < PROF_NCTX_LOCKS; i++) + malloc_mutex_postfork_child(&gctx_locks[i]); + malloc_mutex_postfork_child(&prof_dump_seq_mtx); + malloc_mutex_postfork_child(&next_thr_uid_mtx); + malloc_mutex_postfork_child(&bt2gctx_mtx); + malloc_mutex_postfork_child(&tdatas_mtx); + } +} + +/******************************************************************************/ diff --git a/redis.submodule/jemalloc/src/quarantine.c b/redis.submodule/jemalloc/src/quarantine.c new file mode 100644 index 0000000..6c43dfc --- /dev/null +++ b/redis.submodule/jemalloc/src/quarantine.c @@ -0,0 +1,183 @@ +#define JEMALLOC_QUARANTINE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/* + * Quarantine pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) +#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) +#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine); +static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine); +static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, + size_t upper_bound); + +/******************************************************************************/ + +static quarantine_t * +quarantine_init(tsd_t *tsd, size_t lg_maxobjs) +{ + quarantine_t *quarantine; + + assert(tsd_nominal(tsd)); + + quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs) + + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false, + tcache_get(tsd, true), true, NULL); + if (quarantine == NULL) + return (NULL); + quarantine->curbytes = 0; + quarantine->curobjs = 0; + quarantine->first = 0; + quarantine->lg_maxobjs = lg_maxobjs; + + return (quarantine); +} + +void +quarantine_alloc_hook_work(tsd_t *tsd) +{ + quarantine_t *quarantine; + + if (!tsd_nominal(tsd)) + return; + + quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT); + /* + * Check again whether quarantine has been initialized, because + * quarantine_init() may have triggered recursive initialization. 
+ */ + if (tsd_quarantine_get(tsd) == NULL) + tsd_quarantine_set(tsd, quarantine); + else + idalloctm(tsd, quarantine, tcache_get(tsd, false), true); +} + +static quarantine_t * +quarantine_grow(tsd_t *tsd, quarantine_t *quarantine) +{ + quarantine_t *ret; + + ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1); + if (ret == NULL) { + quarantine_drain_one(tsd, quarantine); + return (quarantine); + } + + ret->curbytes = quarantine->curbytes; + ret->curobjs = quarantine->curobjs; + if (quarantine->first + quarantine->curobjs <= (ZU(1) << + quarantine->lg_maxobjs)) { + /* objs ring buffer data are contiguous. */ + memcpy(ret->objs, &quarantine->objs[quarantine->first], + quarantine->curobjs * sizeof(quarantine_obj_t)); + } else { + /* objs ring buffer data wrap around. */ + size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - + quarantine->first; + size_t ncopy_b = quarantine->curobjs - ncopy_a; + + memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a + * sizeof(quarantine_obj_t)); + memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * + sizeof(quarantine_obj_t)); + } + idalloctm(tsd, quarantine, tcache_get(tsd, false), true); + + tsd_quarantine_set(tsd, ret); + return (ret); +} + +static void +quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine) +{ + quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; + assert(obj->usize == isalloc(obj->ptr, config_prof)); + idalloctm(tsd, obj->ptr, NULL, false); + quarantine->curbytes -= obj->usize; + quarantine->curobjs--; + quarantine->first = (quarantine->first + 1) & ((ZU(1) << + quarantine->lg_maxobjs) - 1); +} + +static void +quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound) +{ + + while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) + quarantine_drain_one(tsd, quarantine); +} + +void +quarantine(tsd_t *tsd, void *ptr) +{ + quarantine_t *quarantine; + size_t usize = isalloc(ptr, config_prof); + + cassert(config_fill); + assert(opt_quarantine); + + if ((quarantine = tsd_quarantine_get(tsd)) == NULL) { + idalloctm(tsd, ptr, NULL, false); + return; + } + /* + * Drain one or more objects if the quarantine size limit would be + * exceeded by appending ptr. + */ + if (quarantine->curbytes + usize > opt_quarantine) { + size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine + - usize : 0; + quarantine_drain(tsd, quarantine, upper_bound); + } + /* Grow the quarantine ring buffer if it's full. */ + if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) + quarantine = quarantine_grow(tsd, quarantine); + /* quarantine_grow() must free a slot if it fails to grow. */ + assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); + /* Append ptr if its size doesn't exceed the quarantine size. */ + if (quarantine->curbytes + usize <= opt_quarantine) { + size_t offset = (quarantine->first + quarantine->curobjs) & + ((ZU(1) << quarantine->lg_maxobjs) - 1); + quarantine_obj_t *obj = &quarantine->objs[offset]; + obj->ptr = ptr; + obj->usize = usize; + quarantine->curbytes += usize; + quarantine->curobjs++; + if (config_fill && unlikely(opt_junk_free)) { + /* + * Only do redzone validation if Valgrind isn't in + * operation. 
+ */ + if ((!config_valgrind || likely(!in_valgrind)) + && usize <= SMALL_MAXCLASS) + arena_quarantine_junk_small(ptr, usize); + else + memset(ptr, 0x5a, usize); + } + } else { + assert(quarantine->curbytes == 0); + idalloctm(tsd, ptr, NULL, false); + } +} + +void +quarantine_cleanup(tsd_t *tsd) +{ + quarantine_t *quarantine; + + if (!config_fill) + return; + + quarantine = tsd_quarantine_get(tsd); + if (quarantine != NULL) { + quarantine_drain(tsd, quarantine, 0); + idalloctm(tsd, quarantine, tcache_get(tsd, false), true); + tsd_quarantine_set(tsd, NULL); + } +} diff --git a/redis.submodule/jemalloc/src/rtree.c b/redis.submodule/jemalloc/src/rtree.c new file mode 100644 index 0000000..af0d97e --- /dev/null +++ b/redis.submodule/jemalloc/src/rtree.c @@ -0,0 +1,127 @@ +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +static unsigned +hmin(unsigned ha, unsigned hb) +{ + + return (ha < hb ? ha : hb); +} + +/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */ +bool +rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, + rtree_node_dalloc_t *dalloc) +{ + unsigned bits_in_leaf, height, i; + + assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); + + bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL + : (bits % RTREE_BITS_PER_LEVEL); + if (bits > bits_in_leaf) { + height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; + if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) + height++; + } else + height = 1; + assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); + + rtree->alloc = alloc; + rtree->dalloc = dalloc; + rtree->height = height; + + /* Root level. */ + rtree->levels[0].subtree = NULL; + rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL : + bits_in_leaf; + rtree->levels[0].cumbits = rtree->levels[0].bits; + /* Interior levels. */ + for (i = 1; i < height-1; i++) { + rtree->levels[i].subtree = NULL; + rtree->levels[i].bits = RTREE_BITS_PER_LEVEL; + rtree->levels[i].cumbits = rtree->levels[i-1].cumbits + + RTREE_BITS_PER_LEVEL; + } + /* Leaf level. */ + if (height > 1) { + rtree->levels[height-1].subtree = NULL; + rtree->levels[height-1].bits = bits_in_leaf; + rtree->levels[height-1].cumbits = bits; + } + + /* Compute lookup table to be used by rtree_start_level(). */ + for (i = 0; i < RTREE_HEIGHT_MAX; i++) { + rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height - + 1); + } + + return (false); +} + +static void +rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level) +{ + + if (level + 1 < rtree->height) { + size_t nchildren, i; + + nchildren = ZU(1) << rtree->levels[level].bits; + for (i = 0; i < nchildren; i++) { + rtree_node_elm_t *child = node[i].child; + if (child != NULL) + rtree_delete_subtree(rtree, child, level + 1); + } + } + rtree->dalloc(node); +} + +void +rtree_delete(rtree_t *rtree) +{ + unsigned i; + + for (i = 0; i < rtree->height; i++) { + rtree_node_elm_t *subtree = rtree->levels[i].subtree; + if (subtree != NULL) + rtree_delete_subtree(rtree, subtree, i); + } +} + +static rtree_node_elm_t * +rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) +{ + rtree_node_elm_t *node; + + if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { + /* + * Another thread is already in the process of initializing. + * Spin-wait until initialization is complete. 
+ */ + do { + CPU_SPINWAIT; + node = atomic_read_p((void **)elmp); + } while (node == RTREE_NODE_INITIALIZING); + } else { + node = rtree->alloc(ZU(1) << rtree->levels[level].bits); + if (node == NULL) + return (NULL); + atomic_write_p((void **)elmp, node); + } + + return (node); +} + +rtree_node_elm_t * +rtree_subtree_read_hard(rtree_t *rtree, unsigned level) +{ + + return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); +} + +rtree_node_elm_t * +rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) +{ + + return (rtree_node_init(rtree, level, &elm->child)); +} diff --git a/redis.submodule/jemalloc/src/stats.c b/redis.submodule/jemalloc/src/stats.c new file mode 100644 index 0000000..154c3e7 --- /dev/null +++ b/redis.submodule/jemalloc/src/stats.c @@ -0,0 +1,640 @@ +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +#define CTL_GET(n, v, t) do { \ + size_t sz = sizeof(t); \ + xmallctl(n, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_GET(n, i, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_M4_GET(n, i, j, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + mib[4] = (j); \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +/******************************************************************************/ +/* Data. */ + +bool opt_stats_print = false; + +size_t stats_cactive = 0; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void stats_arena_bins_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i); +static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i); +static void stats_arena_hchunks_print( + void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); +static void stats_arena_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i, bool bins, bool large, bool huge); + +/******************************************************************************/ + +static void +stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i) +{ + size_t page; + bool config_tcache, in_gap; + unsigned nbins, j; + + CTL_GET("arenas.page", &page, size_t); + + CTL_GET("config.tcache", &config_tcache, bool); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs curruns regs" + " pgs util nfills nflushes newruns" + " reruns\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs curruns regs" + " pgs util newruns reruns\n"); + } + CTL_GET("arenas.nbins", &nbins, unsigned); + for (j = 0, in_gap = false; j < nbins; j++) { + uint64_t nruns; + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, + uint64_t); + if (nruns == 0) + in_gap = true; + else { + size_t reg_size, run_size, curregs, availregs, milli; + size_t curruns; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t reruns; + char util[6]; /* "x.yyy". 
*/ + + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + in_gap = false; + } + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, + &nmalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, + &ndalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, + &curregs, size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + if (config_tcache) { + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, + j, &nfills, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", + i, j, &nflushes, uint64_t); + } + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, + &reruns, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, + &curruns, size_t); + + availregs = nregs * curruns; + milli = (availregs != 0) ? (1000 * curregs) / availregs + : 1000; + assert(milli <= 1000); + if (milli < 10) { + malloc_snprintf(util, sizeof(util), + "0.00%zu", milli); + } else if (milli < 100) { + malloc_snprintf(util, sizeof(util), "0.0%zu", + milli); + } else if (milli < 1000) { + malloc_snprintf(util, sizeof(util), "0.%zu", + milli); + } else + malloc_snprintf(util, sizeof(util), "1"); + + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64 + " %12"FMTu64" %12"FMTu64" %12zu" + " %12zu %4u %3zu %-5s %12"FMTu64 + " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", + reg_size, j, curregs * reg_size, nmalloc, + ndalloc, nrequests, curregs, curruns, nregs, + run_size / page, util, nfills, nflushes, + nruns, reruns); + } else { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64 + " %12"FMTu64" %12"FMTu64" %12zu" + " %12zu %4u %3zu %-5s %12"FMTu64 + " %12"FMTu64"\n", + reg_size, j, curregs * reg_size, nmalloc, + ndalloc, nrequests, curregs, curruns, nregs, + run_size / page, util, nruns, reruns); + } + } + } + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } +} + +static void +stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i) +{ + unsigned nbins, nlruns, j; + bool in_gap; + + malloc_cprintf(write_cb, cbopaque, + "large: size ind allocated nmalloc ndalloc" + " nrequests curruns\n"); + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlruns", &nlruns, unsigned); + for (j = 0, in_gap = false; j < nlruns; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t run_size, curruns; + + CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, + &nrequests, uint64_t); + if (nrequests == 0) + in_gap = true; + else { + CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, + &curruns, size_t); + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + in_gap = false; + } + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64" %12zu\n", + run_size, nbins + j, curruns * run_size, nmalloc, + ndalloc, nrequests, curruns); + } + } + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } +} + +static void +stats_arena_hchunks_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i) +{ + unsigned nbins, nlruns, nhchunks, j; + bool in_gap; + + malloc_cprintf(write_cb, 
cbopaque, + "huge: size ind allocated nmalloc ndalloc" + " nrequests curhchunks\n"); + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlruns", &nlruns, unsigned); + CTL_GET("arenas.nhchunks", &nhchunks, unsigned); + for (j = 0, in_gap = false; j < nhchunks; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t hchunk_size, curhchunks; + + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, + &nmalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, + &ndalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, + &nrequests, uint64_t); + if (nrequests == 0) + in_gap = true; + else { + CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, + size_t); + CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, + j, &curhchunks, size_t); + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + in_gap = false; + } + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64" %12zu\n", + hchunk_size, nbins + nlruns + j, + curhchunks * hchunk_size, nmalloc, ndalloc, + nrequests, curhchunks); + } + } + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } +} + +static void +stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i, bool bins, bool large, bool huge) +{ + unsigned nthreads; + const char *dss; + ssize_t lg_dirty_mult; + size_t page, pactive, pdirty, mapped; + size_t metadata_mapped, metadata_allocated; + uint64_t npurge, nmadvise, purged; + size_t small_allocated; + uint64_t small_nmalloc, small_ndalloc, small_nrequests; + size_t large_allocated; + uint64_t large_nmalloc, large_ndalloc, large_nrequests; + size_t huge_allocated; + uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; + + CTL_GET("arenas.page", &page, size_t); + + CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); + malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", + dss); + CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); + if (lg_dirty_mult >= 0) { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: %u:1\n", + (1U << lg_dirty_mult)); + } else { + malloc_cprintf(write_cb, cbopaque, + "min active:dirty page ratio: N/A\n"); + } + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); + CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); + CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); + CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64 + " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge == + 1 ? "" : "s", nmadvise, nmadvise == 1 ? 
"" : "s", purged); + + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc ndalloc" + " nrequests\n"); + CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, + uint64_t); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated, small_nmalloc, small_ndalloc, small_nrequests); + CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, + uint64_t); + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + large_allocated, large_nmalloc, large_ndalloc, large_nrequests); + CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); + CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, + uint64_t); + malloc_cprintf(write_cb, cbopaque, + "huge: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated + large_allocated + huge_allocated, + small_nmalloc + large_nmalloc + huge_nmalloc, + small_ndalloc + large_ndalloc + huge_ndalloc, + small_nrequests + large_nrequests + huge_nrequests); + malloc_cprintf(write_cb, cbopaque, + "active: %12zu\n", pactive * page); + CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); + malloc_cprintf(write_cb, cbopaque, + "mapped: %12zu\n", mapped); + CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, + size_t); + CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, + size_t); + malloc_cprintf(write_cb, cbopaque, + "metadata: mapped: %zu, allocated: %zu\n", + metadata_mapped, metadata_allocated); + + if (bins) + stats_arena_bins_print(write_cb, cbopaque, i); + if (large) + stats_arena_lruns_print(write_cb, cbopaque, i); + if (huge) + stats_arena_hchunks_print(write_cb, cbopaque, i); +} + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + int err; + uint64_t epoch; + size_t u64sz; + bool general = true; + bool merged = true; + bool unmerged = true; + bool bins = true; + bool large = true; + bool huge = true; + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. 
+ * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write(": Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write(": Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } + + if (opts != NULL) { + unsigned i; + + for (i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { + case 'g': + general = false; + break; + case 'm': + merged = false; + break; + case 'a': + unmerged = false; + break; + case 'b': + bins = false; + break; + case 'l': + large = false; + break; + case 'h': + huge = false; + break; + default:; + } + } + } + + malloc_cprintf(write_cb, cbopaque, + "___ Begin jemalloc statistics ___\n"); + if (general) { + const char *cpv; + bool bv; + unsigned uv; + ssize_t ssv; + size_t sv, bsz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); + CTL_GET("config.debug", &bv, bool); + malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", + bv ? "enabled" : "disabled"); + +#define OPT_WRITE_BOOL(n) \ + if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s\n", bv ? "true" : "false"); \ + } +#define OPT_WRITE_BOOL_MUTABLE(n, m) { \ + bool bv2; \ + if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \ + je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s ("#m": %s)\n", bv ? "true" \ + : "false", bv2 ? "true" : "false"); \ + } \ +} +#define OPT_WRITE_SIZE_T(n) \ + if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zu\n", sv); \ + } +#define OPT_WRITE_SSIZE_T(n) \ + if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd\n", ssv); \ + } +#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \ + ssize_t ssv2; \ + if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \ + je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd ("#m": %zd)\n", \ + ssv, ssv2); \ + } \ +} +#define OPT_WRITE_CHAR_P(n) \ + if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": \"%s\"\n", cpv); \ + } + + malloc_cprintf(write_cb, cbopaque, + "Run-time option settings:\n"); + OPT_WRITE_BOOL(abort) + OPT_WRITE_SIZE_T(lg_chunk) + OPT_WRITE_CHAR_P(dss) + OPT_WRITE_SIZE_T(narenas) + OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult) + OPT_WRITE_BOOL(stats_print) + OPT_WRITE_CHAR_P(junk) + OPT_WRITE_SIZE_T(quarantine) + OPT_WRITE_BOOL(redzone) + OPT_WRITE_BOOL(zero) + OPT_WRITE_BOOL(utrace) + OPT_WRITE_BOOL(valgrind) + OPT_WRITE_BOOL(xmalloc) + OPT_WRITE_BOOL(tcache) + OPT_WRITE_SSIZE_T(lg_tcache_max) + OPT_WRITE_BOOL(prof) + OPT_WRITE_CHAR_P(prof_prefix) + OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active) + OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, + prof.thread_active_init) + OPT_WRITE_SSIZE_T(lg_prof_sample) + OPT_WRITE_BOOL(prof_accum) + OPT_WRITE_SSIZE_T(lg_prof_interval) + OPT_WRITE_BOOL(prof_gdump) + OPT_WRITE_BOOL(prof_final) + OPT_WRITE_BOOL(prof_leak) + +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_BOOL_MUTABLE +#undef OPT_WRITE_SIZE_T +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_CHAR_P + + malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); + + 
CTL_GET("arenas.narenas", &uv, unsigned); + malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); + + malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", + sizeof(void *)); + + CTL_GET("arenas.quantum", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", + sv); + + CTL_GET("arenas.page", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + + CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); + if (ssv >= 0) { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: %u:1\n", + (1U << ssv)); + } else { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: N/A\n"); + } + if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) { + malloc_cprintf(write_cb, cbopaque, + "Maximum thread-cached size class: %zu\n", sv); + } + if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) { + CTL_GET("prof.lg_sample", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "Average profile sample interval: %"FMTu64 + " (2^%zu)\n", (((uint64_t)1U) << sv), sv); + + CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); + if (ssv >= 0) { + malloc_cprintf(write_cb, cbopaque, + "Average profile dump interval: %"FMTu64 + " (2^%zd)\n", + (((uint64_t)1U) << ssv), ssv); + } else { + malloc_cprintf(write_cb, cbopaque, + "Average profile dump interval: N/A\n"); + } + } + CTL_GET("opt.lg_chunk", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv); + } + + if (config_stats) { + size_t *cactive; + size_t allocated, active, metadata, resident, mapped; + + CTL_GET("stats.cactive", &cactive, size_t *); + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + malloc_cprintf(write_cb, cbopaque, + "Allocated: %zu, active: %zu, metadata: %zu," + " resident: %zu, mapped: %zu\n", + allocated, active, metadata, resident, mapped); + malloc_cprintf(write_cb, cbopaque, + "Current active ceiling: %zu\n", + atomic_read_z(cactive)); + + if (merged) { + unsigned narenas; + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + VARIABLE_ARRAY(bool, initialized, narenas); + size_t isz; + unsigned i, ninitialized; + + isz = sizeof(bool) * narenas; + xmallctl("arenas.initialized", initialized, + &isz, NULL, 0); + for (i = ninitialized = 0; i < narenas; i++) { + if (initialized[i]) + ninitialized++; + } + + if (ninitialized > 1 || !unmerged) { + /* Print merged arena stats. */ + malloc_cprintf(write_cb, cbopaque, + "\nMerged arenas stats:\n"); + stats_arena_print(write_cb, cbopaque, + narenas, bins, large, huge); + } + } + } + + if (unmerged) { + unsigned narenas; + + /* Print stats for each arena. 
*/ + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + VARIABLE_ARRAY(bool, initialized, narenas); + size_t isz; + unsigned i; + + isz = sizeof(bool) * narenas; + xmallctl("arenas.initialized", initialized, + &isz, NULL, 0); + + for (i = 0; i < narenas; i++) { + if (initialized[i]) { + malloc_cprintf(write_cb, + cbopaque, + "\narenas[%u]:\n", i); + stats_arena_print(write_cb, + cbopaque, i, bins, large, + huge); + } + } + } + } + } + malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); +} diff --git a/redis.submodule/jemalloc/src/tcache.c b/redis.submodule/jemalloc/src/tcache.c new file mode 100644 index 0000000..fdafd0c --- /dev/null +++ b/redis.submodule/jemalloc/src/tcache.c @@ -0,0 +1,537 @@ +#define JEMALLOC_TCACHE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +bool opt_tcache = true; +ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; + +tcache_bin_info_t *tcache_bin_info; +static unsigned stack_nelms; /* Total stack elms per tcache. */ + +size_t nhbins; +size_t tcache_maxclass; + +tcaches_t *tcaches; + +/* Index of first element within tcaches that has never been used. */ +static unsigned tcaches_past; + +/* Head of singly linked list tracking available tcaches elements. */ +static tcaches_t *tcaches_avail; + +/******************************************************************************/ + +size_t tcache_salloc(const void *ptr) +{ + + return (arena_salloc(ptr, false)); +} + +void +tcache_event_hard(tsd_t *tsd, tcache_t *tcache) +{ + szind_t binind = tcache->next_gc_bin; + tcache_bin_t *tbin = &tcache->tbins[binind]; + tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + + if (tbin->low_water > 0) { + /* + * Flush (ceiling) 3/4 of the objects below the low water mark. + */ + if (binind < NBINS) { + tcache_bin_flush_small(tsd, tcache, tbin, binind, + tbin->ncached - tbin->low_water + (tbin->low_water + >> 2)); + } else { + tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached + - tbin->low_water + (tbin->low_water >> 2), tcache); + } + /* + * Reduce fill count by 2X. Limit lg_fill_div such that the + * fill count is always at least 1. + */ + if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) + tbin->lg_fill_div++; + } else if (tbin->low_water < 0) { + /* + * Increase fill count by 2X. Make sure lg_fill_div stays + * greater than 0. + */ + if (tbin->lg_fill_div > 1) + tbin->lg_fill_div--; + } + tbin->low_water = tbin->ncached; + + tcache->next_gc_bin++; + if (tcache->next_gc_bin == nhbins) + tcache->next_gc_bin = 0; + tcache->ev_cnt = 0; +} + +void * +tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind) +{ + void *ret; + + arena_tcache_fill_small(arena, tbin, binind, config_prof ? + tcache->prof_accumbytes : 0); + if (config_prof) + tcache->prof_accumbytes = 0; + ret = tcache_alloc_easy(tbin); + + return (ret); +} + +void +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem) +{ + arena_t *arena; + void *ptr; + unsigned i, nflush, ndeferred; + bool merged_stats = false; + + assert(binind < NBINS); + assert(rem <= tbin->ncached); + + arena = arena_choose(tsd, NULL); + assert(arena != NULL); + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena bin associated with the first object. 
*/ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + tbin->avail[0]); + arena_t *bin_arena = extent_node_arena_get(&chunk->node); + arena_bin_t *bin = &bin_arena->bins[binind]; + + if (config_prof && bin_arena == arena) { + if (arena_prof_accum(arena, tcache->prof_accumbytes)) + prof_idump(); + tcache->prof_accumbytes = 0; + } + + malloc_mutex_lock(&bin->lock); + if (config_stats && bin_arena == arena) { + assert(!merged_stats); + merged_stats = true; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = tbin->avail[i]; + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (extent_node_arena_get(&chunk->node) == bin_arena) { + size_t pageind = ((uintptr_t)ptr - + (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_bits_t *bitselm = + arena_bitselm_get(chunk, pageind); + arena_dalloc_bin_junked_locked(bin_arena, chunk, + ptr, bitselm); + } else { + /* + * This object was allocated via a different + * arena bin than the one that is currently + * locked. Stash the object, so that it can be + * handled in a future pass. + */ + tbin->avail[ndeferred] = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(&bin->lock); + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_bin_t *bin = &arena->bins[binind]; + malloc_mutex_lock(&bin->lock); + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(&bin->lock); + } + + memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], + rem * sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +void +tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache) +{ + arena_t *arena; + void *ptr; + unsigned i, nflush, ndeferred; + bool merged_stats = false; + + assert(binind < nhbins); + assert(rem <= tbin->ncached); + + arena = arena_choose(tsd, NULL); + assert(arena != NULL); + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena associated with the first object. */ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + tbin->avail[0]); + arena_t *locked_arena = extent_node_arena_get(&chunk->node); + UNUSED bool idump; + + if (config_prof) + idump = false; + malloc_mutex_lock(&locked_arena->lock); + if ((config_prof || config_stats) && locked_arena == arena) { + if (config_prof) { + idump = arena_prof_accum_locked(arena, + tcache->prof_accumbytes); + tcache->prof_accumbytes = 0; + } + if (config_stats) { + merged_stats = true; + arena->stats.nrequests_large += + tbin->tstats.nrequests; + arena->stats.lstats[binind - NBINS].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + } + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = tbin->avail[i]; + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (extent_node_arena_get(&chunk->node) == + locked_arena) { + arena_dalloc_large_junked_locked(locked_arena, + chunk, ptr); + } else { + /* + * This object was allocated via a different + * arena than the one that is currently locked. + * Stash the object, so that it can be handled + * in a future pass. 
+ */ + tbin->avail[ndeferred] = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(&locked_arena->lock); + if (config_prof && idump) + prof_idump(); + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + malloc_mutex_lock(&arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[binind - NBINS].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(&arena->lock); + } + + memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], + rem * sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +void +tcache_arena_associate(tcache_t *tcache, arena_t *arena) +{ + + if (config_stats) { + /* Link into list of extant tcaches. */ + malloc_mutex_lock(&arena->lock); + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + malloc_mutex_unlock(&arena->lock); + } +} + +void +tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena) +{ + + tcache_arena_dissociate(tcache, oldarena); + tcache_arena_associate(tcache, newarena); +} + +void +tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) +{ + + if (config_stats) { + /* Unlink from list of extant tcaches. */ + malloc_mutex_lock(&arena->lock); + if (config_debug) { + bool in_ql = false; + tcache_t *iter; + ql_foreach(iter, &arena->tcache_ql, link) { + if (iter == tcache) { + in_ql = true; + break; + } + } + assert(in_ql); + } + ql_remove(&arena->tcache_ql, tcache, link); + tcache_stats_merge(tcache, arena); + malloc_mutex_unlock(&arena->lock); + } +} + +tcache_t * +tcache_get_hard(tsd_t *tsd) +{ + arena_t *arena; + + if (!tcache_enabled_get()) { + if (tsd_nominal(tsd)) + tcache_enabled_set(false); /* Memoize. */ + return (NULL); + } + arena = arena_choose(tsd, NULL); + if (unlikely(arena == NULL)) + return (NULL); + return (tcache_create(tsd, arena)); +} + +tcache_t * +tcache_create(tsd_t *tsd, arena_t *arena) +{ + tcache_t *tcache; + size_t size, stack_offset; + unsigned i; + + size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); + /* Naturally align the pointer stacks. */ + size = PTR_CEILING(size); + stack_offset = size; + size += stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. 
*/ + size = sa2u(size, CACHELINE); + + tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get()); + if (tcache == NULL) + return (NULL); + + tcache_arena_associate(tcache, arena); + + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + for (i = 0; i < nhbins; i++) { + tcache->tbins[i].lg_fill_div = 1; + tcache->tbins[i].avail = (void **)((uintptr_t)tcache + + (uintptr_t)stack_offset); + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + } + + return (tcache); +} + +static void +tcache_destroy(tsd_t *tsd, tcache_t *tcache) +{ + arena_t *arena; + unsigned i; + + arena = arena_choose(tsd, NULL); + tcache_arena_dissociate(tcache, arena); + + for (i = 0; i < NBINS; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_small(tsd, tcache, tbin, i, 0); + + if (config_stats && tbin->tstats.nrequests != 0) { + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(&bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(&bin->lock); + } + } + + for (; i < nhbins; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_large(tsd, tbin, i, 0, tcache); + + if (config_stats && tbin->tstats.nrequests != 0) { + malloc_mutex_lock(&arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[i - NBINS].nrequests += + tbin->tstats.nrequests; + malloc_mutex_unlock(&arena->lock); + } + } + + if (config_prof && tcache->prof_accumbytes > 0 && + arena_prof_accum(arena, tcache->prof_accumbytes)) + prof_idump(); + + idalloctm(tsd, tcache, false, true); +} + +void +tcache_cleanup(tsd_t *tsd) +{ + tcache_t *tcache; + + if (!config_tcache) + return; + + if ((tcache = tsd_tcache_get(tsd)) != NULL) { + tcache_destroy(tsd, tcache); + tsd_tcache_set(tsd, NULL); + } +} + +void +tcache_enabled_cleanup(tsd_t *tsd) +{ + + /* Do nothing. */ +} + +/* Caller must own arena->lock. */ +void +tcache_stats_merge(tcache_t *tcache, arena_t *arena) +{ + unsigned i; + + cassert(config_stats); + + /* Merge and reset tcache stats. 
*/ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + tcache_bin_t *tbin = &tcache->tbins[i]; + malloc_mutex_lock(&bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(&bin->lock); + tbin->tstats.nrequests = 0; + } + + for (; i < nhbins; i++) { + malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; + tcache_bin_t *tbin = &tcache->tbins[i]; + arena->stats.nrequests_large += tbin->tstats.nrequests; + lstats->nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } +} + +bool +tcaches_create(tsd_t *tsd, unsigned *r_ind) +{ + tcache_t *tcache; + tcaches_t *elm; + + if (tcaches == NULL) { + tcaches = base_alloc(sizeof(tcache_t *) * + (MALLOCX_TCACHE_MAX+1)); + if (tcaches == NULL) + return (true); + } + + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) + return (true); + tcache = tcache_create(tsd, a0get()); + if (tcache == NULL) + return (true); + + if (tcaches_avail != NULL) { + elm = tcaches_avail; + tcaches_avail = tcaches_avail->next; + elm->tcache = tcache; + *r_ind = elm - tcaches; + } else { + elm = &tcaches[tcaches_past]; + elm->tcache = tcache; + *r_ind = tcaches_past; + tcaches_past++; + } + + return (false); +} + +static void +tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) +{ + + if (elm->tcache == NULL) + return; + tcache_destroy(tsd, elm->tcache); + elm->tcache = NULL; +} + +void +tcaches_flush(tsd_t *tsd, unsigned ind) +{ + + tcaches_elm_flush(tsd, &tcaches[ind]); +} + +void +tcaches_destroy(tsd_t *tsd, unsigned ind) +{ + tcaches_t *elm = &tcaches[ind]; + tcaches_elm_flush(tsd, elm); + elm->next = tcaches_avail; + tcaches_avail = elm; +} + +bool +tcache_boot(void) +{ + unsigned i; + + /* + * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is + * known. + */ + if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) + tcache_maxclass = SMALL_MAXCLASS; + else if ((1U << opt_lg_tcache_max) > large_maxclass) + tcache_maxclass = large_maxclass; + else + tcache_maxclass = (1U << opt_lg_tcache_max); + + nhbins = size2index(tcache_maxclass) + 1; + + /* Initialize tcache_bin_info. */ + tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * + sizeof(tcache_bin_info_t)); + if (tcache_bin_info == NULL) + return (true); + stack_nelms = 0; + for (i = 0; i < NBINS; i++) { + if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MIN; + } else if ((arena_bin_info[i].nregs << 1) <= + TCACHE_NSLOTS_SMALL_MAX) { + tcache_bin_info[i].ncached_max = + (arena_bin_info[i].nregs << 1); + } else { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MAX; + } + stack_nelms += tcache_bin_info[i].ncached_max; + } + for (; i < nhbins; i++) { + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; + stack_nelms += tcache_bin_info[i].ncached_max; + } + + return (false); +} diff --git a/redis.submodule/jemalloc/src/tsd.c b/redis.submodule/jemalloc/src/tsd.c new file mode 100644 index 0000000..9ffe9af --- /dev/null +++ b/redis.submodule/jemalloc/src/tsd.c @@ -0,0 +1,193 @@ +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. 
*/ + +static unsigned ncleanups; +static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; + +malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) + +/******************************************************************************/ + +void * +malloc_tsd_malloc(size_t size) +{ + + return (a0malloc(CACHELINE_CEILING(size))); +} + +void +malloc_tsd_dalloc(void *wrapper) +{ + + a0dalloc(wrapper); +} + +void +malloc_tsd_no_cleanup(void *arg) +{ + + not_reached(); +} + +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +#ifndef _WIN32 +JEMALLOC_EXPORT +#endif +void +_malloc_thread_cleanup(void) +{ + bool pending[MALLOC_TSD_CLEANUPS_MAX], again; + unsigned i; + + for (i = 0; i < ncleanups; i++) + pending[i] = true; + + do { + again = false; + for (i = 0; i < ncleanups; i++) { + if (pending[i]) { + pending[i] = cleanups[i](); + if (pending[i]) + again = true; + } + } + } while (again); +} +#endif + +void +malloc_tsd_cleanup_register(bool (*f)(void)) +{ + + assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); + cleanups[ncleanups] = f; + ncleanups++; +} + +void +tsd_cleanup(void *arg) +{ + tsd_t *tsd = (tsd_t *)arg; + + switch (tsd->state) { + case tsd_state_uninitialized: + /* Do nothing. */ + break; + case tsd_state_nominal: +#define O(n, t) \ + n##_cleanup(tsd); +MALLOC_TSD +#undef O + tsd->state = tsd_state_purgatory; + tsd_set(tsd); + break; + case tsd_state_purgatory: + /* + * The previous time this destructor was called, we set the + * state to tsd_state_purgatory so that other destructors + * wouldn't cause re-creation of the tsd. This time, do + * nothing, and do not request another callback. + */ + break; + case tsd_state_reincarnated: + /* + * Another destructor deallocated memory after this destructor + * was called. Reset state to tsd_state_purgatory and request + * another callback. + */ + tsd->state = tsd_state_purgatory; + tsd_set(tsd); + break; + default: + not_reached(); + } +} + +bool +malloc_tsd_boot0(void) +{ + + ncleanups = 0; + if (tsd_boot0()) + return (true); + *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true; + return (false); +} + +void +malloc_tsd_boot1(void) +{ + + tsd_boot1(); + *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false; +} + +#ifdef _WIN32 +static BOOL WINAPI +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) +{ + + switch (fdwReason) { +#ifdef JEMALLOC_LAZY_LOCK + case DLL_THREAD_ATTACH: + isthreaded = true; + break; +#endif + case DLL_THREAD_DETACH: + _malloc_thread_cleanup(); + break; + default: + break; + } + return (true); +} + +#ifdef _MSC_VER +# ifdef _M_IX86 +# pragma comment(linker, "/INCLUDE:__tls_used") +# else +# pragma comment(linker, "/INCLUDE:_tls_used") +# endif +# pragma section(".CRT$XLY",long,read) +#endif +JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) +static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, + DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; +#endif + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void * +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) +{ + pthread_t self = pthread_self(); + tsd_init_block_t *iter; + + /* Check whether this thread has already inserted into the list. */ + malloc_mutex_lock(&head->lock); + ql_foreach(iter, &head->blocks, link) { + if (iter->thread == self) { + malloc_mutex_unlock(&head->lock); + return (iter->data); + } + } + /* Insert block into list. 
*/ + ql_elm_new(block, link); + block->thread = self; + ql_tail_insert(&head->blocks, block, link); + malloc_mutex_unlock(&head->lock); + return (NULL); +} + +void +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) +{ + + malloc_mutex_lock(&head->lock); + ql_remove(&head->blocks, block, link); + malloc_mutex_unlock(&head->lock); +} +#endif diff --git a/redis.submodule/jemalloc/src/util.c b/redis.submodule/jemalloc/src/util.c new file mode 100644 index 0000000..4cb0d6c --- /dev/null +++ b/redis.submodule/jemalloc/src/util.c @@ -0,0 +1,650 @@ +#define assert(e) do { \ + if (config_debug && !(e)) { \ + malloc_write(": Failed assertion\n"); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + if (config_debug) { \ + malloc_write(": Unreachable code reached\n"); \ + abort(); \ + } \ +} while (0) + +#define not_implemented() do { \ + if (config_debug) { \ + malloc_write(": Not implemented\n"); \ + abort(); \ + } \ +} while (0) + +#define JEMALLOC_UTIL_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void wrtmessage(void *cbopaque, const char *s); +#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) +static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, + size_t *slen_p); +#define D2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); +#define O2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); +#define X2S_BUFSIZE (2 + U2S_BUFSIZE) +static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, + size_t *slen_p); + +/******************************************************************************/ + +/* malloc_message() setup. */ +static void +wrtmessage(void *cbopaque, const char *s) +{ + +#ifdef SYS_write + /* + * Use syscall(2) rather than write(2) when possible in order to avoid + * the possibility of memory allocation within libc. This is necessary + * on FreeBSD; most operating systems do not have this problem though. + */ + UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); +#else + UNUSED int result = write(STDERR_FILENO, s, strlen(s)); +#endif +} + +JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); + +/* + * Wrapper around malloc_message() that avoids the need for + * je_malloc_message(...) throughout the code. + */ +void +malloc_write(const char *s) +{ + + if (je_malloc_message != NULL) + je_malloc_message(NULL, s); + else + wrtmessage(NULL, s); +} + +/* + * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so + * provide a wrapper. + */ +int +buferror(int err, char *buf, size_t buflen) +{ + +#ifdef _WIN32 + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, + (LPSTR)buf, buflen, NULL); + return (0); +#elif defined(__GLIBC__) && defined(_GNU_SOURCE) + char *b = strerror_r(err, buf, buflen); + if (b != buf) { + strncpy(buf, b, buflen); + buf[buflen-1] = '\0'; + } + return (0); +#else + return (strerror_r(err, buf, buflen)); +#endif +} + +uintmax_t +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) +{ + uintmax_t ret, digit; + unsigned b; + bool neg; + const char *p, *ns; + + p = nptr; + if (base < 0 || base == 1 || base > 36) { + ns = p; + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + b = base; + + /* Swallow leading whitespace and get sign, if any. 
*/ + neg = false; + while (true) { + switch (*p) { + case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': + p++; + break; + case '-': + neg = true; + /* Fall through. */ + case '+': + p++; + /* Fall through. */ + default: + goto label_prefix; + } + } + + /* Get prefix, if any. */ + label_prefix: + /* + * Note where the first non-whitespace/sign character is so that it is + * possible to tell whether any digits are consumed (e.g., " 0" vs. + * " -x"). + */ + ns = p; + if (*p == '0') { + switch (p[1]) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': + if (b == 0) + b = 8; + if (b == 8) + p++; + break; + case 'X': case 'x': + switch (p[2]) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + if (b == 0) + b = 16; + if (b == 16) + p += 2; + break; + default: + break; + } + break; + default: + p++; + ret = 0; + goto label_return; + } + } + if (b == 0) + b = 10; + + /* Convert. */ + ret = 0; + while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) + || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) + || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { + uintmax_t pret = ret; + ret *= b; + ret += digit; + if (ret < pret) { + /* Overflow. */ + set_errno(ERANGE); + ret = UINTMAX_MAX; + goto label_return; + } + p++; + } + if (neg) + ret = -ret; + + if (p == ns) { + /* No conversion performed. */ + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + +label_return: + if (endptr != NULL) { + if (p == ns) { + /* No characters were converted. */ + *endptr = (char *)nptr; + } else + *endptr = (char *)p; + } + return (ret); +} + +static char * +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) +{ + unsigned i; + + i = U2S_BUFSIZE - 1; + s[i] = '\0'; + switch (base) { + case 10: + do { + i--; + s[i] = "0123456789"[x % (uint64_t)10]; + x /= (uint64_t)10; + } while (x > 0); + break; + case 16: { + const char *digits = (uppercase) + ? "0123456789ABCDEF" + : "0123456789abcdef"; + + do { + i--; + s[i] = digits[x & 0xf]; + x >>= 4; + } while (x > 0); + break; + } default: { + const char *digits = (uppercase) + ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + : "0123456789abcdefghijklmnopqrstuvwxyz"; + + assert(base >= 2 && base <= 36); + do { + i--; + s[i] = digits[x % (uint64_t)base]; + x /= (uint64_t)base; + } while (x > 0); + }} + + *slen_p = U2S_BUFSIZE - 1 - i; + return (&s[i]); +} + +static char * +d2s(intmax_t x, char sign, char *s, size_t *slen_p) +{ + bool neg; + + if ((neg = (x < 0))) + x = -x; + s = u2s(x, 10, false, s, slen_p); + if (neg) + sign = '-'; + switch (sign) { + case '-': + if (!neg) + break; + /* Fall through. */ + case ' ': + case '+': + s--; + (*slen_p)++; + *s = sign; + break; + default: not_reached(); + } + return (s); +} + +static char * +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) +{ + + s = u2s(x, 8, false, s, slen_p); + if (alt_form && *s != '0') { + s--; + (*slen_p)++; + *s = '0'; + } + return (s); +} + +static char * +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) +{ + + s = u2s(x, 16, uppercase, s, slen_p); + if (alt_form) { + s -= 2; + (*slen_p) += 2; + memcpy(s, uppercase ? 
"0X" : "0x", 2); + } + return (s); +} + +int +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) +{ + int ret; + size_t i; + const char *f; + +#define APPEND_C(c) do { \ + if (i < size) \ + str[i] = (c); \ + i++; \ +} while (0) +#define APPEND_S(s, slen) do { \ + if (i < size) { \ + size_t cpylen = (slen <= size - i) ? slen : size - i; \ + memcpy(&str[i], s, cpylen); \ + } \ + i += slen; \ +} while (0) +#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ + /* Left padding. */ \ + size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ + (size_t)width - slen : 0); \ + if (!left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) \ + APPEND_C(' '); \ + } \ + /* Value. */ \ + APPEND_S(s, slen); \ + /* Right padding. */ \ + if (left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) \ + APPEND_C(' '); \ + } \ +} while (0) +#define GET_ARG_NUMERIC(val, len) do { \ + switch (len) { \ + case '?': \ + val = va_arg(ap, int); \ + break; \ + case '?' | 0x80: \ + val = va_arg(ap, unsigned int); \ + break; \ + case 'l': \ + val = va_arg(ap, long); \ + break; \ + case 'l' | 0x80: \ + val = va_arg(ap, unsigned long); \ + break; \ + case 'q': \ + val = va_arg(ap, long long); \ + break; \ + case 'q' | 0x80: \ + val = va_arg(ap, unsigned long long); \ + break; \ + case 'j': \ + val = va_arg(ap, intmax_t); \ + break; \ + case 'j' | 0x80: \ + val = va_arg(ap, uintmax_t); \ + break; \ + case 't': \ + val = va_arg(ap, ptrdiff_t); \ + break; \ + case 'z': \ + val = va_arg(ap, ssize_t); \ + break; \ + case 'z' | 0x80: \ + val = va_arg(ap, size_t); \ + break; \ + case 'p': /* Synthetic; used for %p. */ \ + val = va_arg(ap, uintptr_t); \ + break; \ + default: \ + not_reached(); \ + val = 0; \ + } \ +} while (0) + + i = 0; + f = format; + while (true) { + switch (*f) { + case '\0': goto label_out; + case '%': { + bool alt_form = false; + bool left_justify = false; + bool plus_space = false; + bool plus_plus = false; + int prec = -1; + int width = -1; + unsigned char len = '?'; + + f++; + /* Flags. */ + while (true) { + switch (*f) { + case '#': + assert(!alt_form); + alt_form = true; + break; + case '-': + assert(!left_justify); + left_justify = true; + break; + case ' ': + assert(!plus_space); + plus_space = true; + break; + case '+': + assert(!plus_plus); + plus_plus = true; + break; + default: goto label_width; + } + f++; + } + /* Width. */ + label_width: + switch (*f) { + case '*': + width = va_arg(ap, int); + f++; + if (width < 0) { + left_justify = true; + width = -width; + } + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uwidth; + set_errno(0); + uwidth = malloc_strtoumax(f, (char **)&f, 10); + assert(uwidth != UINTMAX_MAX || get_errno() != + ERANGE); + width = (int)uwidth; + break; + } default: + break; + } + /* Width/precision separator. */ + if (*f == '.') + f++; + else + goto label_length; + /* Precision. */ + switch (*f) { + case '*': + prec = va_arg(ap, int); + f++; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uprec; + set_errno(0); + uprec = malloc_strtoumax(f, (char **)&f, 10); + assert(uprec != UINTMAX_MAX || get_errno() != + ERANGE); + prec = (int)uprec; + break; + } + default: break; + } + /* Length. 
*/ + label_length: + switch (*f) { + case 'l': + f++; + if (*f == 'l') { + len = 'q'; + f++; + } else + len = 'l'; + break; + case 'q': case 'j': case 't': case 'z': + len = *f; + f++; + break; + default: break; + } + /* Conversion specifier. */ + switch (*f) { + char *s; + size_t slen; + case '%': + /* %% */ + APPEND_C(*f); + f++; + break; + case 'd': case 'i': { + intmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[D2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = d2s(val, (plus_plus ? '+' : (plus_space ? + ' ' : '-')), buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'o': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[O2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = o2s(val, alt_form, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'u': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[U2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = u2s(val, 10, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'x': case 'X': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = x2s(val, alt_form, *f == 'X', buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'c': { + unsigned char val; + char buf[2]; + + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + val = va_arg(ap, int); + buf[0] = val; + buf[1] = '\0'; + APPEND_PADDED_S(buf, 1, width, left_justify); + f++; + break; + } case 's': + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + s = va_arg(ap, char *); + slen = (prec < 0) ? strlen(s) : (size_t)prec; + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + case 'p': { + uintmax_t val; + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, 'p'); + s = x2s(val, true, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } default: not_reached(); + } + break; + } default: { + APPEND_C(*f); + f++; + break; + }} + } + label_out: + if (i < size) + str[i] = '\0'; + else + str[size - 1] = '\0'; + ret = i; + +#undef APPEND_C +#undef APPEND_S +#undef APPEND_PADDED_S +#undef GET_ARG_NUMERIC + return (ret); +} + +JEMALLOC_FORMAT_PRINTF(3, 4) +int +malloc_snprintf(char *str, size_t size, const char *format, ...) +{ + int ret; + va_list ap; + + va_start(ap, format); + ret = malloc_vsnprintf(str, size, format, ap); + va_end(ap); + + return (ret); +} + +void +malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap) +{ + char buf[MALLOC_PRINTF_BUFSIZE]; + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. + */ + write_cb = (je_malloc_message != NULL) ? je_malloc_message : + wrtmessage; + cbopaque = NULL; + } + + malloc_vsnprintf(buf, sizeof(buf), format, ap); + write_cb(cbopaque, buf); +} + +/* + * Print to a callback function in such a way as to (hopefully) avoid memory + * allocation. + */ +JEMALLOC_FORMAT_PRINTF(3, 4) +void +malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(write_cb, cbopaque, format, ap); + va_end(ap); +} + +/* Print to stderr in such a way as to avoid memory allocation. 
*/ +JEMALLOC_FORMAT_PRINTF(1, 2) +void +malloc_printf(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); +} diff --git a/redis.submodule/jemalloc/src/valgrind.c b/redis.submodule/jemalloc/src/valgrind.c new file mode 100644 index 0000000..8e7ef3a --- /dev/null +++ b/redis.submodule/jemalloc/src/valgrind.c @@ -0,0 +1,34 @@ +#include "jemalloc/internal/jemalloc_internal.h" +#ifndef JEMALLOC_VALGRIND +# error "This source file is for Valgrind integration." +#endif + +#include + +void +valgrind_make_mem_noaccess(void *ptr, size_t usize) +{ + + VALGRIND_MAKE_MEM_NOACCESS(ptr, usize); +} + +void +valgrind_make_mem_undefined(void *ptr, size_t usize) +{ + + VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize); +} + +void +valgrind_make_mem_defined(void *ptr, size_t usize) +{ + + VALGRIND_MAKE_MEM_DEFINED(ptr, usize); +} + +void +valgrind_freelike_block(void *ptr, size_t usize) +{ + + VALGRIND_FREELIKE_BLOCK(ptr, usize); +} diff --git a/redis.submodule/jemalloc/src/zone.c b/redis.submodule/jemalloc/src/zone.c new file mode 100644 index 0000000..12e1734 --- /dev/null +++ b/redis.submodule/jemalloc/src/zone.c @@ -0,0 +1,274 @@ +#include "jemalloc/internal/jemalloc_internal.h" +#ifndef JEMALLOC_ZONE +# error "This source file is for zones on Darwin (OS X)." +#endif + +/* + * The malloc_default_purgeable_zone function is only available on >= 10.6. + * We need to check whether it is present at runtime, thus the weak_import. + */ +extern malloc_zone_t *malloc_default_purgeable_zone(void) +JEMALLOC_ATTR(weak_import); + +/******************************************************************************/ +/* Data. */ + +static malloc_zone_t zone; +static struct malloc_introspection_t zone_introspect; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static size_t zone_size(malloc_zone_t *zone, void *ptr); +static void *zone_malloc(malloc_zone_t *zone, size_t size); +static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); +static void *zone_valloc(malloc_zone_t *zone, size_t size); +static void zone_free(malloc_zone_t *zone, void *ptr); +static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +#if (JEMALLOC_ZONE_VERSION >= 5) +static void *zone_memalign(malloc_zone_t *zone, size_t alignment, +#endif +#if (JEMALLOC_ZONE_VERSION >= 6) + size_t size); +static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, + size_t size); +#endif +static void *zone_destroy(malloc_zone_t *zone); +static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static void zone_force_lock(malloc_zone_t *zone); +static void zone_force_unlock(malloc_zone_t *zone); + +/******************************************************************************/ +/* + * Functions. + */ + +static size_t +zone_size(malloc_zone_t *zone, void *ptr) +{ + + /* + * There appear to be places within Darwin (such as setenv(3)) that + * cause calls to this function with pointers that *no* zone owns. If + * we knew that all pointers were owned by *some* zone, we could split + * our zone into two parts, and use one as the default allocator and + * the other as the default deallocator/reallocator. Since that will + * not work in practice, we must check all pointers to assure that they + * reside within a mapped chunk before determining size. 
+ */ + return (ivsalloc(ptr, config_prof)); +} + +static void * +zone_malloc(malloc_zone_t *zone, size_t size) +{ + + return (je_malloc(size)); +} + +static void * +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) +{ + + return (je_calloc(num, size)); +} + +static void * +zone_valloc(malloc_zone_t *zone, size_t size) +{ + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, PAGE, size); + + return (ret); +} + +static void +zone_free(malloc_zone_t *zone, void *ptr) +{ + + if (ivsalloc(ptr, config_prof) != 0) { + je_free(ptr); + return; + } + + free(ptr); +} + +static void * +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) +{ + + if (ivsalloc(ptr, config_prof) != 0) + return (je_realloc(ptr, size)); + + return (realloc(ptr, size)); +} + +#if (JEMALLOC_ZONE_VERSION >= 5) +static void * +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) +{ + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, alignment, size); + + return (ret); +} +#endif + +#if (JEMALLOC_ZONE_VERSION >= 6) +static void +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) +{ + + if (ivsalloc(ptr, config_prof) != 0) { + assert(ivsalloc(ptr, config_prof) == size); + je_free(ptr); + return; + } + + free(ptr); +} +#endif + +static void * +zone_destroy(malloc_zone_t *zone) +{ + + /* This function should never be called. */ + not_reached(); + return (NULL); +} + +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) +{ + + if (size == 0) + size = 1; + return (s2u(size)); +} + +static void +zone_force_lock(malloc_zone_t *zone) +{ + + if (isthreaded) + jemalloc_prefork(); +} + +static void +zone_force_unlock(malloc_zone_t *zone) +{ + + if (isthreaded) + jemalloc_postfork_parent(); +} + +JEMALLOC_ATTR(constructor) +void +register_zone(void) +{ + + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. 
+ */ + malloc_zone_t *default_zone = malloc_default_zone(); + malloc_zone_t *purgeable_zone = NULL; + if (!default_zone->zone_name || + strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { + return; + } + + zone.size = (void *)zone_size; + zone.malloc = (void *)zone_malloc; + zone.calloc = (void *)zone_calloc; + zone.valloc = (void *)zone_valloc; + zone.free = (void *)zone_free; + zone.realloc = (void *)zone_realloc; + zone.destroy = (void *)zone_destroy; + zone.zone_name = "jemalloc_zone"; + zone.batch_malloc = NULL; + zone.batch_free = NULL; + zone.introspect = &zone_introspect; + zone.version = JEMALLOC_ZONE_VERSION; +#if (JEMALLOC_ZONE_VERSION >= 5) + zone.memalign = zone_memalign; +#endif +#if (JEMALLOC_ZONE_VERSION >= 6) + zone.free_definite_size = zone_free_definite_size; +#endif +#if (JEMALLOC_ZONE_VERSION >= 8) + zone.pressure_relief = NULL; +#endif + + zone_introspect.enumerator = NULL; + zone_introspect.good_size = (void *)zone_good_size; + zone_introspect.check = NULL; + zone_introspect.print = NULL; + zone_introspect.log = NULL; + zone_introspect.force_lock = (void *)zone_force_lock; + zone_introspect.force_unlock = (void *)zone_force_unlock; + zone_introspect.statistics = NULL; +#if (JEMALLOC_ZONE_VERSION >= 6) + zone_introspect.zone_locked = NULL; +#endif +#if (JEMALLOC_ZONE_VERSION >= 7) + zone_introspect.enable_discharge_checking = NULL; + zone_introspect.disable_discharge_checking = NULL; + zone_introspect.discharge = NULL; +#ifdef __BLOCKS__ + zone_introspect.enumerate_discharged_pointers = NULL; +#else + zone_introspect.enumerate_unavailable_without_blocks = NULL; +#endif +#endif + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so + * malloc_default_purgeable_zone is called beforehand so that the + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ + if (malloc_default_purgeable_zone != NULL) + purgeable_zone = malloc_default_purgeable_zone(); + + /* Register the custom zone. At this point it won't be the default. */ + malloc_zone_register(&zone); + + do { + default_zone = malloc_default_zone(); + /* + * Unregister and reregister the default zone. On OSX >= 10.6, + * unregistering takes the last registered zone and places it + * at the location of the specified zone. Unregistering the + * default zone thus makes the last registered one the default. + * On OSX < 10.6, unregistering shifts all registered zones. + * The first registered zone then becomes the default. + */ + malloc_zone_unregister(default_zone); + malloc_zone_register(default_zone); + /* + * On OSX 10.6, having the default purgeable zone appear before + * the default zone makes some things crash because it thinks it + * owns the default zone allocated pointers. We thus + * unregister/re-register it in order to ensure it's always + * after the default zone. On OSX < 10.6, there is no purgeable + * zone, so this does nothing. On OSX >= 10.6, unregistering + * replaces the purgeable zone with the last registered zone + * above, i.e. the default zone. Registering it again then puts + * it at the end, obviously after the default zone. 
+ */ + if (purgeable_zone) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + } while (malloc_default_zone() != &zone); +} diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-alti.h b/redis.submodule/jemalloc/test/include/test/SFMT-alti.h new file mode 100644 index 0000000..0005df6 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-alti.h @@ -0,0 +1,186 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT-alti.h + * + * @brief SIMD oriented Fast Mersenne Twister(SFMT) + * pseudorandom number generator + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software. + * see LICENSE.txt + */ + +#ifndef SFMT_ALTI_H +#define SFMT_ALTI_H + +/** + * This function represents the recursion formula in AltiVec and BIG ENDIAN. 
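+ * Roughly, it computes
+ *   r = a ^ (a << 8*SL2) ^ ((b >> SR1) & MSK) ^ (c >> 8*SR2) ^ (d << SL1),
+ * where the SL2/SR2 shifts act on the whole 128-bit word and SL1/SR1 act
+ * on each 32-bit lane.
+ *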
+ * @param a a 128-bit part of the interal state array + * @param b a 128-bit part of the interal state array + * @param c a 128-bit part of the interal state array + * @param d a 128-bit part of the interal state array + * @return output + */ +JEMALLOC_ALWAYS_INLINE +vector unsigned int vec_recursion(vector unsigned int a, + vector unsigned int b, + vector unsigned int c, + vector unsigned int d) { + + const vector unsigned int sl1 = ALTI_SL1; + const vector unsigned int sr1 = ALTI_SR1; +#ifdef ONLY64 + const vector unsigned int mask = ALTI_MSK64; + const vector unsigned char perm_sl = ALTI_SL2_PERM64; + const vector unsigned char perm_sr = ALTI_SR2_PERM64; +#else + const vector unsigned int mask = ALTI_MSK; + const vector unsigned char perm_sl = ALTI_SL2_PERM; + const vector unsigned char perm_sr = ALTI_SR2_PERM; +#endif + vector unsigned int v, w, x, y, z; + x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); + v = a; + y = vec_sr(b, sr1); + z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); + w = vec_sl(d, sl1); + z = vec_xor(z, w); + y = vec_and(y, mask); + v = vec_xor(v, x); + z = vec_xor(z, y); + z = vec_xor(z, v); + return z; +} + +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { + int i; + vector unsigned int r, r1, r2; + + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); + ctx->sfmt[i].s = r; + r1 = r2; + r2 = r; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pesudorandom numbers to be generated. + */ +JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + vector unsigned int r, r1, r2; + + r1 = ctx->sfmt[N - 2].s; + r2 = ctx->sfmt[N - 1].s; + for (i = 0; i < N - POS1; i++) { + r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j].s = array[j + size - N].s; + } + for (; i < size; i++) { + r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); + array[i].s = r; + ctx->sfmt[j++].s = r; + r1 = r2; + r2 = r; + } +} + +#ifndef ONLY64 +#if defined(__APPLE__) +#define ALTI_SWAP (vector unsigned char) \ + (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) +#else +#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} +#endif +/** + * This function swaps high and low 32-bit of 64-bit integers in user + * specified array. + * + * @param array an 128-bit array to be swaped. + * @param size size of 128-bit array. 
+ */ +JEMALLOC_INLINE void swap(w128_t *array, int size) { + int i; + const vector unsigned char perm = ALTI_SWAP; + + for (i = 0; i < size; i++) { + array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); + } +} +#endif + +#endif diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params.h b/redis.submodule/jemalloc/test/include/test/SFMT-params.h new file mode 100644 index 0000000..ade6622 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params.h @@ -0,0 +1,132 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS_H +#define SFMT_PARAMS_H + +#if !defined(MEXP) +#ifdef __GNUC__ + #warning "MEXP is not defined. I assume MEXP is 19937." +#endif + #define MEXP 19937 +#endif +/*----------------- + BASIC DEFINITIONS + -----------------*/ +/** Mersenne Exponent. The period of the sequence + * is a multiple of 2^MEXP-1. + * #define MEXP 19937 */ +/** SFMT generator has an internal state array of 128-bit integers, + * and N is its size. */ +#define N (MEXP / 128 + 1) +/** N32 is the size of internal state array when regarded as an array + * of 32-bit integers.*/ +#define N32 (N * 4) +/** N64 is the size of internal state array when regarded as an array + * of 64-bit integers.*/ +#define N64 (N * 2) + +/*---------------------- + the parameters of SFMT + following definitions are in paramsXXXX.h file. + ----------------------*/ +/** the pick up position of the array. +#define POS1 122 +*/ + +/** the parameter of shift left as four 32-bit registers. +#define SL1 18 + */ + +/** the parameter of shift left as one 128-bit register. + * The 128-bit integer is shifted by (SL2 * 8) bits. +#define SL2 1 +*/ + +/** the parameter of shift right as four 32-bit registers. 
+#define SR1 11 +*/ + +/** the parameter of shift right as one 128-bit register. + * The 128-bit integer is shifted by (SL2 * 8) bits. +#define SR2 1 +*/ + +/** A bitmask, used in the recursion. These parameters are introduced + * to break symmetry of SIMD. +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +*/ + +/** These definitions are part of a 128-bit period certification vector. +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xc98e126aU +*/ + +#if MEXP == 607 + #include "test/SFMT-params607.h" +#elif MEXP == 1279 + #include "test/SFMT-params1279.h" +#elif MEXP == 2281 + #include "test/SFMT-params2281.h" +#elif MEXP == 4253 + #include "test/SFMT-params4253.h" +#elif MEXP == 11213 + #include "test/SFMT-params11213.h" +#elif MEXP == 19937 + #include "test/SFMT-params19937.h" +#elif MEXP == 44497 + #include "test/SFMT-params44497.h" +#elif MEXP == 86243 + #include "test/SFMT-params86243.h" +#elif MEXP == 132049 + #include "test/SFMT-params132049.h" +#elif MEXP == 216091 + #include "test/SFMT-params216091.h" +#else +#ifdef __GNUC__ + #error "MEXP is not valid." + #undef MEXP +#else + #undef MEXP +#endif + +#endif + +#endif /* SFMT_PARAMS_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params11213.h b/redis.submodule/jemalloc/test/include/test/SFMT-params11213.h new file mode 100644 index 0000000..2994bd2 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params11213.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
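
For orientation, the size macros near the top of SFMT-params.h follow directly from the chosen Mersenne exponent. Worked out for MEXP = 19937, the value jemalloc_test.h selects later in this patch:

    N   = MEXP / 128 + 1 = 19937 / 128 + 1 = 155 + 1 = 156   (128-bit words of state)
    N32 = N * 4 = 624                                        (state viewed as 32-bit words)
    N64 = N * 2 = 312                                        (state viewed as 64-bit words)

and, as noted above, the period of the generated sequence is a multiple of 2^19937 - 1. The 624-word figure is the same state size as the classic 32-bit MT19937 Mersenne Twister, which is a handy sanity check on the arithmetic.
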
+ */ +#ifndef SFMT_PARAMS11213_H +#define SFMT_PARAMS11213_H + +#define POS1 68 +#define SL1 14 +#define SL2 3 +#define SR1 7 +#define SR2 3 +#define MSK1 0xeffff7fbU +#define MSK2 0xffffffefU +#define MSK3 0xdfdfbfffU +#define MSK4 0x7fffdbfdU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xe8148000U +#define PARITY4 0xd0c7afa3U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" + +#endif /* SFMT_PARAMS11213_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params1279.h b/redis.submodule/jemalloc/test/include/test/SFMT-params1279.h new file mode 100644 index 0000000..d7959f9 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params1279.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS1279_H +#define SFMT_PARAMS1279_H + +#define POS1 7 +#define SL1 14 +#define SL2 3 +#define SR1 5 +#define SR2 1 +#define MSK1 0xf7fefffdU +#define MSK2 0x7fefcfffU +#define MSK3 0xaff3ef3fU +#define MSK4 0xb5ffff7fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x20000000U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" + +#endif /* SFMT_PARAMS1279_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params132049.h b/redis.submodule/jemalloc/test/include/test/SFMT-params132049.h new file mode 100644 index 0000000..a1dcec3 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params132049.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS132049_H +#define SFMT_PARAMS132049_H + +#define POS1 110 +#define SL1 19 +#define SL2 1 +#define SR1 21 +#define SR2 1 +#define MSK1 0xffffbb5fU +#define MSK2 0xfb6ebf95U +#define MSK3 0xfffefffaU +#define MSK4 0xcff77fffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xcb520000U +#define PARITY4 0xc7e91c7dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" + +#endif /* SFMT_PARAMS132049_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params19937.h b/redis.submodule/jemalloc/test/include/test/SFMT-params19937.h new file mode 100644 index 0000000..fb92b4c --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params19937.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS19937_H +#define SFMT_PARAMS19937_H + +#define POS1 122 +#define SL1 18 +#define SL2 1 +#define SR1 11 +#define SR2 1 +#define MSK1 0xdfffffefU +#define MSK2 0xddfecb7fU +#define MSK3 0xbffaffffU +#define MSK4 0xbffffff6U +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x13c9e684U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" + +#endif /* SFMT_PARAMS19937_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params216091.h b/redis.submodule/jemalloc/test/include/test/SFMT-params216091.h new file mode 100644 index 0000000..125ce28 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params216091.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS216091_H +#define SFMT_PARAMS216091_H + +#define POS1 627 +#define SL1 11 +#define SL2 3 +#define SR1 10 +#define SR2 1 +#define MSK1 0xbff7bff7U +#define MSK2 0xbfffffffU +#define MSK3 0xbffffa7fU +#define MSK4 0xffddfbfbU +#define PARITY1 0xf8000001U +#define PARITY2 0x89e80709U +#define PARITY3 0x3bd2b64bU +#define PARITY4 0x0c64b1e4U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) 
*/ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" + +#endif /* SFMT_PARAMS216091_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params2281.h b/redis.submodule/jemalloc/test/include/test/SFMT-params2281.h new file mode 100644 index 0000000..0ef85c4 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params2281.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef SFMT_PARAMS2281_H +#define SFMT_PARAMS2281_H + +#define POS1 12 +#define SL1 19 +#define SL2 1 +#define SR1 5 +#define SR2 1 +#define MSK1 0xbff7ffbfU +#define MSK2 0xfdfffffeU +#define MSK3 0xf7ffef7fU +#define MSK4 0xf2f7cbbfU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x41dfa600U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" + +#endif /* SFMT_PARAMS2281_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params4253.h b/redis.submodule/jemalloc/test/include/test/SFMT-params4253.h new file mode 100644 index 0000000..9f07bc6 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params4253.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS4253_H +#define SFMT_PARAMS4253_H + +#define POS1 17 +#define SL1 20 +#define SL2 1 +#define SR1 7 +#define SR2 1 +#define MSK1 0x9f7bffffU +#define MSK2 0x9fffff5fU +#define MSK3 0x3efffffbU +#define MSK4 0xfffff7bbU +#define PARITY1 0xa8000001U +#define PARITY2 0xaf5390a3U +#define PARITY3 0xb740b3f8U +#define PARITY4 0x6c11486dU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} + #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" + +#endif /* SFMT_PARAMS4253_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params44497.h b/redis.submodule/jemalloc/test/include/test/SFMT-params44497.h new file mode 100644 index 0000000..85598fe --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params44497.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS44497_H +#define SFMT_PARAMS44497_H + +#define POS1 330 +#define SL1 5 +#define SL2 3 +#define SR1 9 +#define SR2 3 +#define MSK1 0xeffffffbU +#define MSK2 0xdfbebfffU +#define MSK3 0xbfbf7befU +#define MSK4 0x9ffd7bffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0xa3ac4000U +#define PARITY4 0xecc1327aU + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" + +#endif /* SFMT_PARAMS44497_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params607.h b/redis.submodule/jemalloc/test/include/test/SFMT-params607.h new file mode 100644 index 0000000..bc76485 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params607.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS607_H +#define SFMT_PARAMS607_H + +#define POS1 2 +#define SL1 15 +#define SL2 3 +#define SR1 13 +#define SR2 3 +#define MSK1 0xfdff37ffU +#define MSK2 0xef7f3f7dU +#define MSK3 0xff777b7dU +#define MSK4 0x7ff7fb2fU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0x5986f054U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) + #define ALTI_SR2_PERM \ + (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) +#else /* For OTHER OSs(Linux?) */ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} + #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} + #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} + #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} +#endif /* For OSX */ +#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" + +#endif /* SFMT_PARAMS607_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-params86243.h b/redis.submodule/jemalloc/test/include/test/SFMT-params86243.h new file mode 100644 index 0000000..5e4d783 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-params86243.h @@ -0,0 +1,81 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef SFMT_PARAMS86243_H +#define SFMT_PARAMS86243_H + +#define POS1 366 +#define SL1 6 +#define SL2 7 +#define SR1 19 +#define SR2 1 +#define MSK1 0xfdbffbffU +#define MSK2 0xbff7ff3fU +#define MSK3 0xfd77efffU +#define MSK4 0xbf9ff3ffU +#define PARITY1 0x00000001U +#define PARITY2 0x00000000U +#define PARITY3 0x00000000U +#define PARITY4 0xe9528d85U + + +/* PARAMETERS FOR ALTIVEC */ +#if defined(__APPLE__) /* For OSX */ + #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) + #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) + #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) + #define ALTI_MSK64 \ + (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) + #define ALTI_SL2_PERM \ + (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) + #define ALTI_SL2_PERM64 \ + (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) + #define ALTI_SR2_PERM \ + (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) + #define ALTI_SR2_PERM64 \ + (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) +#else /* For OTHER OSs(Linux?) 
*/ + #define ALTI_SL1 {SL1, SL1, SL1, SL1} + #define ALTI_SR1 {SR1, SR1, SR1, SR1} + #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} + #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} + #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} + #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} + #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} + #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} +#endif /* For OSX */ +#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" + +#endif /* SFMT_PARAMS86243_H */ diff --git a/redis.submodule/jemalloc/test/include/test/SFMT-sse2.h b/redis.submodule/jemalloc/test/include/test/SFMT-sse2.h new file mode 100644 index 0000000..0314a16 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT-sse2.h @@ -0,0 +1,157 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT-sse2.h + * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * @note We assume LITTLE ENDIAN in this file + * + * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software, see LICENSE.txt + */ + +#ifndef SFMT_SSE2_H +#define SFMT_SSE2_H + +/** + * This function represents the recursion formula. 
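
One detail worth keeping in mind while reading the SSE2 variant that follows: _mm_slli_si128()/_mm_srli_si128() shift the whole 128-bit register by a byte count (these implement the SL2/SR2 shifts), whereas _mm_slli_epi32()/_mm_srli_epi32() shift each 32-bit lane independently by a bit count (the SL1/SR1 shifts). The small stand-alone check below is illustrative only and not part of the patch; it needs an SSE2-capable x86 target.

#include <emmintrin.h>   /* SSE2 intrinsics */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* _mm_set_epi32() takes lanes from the highest to the lowest. */
    __m128i v = _mm_set_epi32(0x44444444, 0x33333333, 0x22222222, 0x11111111);
    uint32_t out[4];

    /* Whole-register shift: the count is in bytes, so data crosses lane boundaries. */
    _mm_storeu_si128((__m128i *)out, _mm_srli_si128(v, 1));
    printf("srli_si128 by 1 byte: %08x %08x %08x %08x\n", out[3], out[2], out[1], out[0]);

    /* Per-lane shift: the count is in bits, each 32-bit lane is shifted on its own. */
    _mm_storeu_si128((__m128i *)out, _mm_srli_epi32(v, 1));
    printf("srli_epi32 by 1 bit : %08x %08x %08x %08x\n", out[3], out[2], out[1], out[0]);
    return 0;
}
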
+ * @param a a 128-bit part of the interal state array + * @param b a 128-bit part of the interal state array + * @param c a 128-bit part of the interal state array + * @param d a 128-bit part of the interal state array + * @param mask 128-bit mask + * @return output + */ +JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, + __m128i c, __m128i d, __m128i mask) { + __m128i v, x, y, z; + + x = _mm_load_si128(a); + y = _mm_srli_epi32(*b, SR1); + z = _mm_srli_si128(c, SR2); + v = _mm_slli_epi32(d, SL1); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, v); + x = _mm_slli_si128(x, SL2); + y = _mm_and_si128(y, mask); + z = _mm_xor_si128(z, x); + z = _mm_xor_si128(z, y); + return z; +} + +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { + int i; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); + + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, + mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&ctx->sfmt[i].si, r); + r1 = r2; + r2 = r; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pesudorandom numbers to be generated. + */ +JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + __m128i r, r1, r2, mask; + mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); + + r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); + r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); + for (i = 0; i < N - POS1; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (; i < N; i++) { + r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + /* main loop */ + for (; i < size - N; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + r1 = r2; + r2 = r; + } + for (j = 0; j < 2 * N - size; j++) { + r = _mm_load_si128(&array[j + size - N].si); + _mm_store_si128(&ctx->sfmt[j].si, r); + } + for (; i < size; i++) { + r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, + mask); + _mm_store_si128(&array[i].si, r); + _mm_store_si128(&ctx->sfmt[j++].si, r); + r1 = r2; + r2 = r; + } +} + +#endif diff --git a/redis.submodule/jemalloc/test/include/test/SFMT.h b/redis.submodule/jemalloc/test/include/test/SFMT.h new file mode 100644 index 0000000..09c1607 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/SFMT.h @@ -0,0 +1,171 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.h + * + * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom + * number generator + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * The new BSD License is applied to this software. + * see LICENSE.txt + * + * @note We assume that your system has inttypes.h. If your system + * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, + * and you have to define PRIu64 and PRIx64 in this file as follows: + * @verbatim + typedef unsigned int uint32_t + typedef unsigned long long uint64_t + #define PRIu64 "llu" + #define PRIx64 "llx" +@endverbatim + * uint32_t must be exactly 32-bit unsigned integer type (no more, no + * less), and uint64_t must be exactly 64-bit unsigned integer type. + * PRIu64 and PRIx64 are used for printf function to print 64-bit + * unsigned int and 64-bit unsigned int in hexadecimal format. 
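
The declarations just below are the generator API the jemalloc tests drive: init_gen_rand() allocates and seeds a generator, gen_rand32()/gen_rand32_range() draw values, fill_array32()/fill_array64() fill whole buffers, and fini_gen_rand() releases the state. A minimal usage sketch, illustrative only and not part of the patch, assuming it is compiled and linked against the SFMT implementation that accompanies these headers in the jemalloc test suite:

#include <inttypes.h>
#include <stdio.h>

#define MEXP 19937          /* same parameter set jemalloc_test.h selects */
#include "test/SFMT.h"

int main(void) {
    sfmt_t *ctx = init_gen_rand(12345U);     /* allocate and seed the generator */
    int i;

    printf("%s\n", get_idstring());          /* identifies the active parameter set */
    for (i = 0; i < 4; i++)
        printf("%" PRIu32 "\n", gen_rand32(ctx));
    printf("in [0, 100): %" PRIu32 "\n", gen_rand32_range(ctx, 100));
    fini_gen_rand(ctx);                      /* release the generator state */
    return 0;
}
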
+ */ + +#ifndef SFMT_H +#define SFMT_H + +typedef struct sfmt_s sfmt_t; + +uint32_t gen_rand32(sfmt_t *ctx); +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); +uint64_t gen_rand64(sfmt_t *ctx); +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); +void fill_array32(sfmt_t *ctx, uint32_t *array, int size); +void fill_array64(sfmt_t *ctx, uint64_t *array, int size); +sfmt_t *init_gen_rand(uint32_t seed); +sfmt_t *init_by_array(uint32_t *init_key, int key_length); +void fini_gen_rand(sfmt_t *ctx); +const char *get_idstring(void); +int get_min_array_size32(void); +int get_min_array_size64(void); + +#ifndef JEMALLOC_ENABLE_INLINE +double to_real1(uint32_t v); +double genrand_real1(sfmt_t *ctx); +double to_real2(uint32_t v); +double genrand_real2(sfmt_t *ctx); +double to_real3(uint32_t v); +double genrand_real3(sfmt_t *ctx); +double to_res53(uint64_t v); +double to_res53_mix(uint32_t x, uint32_t y); +double genrand_res53(sfmt_t *ctx); +double genrand_res53_mix(sfmt_t *ctx); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_)) +/* These real versions are due to Isaku Wada */ +/** generates a random number on [0,1]-real-interval */ +JEMALLOC_INLINE double to_real1(uint32_t v) +{ + return v * (1.0/4294967295.0); + /* divided by 2^32-1 */ +} + +/** generates a random number on [0,1]-real-interval */ +JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) +{ + return to_real1(gen_rand32(ctx)); +} + +/** generates a random number on [0,1)-real-interval */ +JEMALLOC_INLINE double to_real2(uint32_t v) +{ + return v * (1.0/4294967296.0); + /* divided by 2^32 */ +} + +/** generates a random number on [0,1)-real-interval */ +JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) +{ + return to_real2(gen_rand32(ctx)); +} + +/** generates a random number on (0,1)-real-interval */ +JEMALLOC_INLINE double to_real3(uint32_t v) +{ + return (((double)v) + 0.5)*(1.0/4294967296.0); + /* divided by 2^32 */ +} + +/** generates a random number on (0,1)-real-interval */ +JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) +{ + return to_real3(gen_rand32(ctx)); +} +/** These real versions are due to Isaku Wada */ + +/** generates a random number on [0,1) with 53-bit resolution*/ +JEMALLOC_INLINE double to_res53(uint64_t v) +{ + return v * (1.0/18446744073709551616.0L); +} + +/** generates a random number on [0,1) with 53-bit resolution from two + * 32 bit integers */ +JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) +{ + return to_res53(x | ((uint64_t)y << 32)); +} + +/** generates a random number on [0,1) with 53-bit resolution + */ +JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) +{ + return to_res53(gen_rand64(ctx)); +} + +/** generates a random number on [0,1) with 53-bit resolution + using 32bit integer. + */ +JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) +{ + uint32_t x, y; + + x = gen_rand32(ctx); + y = gen_rand32(ctx); + return to_res53_mix(x, y); +} +#endif +#endif diff --git a/redis.submodule/jemalloc/test/include/test/btalloc.h b/redis.submodule/jemalloc/test/include/test/btalloc.h new file mode 100644 index 0000000..c3f9d4d --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/btalloc.h @@ -0,0 +1,31 @@ +/* btalloc() provides a mechanism for allocating via permuted backtraces. 
*/ +void *btalloc(size_t size, unsigned bits); + +#define btalloc_n_proto(n) \ +void *btalloc_##n(size_t size, unsigned bits); +btalloc_n_proto(0) +btalloc_n_proto(1) + +#define btalloc_n_gen(n) \ +void * \ +btalloc_##n(size_t size, unsigned bits) \ +{ \ + void *p; \ + \ + if (bits == 0) \ + p = mallocx(size, 0); \ + else { \ + switch (bits & 0x1U) { \ + case 0: \ + p = (btalloc_0(size, bits >> 1)); \ + break; \ + case 1: \ + p = (btalloc_1(size, bits >> 1)); \ + break; \ + default: not_reached(); \ + } \ + } \ + /* Intentionally sabotage tail call optimization. */ \ + assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ + return (p); \ +} diff --git a/redis.submodule/jemalloc/test/include/test/jemalloc_test.h.in b/redis.submodule/jemalloc/test/include/test/jemalloc_test.h.in new file mode 100644 index 0000000..455569d --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/jemalloc_test.h.in @@ -0,0 +1,151 @@ +#include +#ifndef SIZE_T_MAX +# define SIZE_T_MAX SIZE_MAX +#endif +#include +#include +#include +#include +#include +#include +#ifdef _WIN32 +# include "msvc_compat/strings.h" +#endif +#include + +#ifdef _WIN32 +# include +# include "msvc_compat/windows_extra.h" +#else +# include +#endif + +/******************************************************************************/ +/* + * Define always-enabled assertion macros, so that test assertions execute even + * if assertions are disabled in the library code. These definitions must + * exist prior to including "jemalloc/internal/util.h". + */ +#define assert(e) do { \ + if (!(e)) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define not_implemented() do { \ + malloc_printf(": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define assert_not_implemented(e) do { \ + if (!(e)) \ + not_implemented(); \ +} while (0) + +#include "test/jemalloc_test_defs.h" + +#ifdef JEMALLOC_OSSPIN +# include +#endif + +#if defined(HAVE_ALTIVEC) && !defined(__APPLE__) +# include +#endif +#ifdef HAVE_SSE2 +# include +#endif + +/******************************************************************************/ +/* + * For unit tests, expose all public and private interfaces. + */ +#ifdef JEMALLOC_UNIT_TEST +# define JEMALLOC_JET +# define JEMALLOC_MANGLE +# include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* + * For integration tests, expose the public jemalloc interfaces, but only + * expose the minimum necessary internal utility code (to avoid re-implementing + * essentially identical code within the test infrastructure). 
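A hedged sketch of how a test translation unit ends up in one of these modes: exactly one mode macro must be visible before this header is included, otherwise the #error branch further down fires. In the real build the macro normally comes from the compiler command line rather than from the source file; defining it inline here is purely for illustration.

    /* Hypothetical: the build usually passes -DJEMALLOC_UNIT_TEST,
     * -DJEMALLOC_INTEGRATION_TEST, or -DJEMALLOC_STRESS_TEST instead. */
    #define JEMALLOC_INTEGRATION_TEST       /* public API + minimal internal utilities */
    #include "test/jemalloc_test.h"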
+ */ +#elif defined(JEMALLOC_INTEGRATION_TEST) +# define JEMALLOC_MANGLE +# include "jemalloc/jemalloc@install_suffix@.h" +# include "jemalloc/internal/jemalloc_internal_defs.h" +# include "jemalloc/internal/jemalloc_internal_macros.h" + +# define JEMALLOC_N(n) @private_namespace@##n +# include "jemalloc/internal/private_namespace.h" + +# define JEMALLOC_H_TYPES +# define JEMALLOC_H_STRUCTS +# define JEMALLOC_H_EXTERNS +# define JEMALLOC_H_INLINES +# include "jemalloc/internal/util.h" +# include "jemalloc/internal/qr.h" +# include "jemalloc/internal/ql.h" +# undef JEMALLOC_H_TYPES +# undef JEMALLOC_H_STRUCTS +# undef JEMALLOC_H_EXTERNS +# undef JEMALLOC_H_INLINES + +/******************************************************************************/ +/* + * For stress tests, expose the public jemalloc interfaces with name mangling + * so that they can be tested as e.g. malloc() and free(). Also expose the + * public jemalloc interfaces with jet_ prefixes, so that stress tests can use + * a separate allocator for their internal data structures. + */ +#elif defined(JEMALLOC_STRESS_TEST) +# include "jemalloc/jemalloc@install_suffix@.h" + +# include "jemalloc/jemalloc_protos_jet.h" + +# define JEMALLOC_JET +# include "jemalloc/internal/jemalloc_internal.h" +# include "jemalloc/internal/public_unnamespace.h" +# undef JEMALLOC_JET + +# include "jemalloc/jemalloc_rename.h" +# define JEMALLOC_MANGLE +# ifdef JEMALLOC_STRESS_TESTLIB +# include "jemalloc/jemalloc_mangle_jet.h" +# else +# include "jemalloc/jemalloc_mangle.h" +# endif + +/******************************************************************************/ +/* + * This header does dangerous things, the effects of which only test code + * should be subject to. + */ +#else +# error "This header cannot be included outside a testing context" +#endif + +/******************************************************************************/ +/* + * Common test utilities. + */ +#include "test/btalloc.h" +#include "test/math.h" +#include "test/mtx.h" +#include "test/mq.h" +#include "test/test.h" +#include "test/timer.h" +#include "test/thd.h" +#define MEXP 19937 +#include "test/SFMT.h" diff --git a/redis.submodule/jemalloc/test/include/test/jemalloc_test_defs.h.in b/redis.submodule/jemalloc/test/include/test/jemalloc_test_defs.h.in new file mode 100644 index 0000000..5cc8532 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/jemalloc_test_defs.h.in @@ -0,0 +1,9 @@ +#include "jemalloc/internal/jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +/* + * For use by SFMT. configure.ac doesn't actually define HAVE_SSE2 because its + * dependencies are notoriously unportable in practice. + */ +#undef HAVE_SSE2 +#undef HAVE_ALTIVEC diff --git a/redis.submodule/jemalloc/test/include/test/math.h b/redis.submodule/jemalloc/test/include/test/math.h new file mode 100644 index 0000000..b057b29 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/math.h @@ -0,0 +1,311 @@ +#ifndef JEMALLOC_ENABLE_INLINE +double ln_gamma(double x); +double i_gamma(double x, double p, double ln_gamma_p); +double pt_norm(double p); +double pt_chi2(double p, double df, double ln_gamma_df_2); +double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) +/* + * Compute the natural log of Gamma(x), accurate to 10 decimal places. + * + * This implementation is based on: + * + * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function + * [S14]. 
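As a hedged sanity check of the functions prototyped above, a test could spot-check a few well-known values; the reference numbers come from standard tables, not from this patch, and the tolerances are chosen loosely relative to the stated accuracies.

    assert_true(fabs(ln_gamma(1.0)) < 1e-8, "ln_gamma(1.0) should be ~0");
    assert_true(fabs(ln_gamma(0.5) - 0.5723649429) < 1e-8,
        "ln_gamma(0.5) should be ~ln(sqrt(pi))");
    assert_true(fabs(pt_norm(0.975) - 1.9599639845) < 1e-8,
        "pt_norm(0.975) should be ~1.96");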
Communications of the ACM 9(9):684. + */ +JEMALLOC_INLINE double +ln_gamma(double x) +{ + double f, z; + + assert(x > 0.0); + + if (x < 7.0) { + f = 1.0; + z = x; + while (z < 7.0) { + f *= z; + z += 1.0; + } + x = z; + f = -log(f); + } else + f = 0.0; + + z = 1.0 / (x * x); + + return (f + (x-0.5) * log(x) - x + 0.918938533204673 + + (((-0.000595238095238 * z + 0.000793650793651) * z - + 0.002777777777778) * z + 0.083333333333333) / x); +} + +/* + * Compute the incomplete Gamma ratio for [0..x], where p is the shape + * parameter, and ln_gamma_p is ln_gamma(p). + * + * This implementation is based on: + * + * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. + * Applied Statistics 19:285-287. + */ +JEMALLOC_INLINE double +i_gamma(double x, double p, double ln_gamma_p) +{ + double acu, factor, oflo, gin, term, rn, a, b, an, dif; + double pn[6]; + unsigned i; + + assert(p > 0.0); + assert(x >= 0.0); + + if (x == 0.0) + return (0.0); + + acu = 1.0e-10; + oflo = 1.0e30; + gin = 0.0; + factor = exp(p * log(x) - x - ln_gamma_p); + + if (x <= 1.0 || x < p) { + /* Calculation by series expansion. */ + gin = 1.0; + term = 1.0; + rn = p; + + while (true) { + rn += 1.0; + term *= x / rn; + gin += term; + if (term <= acu) { + gin *= factor / p; + return (gin); + } + } + } else { + /* Calculation by continued fraction. */ + a = 1.0 - p; + b = a + x + 1.0; + term = 0.0; + pn[0] = 1.0; + pn[1] = x; + pn[2] = x + 1.0; + pn[3] = x * b; + gin = pn[2] / pn[3]; + + while (true) { + a += 1.0; + b += 2.0; + term += 1.0; + an = a * term; + for (i = 0; i < 2; i++) + pn[i+4] = b * pn[i+2] - an * pn[i]; + if (pn[5] != 0.0) { + rn = pn[4] / pn[5]; + dif = fabs(gin - rn); + if (dif <= acu && dif <= acu * rn) { + gin = 1.0 - factor * gin; + return (gin); + } + gin = rn; + } + for (i = 0; i < 4; i++) + pn[i] = pn[i+2]; + + if (fabs(pn[4]) >= oflo) { + for (i = 0; i < 4; i++) + pn[i] /= oflo; + } + } + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the normal distribution, + * compute the limit on the definite integral from [-inf..z] that satisfies p, + * accurate to 16 decimal places. + * + * This implementation is based on: + * + * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal + * distribution. Applied Statistics 37(3):477-484. + */ +JEMALLOC_INLINE double +pt_norm(double p) +{ + double q, r, ret; + + assert(p > 0.0 && p < 1.0); + + q = p - 0.5; + if (fabs(q) <= 0.425) { + /* p close to 1/2. */ + r = 0.180625 - q * q; + return (q * (((((((2.5090809287301226727e3 * r + + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * + r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) + * r + 3.3871328727963666080e0) / + (((((((5.2264952788528545610e3 * r + + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * + r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) + * r + 1.0)); + } else { + if (q < 0.0) + r = p; + else + r = 1.0 - p; + assert(r > 0.0); + + r = sqrt(-log(r)); + if (r <= 5.0) { + /* p neither close to 1/2 nor 0 or 1. 
*/ + r -= 1.6; + ret = ((((((((7.74545014278341407640e-4 * r + + 2.27238449892691845833e-2) * r + + 2.41780725177450611770e-1) * r + + 1.27045825245236838258e0) * r + + 3.64784832476320460504e0) * r + + 5.76949722146069140550e0) * r + + 4.63033784615654529590e0) * r + + 1.42343711074968357734e0) / + (((((((1.05075007164441684324e-9 * r + + 5.47593808499534494600e-4) * r + + 1.51986665636164571966e-2) + * r + 1.48103976427480074590e-1) * r + + 6.89767334985100004550e-1) * r + + 1.67638483018380384940e0) * r + + 2.05319162663775882187e0) * r + 1.0)); + } else { + /* p near 0 or 1. */ + r -= 5.0; + ret = ((((((((2.01033439929228813265e-7 * r + + 2.71155556874348757815e-5) * r + + 1.24266094738807843860e-3) * r + + 2.65321895265761230930e-2) * r + + 2.96560571828504891230e-1) * r + + 1.78482653991729133580e0) * r + + 5.46378491116411436990e0) * r + + 6.65790464350110377720e0) / + (((((((2.04426310338993978564e-15 * r + + 1.42151175831644588870e-7) * r + + 1.84631831751005468180e-5) * r + + 7.86869131145613259100e-4) * r + + 1.48753612908506148525e-2) * r + + 1.36929880922735805310e-1) * r + + 5.99832206555887937690e-1) + * r + 1.0)); + } + if (q < 0.0) + ret = -ret; + return (ret); + } +} + +/* + * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution + * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute + * the upper limit on the definite integral from [0..z] that satisfies p, + * accurate to 12 decimal places. + * + * This implementation is based on: + * + * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of + * the Chi^2 distribution. Applied Statistics 24(3):385-388. + * + * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage + * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. + */ +JEMALLOC_INLINE double +pt_chi2(double p, double df, double ln_gamma_df_2) +{ + double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; + unsigned i; + + assert(p >= 0.0 && p < 1.0); + assert(df > 0.0); + + e = 5.0e-7; + aa = 0.6931471805; + + xx = 0.5 * df; + c = xx - 1.0; + + if (df < -1.24 * log(p)) { + /* Starting approximation for small Chi^2. */ + ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); + if (ch - e < 0.0) + return (ch); + } else { + if (df > 0.32) { + x = pt_norm(p); + /* + * Starting approximation using Wilson and Hilferty + * estimate. + */ + p1 = 0.222222 / df; + ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); + /* Starting approximation for p tending to 1. */ + if (ch > 2.2 * df + 6.0) { + ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + + ln_gamma_df_2); + } + } else { + ch = 0.4; + a = log(1.0 - p); + while (true) { + q = ch; + p1 = 1.0 + ch * (4.67 + ch); + p2 = ch * (6.73 + ch * (6.66 + ch)); + t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch + * (13.32 + 3.0 * ch)) / p2; + ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + + c * aa) * p2 / p1) / t; + if (fabs(q / ch - 1.0) - 0.01 <= 0.0) + break; + } + } + } + + for (i = 0; i < 20; i++) { + /* Calculation of seven-term Taylor series. 
*/ + q = ch; + p1 = 0.5 * ch; + if (p1 < 0.0) + return (-1.0); + p2 = p - i_gamma(p1, xx, ln_gamma_df_2); + t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); + b = t / ch; + a = 0.5 * t - b * c; + s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + + 60.0 * a))))) / 420.0; + s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * + a)))) / 2520.0; + s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; + s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * + (889.0 + 1740.0 * a))) / 5040.0; + s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; + s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; + ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 + - b * (s4 - b * (s5 - b * s6)))))); + if (fabs(q / ch - 1.0) <= e) + break; + } + + return (ch); +} + +/* + * Given a value p in [0..1] and Gamma distribution shape and scale parameters, + * compute the upper limit on the definite integral from [0..z] that satisfies + * p. + */ +JEMALLOC_INLINE double +pt_gamma(double p, double shape, double scale, double ln_gamma_shape) +{ + + return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); +} +#endif diff --git a/redis.submodule/jemalloc/test/include/test/mq.h b/redis.submodule/jemalloc/test/include/test/mq.h new file mode 100644 index 0000000..7c4df49 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/mq.h @@ -0,0 +1,109 @@ +void mq_nanosleep(unsigned ns); + +/* + * Simple templated message queue implementation that relies on only mutexes for + * synchronization (which reduces portability issues). Given the following + * setup: + * + * typedef struct mq_msg_s mq_msg_t; + * struct mq_msg_s { + * mq_msg(mq_msg_t) link; + * [message data] + * }; + * mq_gen(, mq_, mq_t, mq_msg_t, link) + * + * The API is as follows: + * + * bool mq_init(mq_t *mq); + * void mq_fini(mq_t *mq); + * unsigned mq_count(mq_t *mq); + * mq_msg_t *mq_tryget(mq_t *mq); + * mq_msg_t *mq_get(mq_t *mq); + * void mq_put(mq_t *mq, mq_msg_t *msg); + * + * The message queue linkage embedded in each message is to be treated as + * externally opaque (no need to initialize or clean up externally). mq_fini() + * does not perform any cleanup of messages, since it knows nothing of their + * payloads. 
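Following that description, a hedged sketch of instantiating and exercising the queue; the message type and its payload field are made up for illustration.

    typedef struct mq_msg_s mq_msg_t;
    struct mq_msg_s {
            mq_msg(mq_msg_t)        link;           /* opaque queue linkage */
            int                     payload;        /* illustrative message data */
    };
    mq_gen(static, mq_, mq_t, mq_msg_t, link)

    static void
    mq_demo(void)
    {
            mq_t mq;
            mq_msg_t msg, *recv;

            assert_false(mq_init(&mq), "Unexpected mq_init() failure");
            msg.payload = 42;
            mq_put(&mq, &msg);
            assert_u_eq(mq_count(&mq), 1, "Queue should hold one message");
            recv = mq_get(&mq);
            assert_ptr_eq(recv, &msg, "Unexpected message");
            assert_u_eq(mq_count(&mq), 0, "Queue should be empty");
            mq_fini(&mq);
    }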
+ */ +#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) + +#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ +typedef struct { \ + mtx_t lock; \ + ql_head(a_mq_msg_type) msgs; \ + unsigned count; \ +} a_mq_type; \ +a_attr bool \ +a_prefix##init(a_mq_type *mq) { \ + \ + if (mtx_init(&mq->lock)) \ + return (true); \ + ql_new(&mq->msgs); \ + mq->count = 0; \ + return (false); \ +} \ +a_attr void \ +a_prefix##fini(a_mq_type *mq) \ +{ \ + \ + mtx_fini(&mq->lock); \ +} \ +a_attr unsigned \ +a_prefix##count(a_mq_type *mq) \ +{ \ + unsigned count; \ + \ + mtx_lock(&mq->lock); \ + count = mq->count; \ + mtx_unlock(&mq->lock); \ + return (count); \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##tryget(a_mq_type *mq) \ +{ \ + a_mq_msg_type *msg; \ + \ + mtx_lock(&mq->lock); \ + msg = ql_first(&mq->msgs); \ + if (msg != NULL) { \ + ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ + mq->count--; \ + } \ + mtx_unlock(&mq->lock); \ + return (msg); \ +} \ +a_attr a_mq_msg_type * \ +a_prefix##get(a_mq_type *mq) \ +{ \ + a_mq_msg_type *msg; \ + unsigned ns; \ + \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) \ + return (msg); \ + \ + ns = 1; \ + while (true) { \ + mq_nanosleep(ns); \ + msg = a_prefix##tryget(mq); \ + if (msg != NULL) \ + return (msg); \ + if (ns < 1000*1000*1000) { \ + /* Double sleep time, up to max 1 second. */ \ + ns <<= 1; \ + if (ns > 1000*1000*1000) \ + ns = 1000*1000*1000; \ + } \ + } \ +} \ +a_attr void \ +a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ +{ \ + \ + mtx_lock(&mq->lock); \ + ql_elm_new(msg, a_field); \ + ql_tail_insert(&mq->msgs, msg, a_field); \ + mq->count++; \ + mtx_unlock(&mq->lock); \ +} diff --git a/redis.submodule/jemalloc/test/include/test/mtx.h b/redis.submodule/jemalloc/test/include/test/mtx.h new file mode 100644 index 0000000..bbe822f --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/mtx.h @@ -0,0 +1,21 @@ +/* + * mtx is a slightly simplified version of malloc_mutex. This code duplication + * is unfortunate, but there are allocator bootstrapping considerations that + * would leak into the test infrastructure if malloc_mutex were used directly + * in tests. + */ + +typedef struct { +#ifdef _WIN32 + CRITICAL_SECTION lock; +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLock lock; +#else + pthread_mutex_t lock; +#endif +} mtx_t; + +bool mtx_init(mtx_t *mtx); +void mtx_fini(mtx_t *mtx); +void mtx_lock(mtx_t *mtx); +void mtx_unlock(mtx_t *mtx); diff --git a/redis.submodule/jemalloc/test/include/test/test.h b/redis.submodule/jemalloc/test/include/test/test.h new file mode 100644 index 0000000..3cf901f --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/test.h @@ -0,0 +1,329 @@ +#define ASSERT_BUFSIZE 256 + +#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ + t a_ = (a); \ + t b_ = (b); \ + if (!(a_ cmp b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) "#cmp" (%s) --> " \ + "%"pri" "#neg_cmp" %"pri": ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_, b_); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ + ==, "p", __VA_ARGS__) +#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_not_null(a, ...) 
assert_cmp(void *, a, NULL, !=, \ + ==, "p", __VA_ARGS__) + +#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) +#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) +#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) +#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) +#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) +#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) + +#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) +#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) +#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) +#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) +#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) +#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) + +#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) +#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) +#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) +#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) +#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) +#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) + +#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) +#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) +#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) +#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) +#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) +#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) + +#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ + !=, "ld", __VA_ARGS__) +#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ + ==, "ld", __VA_ARGS__) +#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ + >=, "ld", __VA_ARGS__) +#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ + >, "ld", __VA_ARGS__) +#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ + <, "ld", __VA_ARGS__) +#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ + <=, "ld", __VA_ARGS__) + +#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ + a, b, ==, !=, "lu", __VA_ARGS__) +#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ + a, b, !=, ==, "lu", __VA_ARGS__) +#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ + a, b, <, >=, "lu", __VA_ARGS__) +#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ + a, b, <=, >, "lu", __VA_ARGS__) +#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ + a, b, >=, <, "lu", __VA_ARGS__) +#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ + a, b, >, <=, "lu", __VA_ARGS__) + +#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ + !=, "qd", __VA_ARGS__) +#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ + ==, "qd", __VA_ARGS__) +#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ + >=, "qd", __VA_ARGS__) +#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ + >, "qd", __VA_ARGS__) +#define assert_qd_ge(a, b, ...) 
assert_cmp(long long, a, b, >=, \ + <, "qd", __VA_ARGS__) +#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ + <=, "qd", __VA_ARGS__) + +#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ + a, b, ==, !=, "qu", __VA_ARGS__) +#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ + a, b, !=, ==, "qu", __VA_ARGS__) +#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <, >=, "qu", __VA_ARGS__) +#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <=, >, "qu", __VA_ARGS__) +#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >=, <, "qu", __VA_ARGS__) +#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >, <=, "qu", __VA_ARGS__) + +#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ + !=, "jd", __VA_ARGS__) +#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ + ==, "jd", __VA_ARGS__) +#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ + >=, "jd", __VA_ARGS__) +#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ + >, "jd", __VA_ARGS__) +#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ + <, "jd", __VA_ARGS__) +#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ + <=, "jd", __VA_ARGS__) + +#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ + !=, "ju", __VA_ARGS__) +#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ + ==, "ju", __VA_ARGS__) +#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ + >=, "ju", __VA_ARGS__) +#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ + >, "ju", __VA_ARGS__) +#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ + <, "ju", __VA_ARGS__) +#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ + <=, "ju", __VA_ARGS__) + +#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ + !=, "zd", __VA_ARGS__) +#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ + ==, "zd", __VA_ARGS__) +#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ + >=, "zd", __VA_ARGS__) +#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ + >, "zd", __VA_ARGS__) +#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ + <, "zd", __VA_ARGS__) +#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ + <=, "zd", __VA_ARGS__) + +#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ + !=, "zu", __VA_ARGS__) +#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ + ==, "zu", __VA_ARGS__) +#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ + >=, "zu", __VA_ARGS__) +#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ + >, "zu", __VA_ARGS__) +#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ + <, "zu", __VA_ARGS__) +#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ + <=, "zu", __VA_ARGS__) + +#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ + !=, FMTd32, __VA_ARGS__) +#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ + ==, FMTd32, __VA_ARGS__) +#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ + >=, FMTd32, __VA_ARGS__) +#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ + >, FMTd32, __VA_ARGS__) +#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ + <, FMTd32, __VA_ARGS__) +#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ + <=, FMTd32, __VA_ARGS__) + +#define assert_u32_eq(a, b, ...) 
assert_cmp(uint32_t, a, b, ==, \ + !=, FMTu32, __VA_ARGS__) +#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ + ==, FMTu32, __VA_ARGS__) +#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ + >=, FMTu32, __VA_ARGS__) +#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ + >, FMTu32, __VA_ARGS__) +#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ + <, FMTu32, __VA_ARGS__) +#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ + <=, FMTu32, __VA_ARGS__) + +#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ + !=, FMTd64, __VA_ARGS__) +#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ + ==, FMTd64, __VA_ARGS__) +#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ + >=, FMTd64, __VA_ARGS__) +#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ + >, FMTd64, __VA_ARGS__) +#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ + <, FMTd64, __VA_ARGS__) +#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ + <=, FMTd64, __VA_ARGS__) + +#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ + !=, FMTu64, __VA_ARGS__) +#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ + ==, FMTu64, __VA_ARGS__) +#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ + >=, FMTu64, __VA_ARGS__) +#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ + >, FMTu64, __VA_ARGS__) +#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ + <, FMTu64, __VA_ARGS__) +#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ + <=, FMTu64, __VA_ARGS__) + +#define assert_b_eq(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ == b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) == (%s) --> %s != %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_b_ne(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ != b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) != (%s) --> %s == %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) +#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) + +#define assert_str_eq(a, b, ...) do { \ + if (strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) same as (%s) --> " \ + "\"%s\" differs from \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_str_ne(a, b, ...) 
do { \ + if (!strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) differs from (%s) --> " \ + "\"%s\" same as \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_not_reached(...) do { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Unreachable code reached: ", \ + __func__, __FILE__, __LINE__); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ +} while (0) + +/* + * If this enum changes, corresponding changes in test/test.sh.in are also + * necessary. + */ +typedef enum { + test_status_pass = 0, + test_status_skip = 1, + test_status_fail = 2, + + test_status_count = 3 +} test_status_t; + +typedef void (test_t)(void); + +#define TEST_BEGIN(f) \ +static void \ +f(void) \ +{ \ + p_test_init(#f); + +#define TEST_END \ + goto label_test_end; \ +label_test_end: \ + p_test_fini(); \ +} + +#define test(...) \ + p_test(__VA_ARGS__, NULL) + +#define test_skip_if(e) do { \ + if (e) { \ + test_skip("%s:%s:%d: Test skipped: (%s)", \ + __func__, __FILE__, __LINE__, #e); \ + goto label_test_end; \ + } \ +} while (0) + +void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); +void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +/* For private use by macros. */ +test_status_t p_test(test_t *t, ...); +void p_test_init(const char *name); +void p_test_fini(void); +void p_test_fail(const char *prefix, const char *message); diff --git a/redis.submodule/jemalloc/test/include/test/thd.h b/redis.submodule/jemalloc/test/include/test/thd.h new file mode 100644 index 0000000..47a5126 --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/thd.h @@ -0,0 +1,9 @@ +/* Abstraction layer for threading in tests. */ +#ifdef _WIN32 +typedef HANDLE thd_t; +#else +typedef pthread_t thd_t; +#endif + +void thd_create(thd_t *thd, void *(*proc)(void *), void *arg); +void thd_join(thd_t thd, void **ret); diff --git a/redis.submodule/jemalloc/test/include/test/timer.h b/redis.submodule/jemalloc/test/include/test/timer.h new file mode 100644 index 0000000..a7fefdf --- /dev/null +++ b/redis.submodule/jemalloc/test/include/test/timer.h @@ -0,0 +1,26 @@ +/* Simple timer, for use in benchmark reporting. 
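A hedged usage sketch for the timer declared just below; do_expensive_work() is a hypothetical stand-in for whatever is being benchmarked, and the reporting uses the harness's malloc_printf() and FMTu64.

    timedelta_t timer;
    uint64_t elapsed;

    timer_start(&timer);
    do_expensive_work();                    /* hypothetical workload under measurement */
    timer_stop(&timer);
    elapsed = timer_usec(&timer);
    malloc_printf("elapsed: %"FMTu64" us\n", elapsed);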
*/ + +#include +#include + +#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \ + && _POSIX_MONOTONIC_CLOCK >= 0 + +typedef struct { +#ifdef _WIN32 + FILETIME ft0; + FILETIME ft1; +#elif JEMALLOC_CLOCK_GETTIME + struct timespec ts0; + struct timespec ts1; + int clock_id; +#else + struct timeval tv0; + struct timeval tv1; +#endif +} timedelta_t; + +void timer_start(timedelta_t *timer); +void timer_stop(timedelta_t *timer); +uint64_t timer_usec(const timedelta_t *timer); +void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); diff --git a/redis.submodule/jemalloc/test/integration/MALLOCX_ARENA.c b/redis.submodule/jemalloc/test/integration/MALLOCX_ARENA.c new file mode 100644 index 0000000..30c203a --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/MALLOCX_ARENA.c @@ -0,0 +1,69 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +static bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + unsigned thread_ind = (unsigned)(uintptr_t)arg; + unsigned arena_ind; + void *p; + size_t sz; + + sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, + "Error in arenas.extend"); + + if (thread_ind % 4 != 3) { + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + const char *dss_precs[] = {"disabled", "primary", "secondary"}; + unsigned prec_ind = thread_ind % + (sizeof(dss_precs)/sizeof(char*)); + const char *dss = dss_precs[prec_ind]; + int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT; + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Error in mallctlnametomib()"); + mib[1] = arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss, + sizeof(const char *)), expected_err, + "Error in mallctlbymib()"); + } + + p = mallocx(1, MALLOCX_ARENA(arena_ind)); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, 0); + + return (NULL); +} + +TEST_BEGIN(test_MALLOCX_ARENA) +{ + thd_t thds[NTHREADS]; + unsigned i; + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)(uintptr_t)i); + } + + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END + +int +main(void) +{ + + return (test( + test_MALLOCX_ARENA)); +} diff --git a/redis.submodule/jemalloc/test/integration/aligned_alloc.c b/redis.submodule/jemalloc/test/integration/aligned_alloc.c new file mode 100644 index 0000000..6090014 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/aligned_alloc.c @@ -0,0 +1,125 @@ +#include "test/jemalloc_test.h" + +#define CHUNK 0x400000 +/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ +#define MAXALIGN ((size_t)0x2000000LU) +#define NITER 4 + +TEST_BEGIN(test_alignment_errors) +{ + size_t alignment; + void *p; + + alignment = 0; + set_errno(0); + p = aligned_alloc(alignment, 1); + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", alignment); + + for (alignment = sizeof(size_t); alignment < MAXALIGN; + alignment <<= 1) { + set_errno(0); + p = aligned_alloc(alignment + 1, 1); + assert_false(p != NULL || get_errno() != EINVAL, + "Expected error for invalid alignment %zu", + alignment + 1); + } +} +TEST_END + +TEST_BEGIN(test_oom_errors) +{ + size_t alignment, size; + void *p; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); +#else + alignment = 0x80000000LU; + size = 0x80000000LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + 
assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + size = UINT64_C(0xc000000000000001); +#else + alignment = 0x40000000LU; + size = 0xc0000001LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(%zu, %zu)", + alignment, size); + + alignment = 0x10LU; +#if LG_SIZEOF_PTR == 3 + size = UINT64_C(0xfffffffffffffff0); +#else + size = 0xfffffff0LU; +#endif + set_errno(0); + p = aligned_alloc(alignment, size); + assert_false(p != NULL || get_errno() != ENOMEM, + "Expected error for aligned_alloc(&p, %zu, %zu)", + alignment, size); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t alignment, size, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (size = 1; + size < 3 * alignment && size < (1U << 31); + size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + ps[i] = aligned_alloc(alignment, size); + if (ps[i] == NULL) { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); + } + total += malloc_usable_size(ps[i]); + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + free(ps[i]); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size)); +} diff --git a/redis.submodule/jemalloc/test/integration/allocated.c b/redis.submodule/jemalloc/test/integration/allocated.c new file mode 100644 index 0000000..3630e80 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/allocated.c @@ -0,0 +1,125 @@ +#include "test/jemalloc_test.h" + +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + int err; + void *p; + uint64_t a0, a1, d0, d1; + uint64_t *ap0, *ap1, *dp0, *dp1; + size_t sz, usize; + + sz = sizeof(a0); + if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(ap0); + if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*ap0, a0, + "\"thread.allocatedp\" should provide a pointer to internal " + "storage"); + + sz = sizeof(d0); + if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + sz = sizeof(dp0); + if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { + if (err == ENOENT) + goto label_ENOENT; + test_fail("%s(): Error in mallctl(): %s", __func__, + strerror(err)); + } + assert_u64_eq(*dp0, d0, + "\"thread.deallocatedp\" should provide a pointer to internal " + "storage"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() error"); + + sz = sizeof(a1); + mallctl("thread.allocated", &a1, &sz, NULL, 0); + sz = sizeof(ap1); + mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); + assert_u64_eq(*ap1, a1, + "Dereferenced \"thread.allocatedp\" value 
should equal " + "\"thread.allocated\" value"); + assert_ptr_eq(ap0, ap1, + "Pointer returned by \"thread.allocatedp\" should not change"); + + usize = malloc_usable_size(p); + assert_u64_le(a0 + usize, a1, + "Allocated memory counter should increase by at least the amount " + "explicitly allocated"); + + free(p); + + sz = sizeof(d1); + mallctl("thread.deallocated", &d1, &sz, NULL, 0); + sz = sizeof(dp1); + mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); + assert_u64_eq(*dp1, d1, + "Dereferenced \"thread.deallocatedp\" value should equal " + "\"thread.deallocated\" value"); + assert_ptr_eq(dp0, dp1, + "Pointer returned by \"thread.deallocatedp\" should not change"); + + assert_u64_le(d0 + usize, d1, + "Deallocated memory counter should increase by at least the amount " + "explicitly deallocated"); + + return (NULL); +label_ENOENT: + assert_false(config_stats, + "ENOENT should only be returned if stats are disabled"); + test_skip("\"thread.allocated\" mallctl not available"); + return (NULL); +} + +TEST_BEGIN(test_main_thread) +{ + + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) +{ + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) +{ + + /* Run tests multiple times to check for bad interactions. */ + return (test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread)); +} diff --git a/redis.submodule/jemalloc/test/integration/chunk.c b/redis.submodule/jemalloc/test/integration/chunk.c new file mode 100644 index 0000000..af1c9a5 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/chunk.c @@ -0,0 +1,276 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +const char *malloc_conf = "junk:false"; +#endif + +static chunk_hooks_t orig_hooks; +static chunk_hooks_t old_hooks; + +static bool do_dalloc = true; +static bool do_decommit; + +static bool did_alloc; +static bool did_dalloc; +static bool did_commit; +static bool did_decommit; +static bool did_purge; +static bool did_split; +static bool did_merge; + +#if 0 +# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) +#else +# define TRACE_HOOK(fmt, ...) +#endif + +void * +chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit, unsigned arena_ind) +{ + + TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, " + "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment, + *zero ? "true" : "false", *commit ? "true" : "false", arena_ind); + did_alloc = true; + return (old_hooks.alloc(new_addr, size, alignment, zero, commit, + arena_ind)); +} + +bool +chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n", + __func__, chunk, size, committed ? 
"true" : "false", arena_ind); + did_dalloc = true; + if (!do_dalloc) + return (true); + return (old_hooks.dalloc(chunk, size, committed, arena_ind)); +} + +bool +chunk_commit(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + bool err; + + TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " + "arena_ind=%u)\n", __func__, chunk, size, offset, length, + arena_ind); + err = old_hooks.commit(chunk, size, offset, length, arena_ind); + did_commit = !err; + return (err); +} + +bool +chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + bool err; + + TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " + "arena_ind=%u)\n", __func__, chunk, size, offset, length, + arena_ind); + if (!do_decommit) + return (true); + err = old_hooks.decommit(chunk, size, offset, length, arena_ind); + did_decommit = !err; + return (err); +} + +bool +chunk_purge(void *chunk, size_t size, size_t offset, size_t length, + unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu " + "arena_ind=%u)\n", __func__, chunk, size, offset, length, + arena_ind); + did_purge = true; + return (old_hooks.purge(chunk, size, offset, length, arena_ind)); +} + +bool +chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, + bool committed, unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, " + "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a, + size_b, committed ? "true" : "false", arena_ind); + did_split = true; + return (old_hooks.split(chunk, size, size_a, size_b, committed, + arena_ind)); +} + +bool +chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, + bool committed, unsigned arena_ind) +{ + + TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, " + "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b, + size_b, committed ? "true" : "false", arena_ind); + did_merge = true; + return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b, + committed, arena_ind)); +} + +TEST_BEGIN(test_chunk) +{ + void *p; + size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz; + chunk_hooks_t new_hooks = { + chunk_alloc, + chunk_dalloc, + chunk_commit, + chunk_decommit, + chunk_purge, + chunk_split, + chunk_merge + }; + bool xallocx_success_a, xallocx_success_b, xallocx_success_c; + + /* Install custom chunk hooks. */ + old_size = sizeof(chunk_hooks_t); + new_size = sizeof(chunk_hooks_t); + assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size, + &new_hooks, new_size), 0, "Unexpected chunk_hooks error"); + orig_hooks = old_hooks; + assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error"); + assert_ptr_ne(old_hooks.dalloc, chunk_dalloc, + "Unexpected dalloc error"); + assert_ptr_ne(old_hooks.commit, chunk_commit, + "Unexpected commit error"); + assert_ptr_ne(old_hooks.decommit, chunk_decommit, + "Unexpected decommit error"); + assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error"); + assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error"); + assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error"); + + /* Get large size classes. */ + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lrun.0.size", &large0, &sz, NULL, 0), 0, + "Unexpected arenas.lrun.0.size failure"); + assert_d_eq(mallctl("arenas.lrun.1.size", &large1, &sz, NULL, 0), 0, + "Unexpected arenas.lrun.1.size failure"); + + /* Get huge size classes. 
*/ + assert_d_eq(mallctl("arenas.hchunk.0.size", &huge0, &sz, NULL, 0), 0, + "Unexpected arenas.hchunk.0.size failure"); + assert_d_eq(mallctl("arenas.hchunk.1.size", &huge1, &sz, NULL, 0), 0, + "Unexpected arenas.hchunk.1.size failure"); + assert_d_eq(mallctl("arenas.hchunk.2.size", &huge2, &sz, NULL, 0), 0, + "Unexpected arenas.hchunk.2.size failure"); + + /* Test dalloc/decommit/purge cascade. */ + do_dalloc = false; + do_decommit = false; + p = mallocx(huge0 * 2, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_dalloc = false; + did_decommit = false; + did_purge = false; + did_split = false; + xallocx_success_a = (xallocx(p, huge0, 0, 0) == huge0); + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.purge error"); + if (xallocx_success_a) { + assert_true(did_dalloc, "Expected dalloc"); + assert_false(did_decommit, "Unexpected decommit"); + assert_true(did_purge, "Expected purge"); + } + assert_true(did_split, "Expected split"); + dallocx(p, 0); + do_dalloc = true; + + /* Test decommit/commit and observe split/merge. */ + do_dalloc = false; + do_decommit = true; + p = mallocx(huge0 * 2, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_decommit = false; + did_commit = false; + did_split = false; + did_merge = false; + xallocx_success_b = (xallocx(p, huge0, 0, 0) == huge0); + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.purge error"); + if (xallocx_success_b) + assert_true(did_split, "Expected split"); + xallocx_success_c = (xallocx(p, huge0 * 2, 0, 0) == huge0 * 2); + assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); + if (xallocx_success_b && xallocx_success_c) + assert_true(did_merge, "Expected merge"); + dallocx(p, 0); + do_dalloc = true; + do_decommit = false; + + /* Test purge for partial-chunk huge allocations. */ + if (huge0 * 2 > huge2) { + /* + * There are at least four size classes per doubling, so a + * successful xallocx() from size=huge2 to size=huge1 is + * guaranteed to leave trailing purgeable memory. + */ + p = mallocx(huge2, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_purge = false; + assert_zu_eq(xallocx(p, huge1, 0, 0), huge1, + "Unexpected xallocx() failure"); + assert_true(did_purge, "Expected purge"); + dallocx(p, 0); + } + + /* Test decommit for large allocations. */ + do_decommit = true; + p = mallocx(large1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.purge error"); + did_decommit = false; + assert_zu_eq(xallocx(p, large0, 0, 0), large0, + "Unexpected xallocx() failure"); + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.purge error"); + did_commit = false; + assert_zu_eq(xallocx(p, large1, 0, 0), large1, + "Unexpected xallocx() failure"); + assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); + dallocx(p, 0); + do_decommit = false; + + /* Make sure non-huge allocation succeeds. */ + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, 0); + + /* Restore chunk hooks. 
*/ + assert_d_eq(mallctl("arena.0.chunk_hooks", NULL, NULL, &old_hooks, + new_size), 0, "Unexpected chunk_hooks error"); + assert_d_eq(mallctl("arena.0.chunk_hooks", &old_hooks, &old_size, + NULL, 0), 0, "Unexpected chunk_hooks error"); + assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc, + "Unexpected alloc error"); + assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc, + "Unexpected dalloc error"); + assert_ptr_eq(old_hooks.commit, orig_hooks.commit, + "Unexpected commit error"); + assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit, + "Unexpected decommit error"); + assert_ptr_eq(old_hooks.purge, orig_hooks.purge, + "Unexpected purge error"); + assert_ptr_eq(old_hooks.split, orig_hooks.split, + "Unexpected split error"); + assert_ptr_eq(old_hooks.merge, orig_hooks.merge, + "Unexpected merge error"); +} +TEST_END + +int +main(void) +{ + + return (test(test_chunk)); +} diff --git a/redis.submodule/jemalloc/test/integration/mallocx.c b/redis.submodule/jemalloc/test/integration/mallocx.c new file mode 100644 index 0000000..6253175 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/mallocx.c @@ -0,0 +1,182 @@ +#include "test/jemalloc_test.h" + +static unsigned +get_nsizes_impl(const char *cmd) +{ + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return (ret); +} + +static unsigned +get_nhuge(void) +{ + + return (get_nsizes_impl("arenas.nhchunks")); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) +{ + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return (ret); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +TEST_BEGIN(test_oom) +{ + size_t hugemax, size, alignment; + + hugemax = get_huge_size(get_nhuge()-1); + + /* + * It should be impossible to allocate two objects that each consume + * more than half the virtual address space. 
+ */ + { + void *p; + + p = mallocx(hugemax, 0); + if (p != NULL) { + assert_ptr_null(mallocx(hugemax, 0), + "Expected OOM for mallocx(size=%#zx, 0)", hugemax); + dallocx(p, 0); + } + } + +#if LG_SIZEOF_PTR == 3 + size = ZU(0x8000000000000000); + alignment = ZU(0x8000000000000000); +#else + size = ZU(0x80000000); + alignment = ZU(0x80000000); +#endif + assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)), + "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size, + alignment); +} +TEST_END + +TEST_BEGIN(test_basic) +{ +#define MAXSZ (((size_t)1) << 26) + size_t sz; + + for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { + size_t nsz, rsz; + void *p; + nsz = nallocx(sz, 0); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + rsz = sallocx(p, 0); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); + dallocx(p, 0); + + p = mallocx(sz, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, 0); + + nsz = nallocx(sz, MALLOCX_ZERO); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + rsz = sallocx(p, 0); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); + dallocx(p, 0); + } +#undef MAXSZ +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ +#define MAXALIGN (((size_t)1) << 25) +#define NITER 4 + size_t nsz, rsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_zu_ne(nsz, 0, + "nallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_ptr_not_null(ps[i], + "mallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + rsz = sallocx(ps[i], 0); + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocx()/sallocx() size mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + total += rsz; + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocx(ps[i], 0); + ps[i] = NULL; + } + } + } + } +#undef MAXALIGN +#undef NITER +} +TEST_END + +int +main(void) +{ + + return (test( + test_oom, + test_basic, + test_alignment_and_size)); +} diff --git a/redis.submodule/jemalloc/test/integration/overflow.c b/redis.submodule/jemalloc/test/integration/overflow.c new file mode 100644 index 0000000..303d9b2 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/overflow.c @@ -0,0 +1,49 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_overflow) +{ + unsigned nhchunks; + size_t mib[4]; + size_t sz, miblen, max_size_class; + void *p; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, + "Unexpected mallctl() error"); + + miblen = sizeof(mib) / sizeof(size_t); + assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + 
"Unexpected mallctlnametomib() error"); + mib[2] = nhchunks - 1; + + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() error"); + + assert_ptr_null(malloc(max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(malloc(SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + + assert_ptr_null(calloc(1, max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(calloc(1, SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() OOM"); + assert_ptr_null(realloc(p, max_size_class + 1), + "Expected OOM due to over-sized allocation request"); + assert_ptr_null(realloc(p, SIZE_T_MAX), + "Expected OOM due to over-sized allocation request"); + free(p); +} +TEST_END + +int +main(void) +{ + + return (test( + test_overflow)); +} diff --git a/redis.submodule/jemalloc/test/integration/posix_memalign.c b/redis.submodule/jemalloc/test/integration/posix_memalign.c new file mode 100644 index 0000000..19741c6 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/posix_memalign.c @@ -0,0 +1,119 @@ +#include "test/jemalloc_test.h" + +#define CHUNK 0x400000 +/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ +#define MAXALIGN ((size_t)0x2000000LU) +#define NITER 4 + +TEST_BEGIN(test_alignment_errors) +{ + size_t alignment; + void *p; + + for (alignment = 0; alignment < sizeof(void *); alignment++) { + assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, + "Expected error for invalid alignment %zu", + alignment); + } + + for (alignment = sizeof(size_t); alignment < MAXALIGN; + alignment <<= 1) { + assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, + "Expected error for invalid alignment %zu", + alignment + 1); + } +} +TEST_END + +TEST_BEGIN(test_oom_errors) +{ + size_t alignment, size; + void *p; + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x8000000000000000); + size = UINT64_C(0x8000000000000000); +#else + alignment = 0x80000000LU; + size = 0x80000000LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); + +#if LG_SIZEOF_PTR == 3 + alignment = UINT64_C(0x4000000000000000); + size = UINT64_C(0xc000000000000001); +#else + alignment = 0x40000000LU; + size = 0xc0000001LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); + + alignment = 0x10LU; +#if LG_SIZEOF_PTR == 3 + size = UINT64_C(0xfffffffffffffff0); +#else + size = 0xfffffff0LU; +#endif + assert_d_ne(posix_memalign(&p, alignment, size), 0, + "Expected error for posix_memalign(&p, %zu, %zu)", + alignment, size); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t alignment, size, total; + unsigned i; + int err; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (size = 1; + size < 3 * alignment && size < (1U << 31); + size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + err = posix_memalign(&ps[i], + alignment, size); + if (err) { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + test_fail( + "Error for alignment=%zu, " + "size=%zu (%#zx): %s", + alignment, size, size, buf); + } + total += malloc_usable_size(ps[i]); + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 
0; i < NITER; i++) { + if (ps[i] != NULL) { + free(ps[i]); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_alignment_errors, + test_oom_errors, + test_alignment_and_size)); +} diff --git a/redis.submodule/jemalloc/test/integration/rallocx.c b/redis.submodule/jemalloc/test/integration/rallocx.c new file mode 100644 index 0000000..be1b27b --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/rallocx.c @@ -0,0 +1,185 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_grow_and_shrink) +{ + void *p, *q; + size_t tsz; +#define NCYCLES 3 + unsigned i, j; +#define NSZS 2500 + size_t szs[NSZS]; +#define MAXSZ ZU(12 * 1024 * 1024) + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + szs[0] = sallocx(p, 0); + + for (i = 0; i < NCYCLES; i++) { + for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { + q = rallocx(p, szs[j-1]+1, 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j-1], szs[j-1]+1); + szs[j] = sallocx(q, 0); + assert_zu_ne(szs[j], szs[j-1]+1, + "Expected size to be at least: %zu", szs[j-1]+1); + p = q; + } + + for (j--; j > 0; j--) { + q = rallocx(p, szs[j-1], 0); + assert_ptr_not_null(q, + "Unexpected rallocx() error for size=%zu-->%zu", + szs[j], szs[j-1]); + tsz = sallocx(q, 0); + assert_zu_eq(tsz, szs[j-1], + "Expected size=%zu, got size=%zu", szs[j-1], tsz); + p = q; + } + } + + dallocx(p, 0); +#undef MAXSZ +#undef NSZS +#undef NCYCLES +} +TEST_END + +static bool +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) +{ + bool ret = false; + const uint8_t *buf = (const uint8_t *)p; + size_t i; + + for (i = 0; i < len; i++) { + uint8_t b = buf[offset+i]; + if (b != c) { + test_fail("Allocation at %p (len=%zu) contains %#x " + "rather than %#x at offset %zu", p, len, b, c, + offset+i); + ret = true; + } + } + + return (ret); +} + +TEST_BEGIN(test_zero) +{ + void *p, *q; + size_t psz, qsz, i, j; + size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; +#define FILL_BYTE 0xaaU +#define RANGE 2048 + + for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { + size_t start_size = start_sizes[i]; + p = mallocx(start_size, MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + psz = sallocx(p, 0); + + assert_false(validate_fill(p, 0, 0, psz), + "Expected zeroed memory"); + memset(p, FILL_BYTE, psz); + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + + for (j = 1; j < RANGE; j++) { + q = rallocx(p, start_size+j, MALLOCX_ZERO); + assert_ptr_not_null(q, "Unexpected rallocx() error"); + qsz = sallocx(q, 0); + if (q != p || qsz != psz) { + assert_false(validate_fill(q, FILL_BYTE, 0, + psz), "Expected filled memory"); + assert_false(validate_fill(q, 0, psz, qsz-psz), + "Expected zeroed memory"); + } + if (psz != qsz) { + memset((void *)((uintptr_t)q+psz), FILL_BYTE, + qsz-psz); + psz = qsz; + } + p = q; + } + assert_false(validate_fill(p, FILL_BYTE, 0, psz), + "Expected filled memory"); + dallocx(p, 0); + } +#undef FILL_BYTE +} +TEST_END + +TEST_BEGIN(test_align) +{ + void *p, *q; + size_t align; +#define MAX_ALIGN (ZU(1) << 25) + + align = ZU(1); + p = mallocx(1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { + q = rallocx(p, 1, MALLOCX_ALIGN(align)); + assert_ptr_not_null(q, + "Unexpected rallocx() error for align=%zu", align); + assert_ptr_null( + (void *)((uintptr_t)q & (align-1)), + "%p inadequately aligned for align=%zu", + q, 
align); + p = q; + } + dallocx(p, 0); +#undef MAX_ALIGN +} +TEST_END + +TEST_BEGIN(test_lg_align_and_zero) +{ + void *p, *q; + size_t lg_align, sz; +#define MAX_LG_ALIGN 25 +#define MAX_VALIDATE (ZU(1) << 22) + + lg_align = ZU(0); + p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { + q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); + assert_ptr_not_null(q, + "Unexpected rallocx() error for lg_align=%zu", lg_align); + assert_ptr_null( + (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), + "%p inadequately aligned for lg_align=%zu", + q, lg_align); + sz = sallocx(q, 0); + if ((sz << 1) <= MAX_VALIDATE) { + assert_false(validate_fill(q, 0, 0, sz), + "Expected zeroed memory"); + } else { + assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), + "Expected zeroed memory"); + assert_false(validate_fill( + (void *)((uintptr_t)q+sz-MAX_VALIDATE), + 0, 0, MAX_VALIDATE), "Expected zeroed memory"); + } + p = q; + } + dallocx(p, 0); +#undef MAX_VALIDATE +#undef MAX_LG_ALIGN +} +TEST_END + +int +main(void) +{ + + return (test( + test_grow_and_shrink, + test_zero, + test_align, + test_lg_align_and_zero)); +} diff --git a/redis.submodule/jemalloc/test/integration/sdallocx.c b/redis.submodule/jemalloc/test/integration/sdallocx.c new file mode 100644 index 0000000..b84817d --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/sdallocx.c @@ -0,0 +1,57 @@ +#include "test/jemalloc_test.h" + +#define MAXALIGN (((size_t)1) << 25) +#define NITER 4 + +TEST_BEGIN(test_basic) +{ + void *ptr = mallocx(64, 0); + sdallocx(ptr, 64, 0); +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) +{ + size_t nsz, sz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) + ps[i] = NULL; + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + total += nsz; + if (total >= (MAXALIGN << 1)) + break; + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + sdallocx(ps[i], sz, + MALLOCX_ALIGN(alignment)); + ps[i] = NULL; + } + } + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_basic, + test_alignment_and_size)); +} diff --git a/redis.submodule/jemalloc/test/integration/thread_arena.c b/redis.submodule/jemalloc/test/integration/thread_arena.c new file mode 100644 index 0000000..67be535 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/thread_arena.c @@ -0,0 +1,79 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 10 + +void * +thd_start(void *arg) +{ + unsigned main_arena_ind = *(unsigned *)arg; + void *p; + unsigned arena_ind; + size_t size; + int err; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + free(p); + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, + sizeof(main_arena_ind)))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + assert_u_eq(arena_ind, main_arena_ind, + "Arena index should be same 
as for main thread"); + + return (NULL); +} + +TEST_BEGIN(test_thread_arena) +{ + void *p; + unsigned arena_ind; + size_t size; + int err; + thd_t thds[NTHREADS]; + unsigned i; + + p = malloc(1); + assert_ptr_not_null(p, "Error in malloc()"); + + size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); + } + + for (i = 0; i < NTHREADS; i++) { + thd_create(&thds[i], thd_start, + (void *)&arena_ind); + } + + for (i = 0; i < NTHREADS; i++) { + intptr_t join_ret; + thd_join(thds[i], (void *)&join_ret); + assert_zd_eq(join_ret, 0, "Unexpected thread join error"); + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_thread_arena)); +} diff --git a/redis.submodule/jemalloc/test/integration/thread_tcache_enabled.c b/redis.submodule/jemalloc/test/integration/thread_tcache_enabled.c new file mode 100644 index 0000000..f4e89c6 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/thread_tcache_enabled.c @@ -0,0 +1,113 @@ +#include "test/jemalloc_test.h" + +static const bool config_tcache = +#ifdef JEMALLOC_TCACHE + true +#else + false +#endif + ; + +void * +thd_start(void *arg) +{ + int err; + size_t sz; + bool e0, e1; + + sz = sizeof(bool); + if ((err = mallctl("thread.tcache.enabled", &e0, &sz, NULL, 0))) { + if (err == ENOENT) { + assert_false(config_tcache, + "ENOENT should only be returned if tcache is " + "disabled"); + } + goto label_ENOENT; + } + + if (e0) { + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), + 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + } + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, + "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + return (NULL); +label_ENOENT: + test_skip("\"thread.tcache.enabled\" mallctl not available"); + return (NULL); +} + +TEST_BEGIN(test_main_thread) +{ + + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) +{ + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) +{ + + /* Run tests multiple times to check for bad interactions. 
*/ + return (test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread)); +} diff --git a/redis.submodule/jemalloc/test/integration/xallocx.c b/redis.submodule/jemalloc/test/integration/xallocx.c new file mode 100644 index 0000000..0045196 --- /dev/null +++ b/redis.submodule/jemalloc/test/integration/xallocx.c @@ -0,0 +1,493 @@ +#include "test/jemalloc_test.h" + +/* + * Use a separate arena for xallocx() extension/contraction tests so that + * internal allocation e.g. by heap profiling can't interpose allocations where + * xallocx() would ordinarily be able to extend. + */ +static unsigned +arena_ind(void) +{ + static unsigned ind = 0; + + if (ind == 0) { + size_t sz = sizeof(ind); + assert_d_eq(mallctl("arenas.extend", &ind, &sz, NULL, 0), 0, + "Unexpected mallctl failure creating arena"); + } + + return (ind); +} + +TEST_BEGIN(test_same_size) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_no_move) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz, sz-42, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_no_move_fail) +{ + void *p; + size_t sz, tsz; + + p = mallocx(42, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + sz = sallocx(p, 0); + + tsz = xallocx(p, sz + 5, 0, 0); + assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); + + dallocx(p, 0); +} +TEST_END + +static unsigned +get_nsizes_impl(const char *cmd) +{ + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return (ret); +} + +static unsigned +get_nsmall(void) +{ + + return (get_nsizes_impl("arenas.nbins")); +} + +static unsigned +get_nlarge(void) +{ + + return (get_nsizes_impl("arenas.nlruns")); +} + +static unsigned +get_nhuge(void) +{ + + return (get_nsizes_impl("arenas.nhchunks")); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) +{ + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return (ret); +} + +static size_t +get_small_size(size_t ind) +{ + + return (get_size_impl("arenas.bin.0.size", ind)); +} + +static size_t +get_large_size(size_t ind) +{ + + return (get_size_impl("arenas.lrun.0.size", ind)); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +TEST_BEGIN(test_size) +{ + size_t small0, hugemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + /* Test smallest supported size. */ + assert_zu_eq(xallocx(p, 1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + /* Test largest supported size. 
*/ + assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax, + "Unexpected xallocx() behavior"); + + /* Test size overflow. */ + assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_size_extra_overflow) +{ + size_t small0, hugemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + /* Test overflows that can be resolved by clamping extra. */ + assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, + "Unexpected xallocx() behavior"); + + /* Test overflow such that hugemax-size underflows. */ + assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_small) +{ + size_t small0, small1, hugemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + small1 = get_small_size(1); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, + "Unexpected xallocx() behavior"); + + /* Test size+extra overflow. */ + assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_large) +{ + int flags = MALLOCX_ARENA(arena_ind()); + size_t smallmax, large0, large1, large2, huge0, hugemax; + void *p; + + /* Get size classes. */ + smallmax = get_small_size(get_nsmall()-1); + large0 = get_large_size(0); + large1 = get_large_size(1); + large2 = get_large_size(2); + huge0 = get_huge_size(0); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(large2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + /* Test size decrease with zero extra. */ + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, smallmax, 0, flags), large0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + /* Test size decrease with non-zero extra. 
*/ + assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with zero extra. */ + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, huge0, 0, flags), large2, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, flags), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large2, 0, flags), large2, + "Unexpected xallocx() behavior"); + /* Test size+extra overflow. */ + assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0, + "Unexpected xallocx() behavior"); + + dallocx(p, flags); +} +TEST_END + +TEST_BEGIN(test_extra_huge) +{ + int flags = MALLOCX_ARENA(arena_ind()); + size_t largemax, huge0, huge1, huge2, hugemax; + void *p; + + /* Get size classes. */ + largemax = get_large_size(get_nlarge()-1); + huge0 = get_huge_size(0); + huge1 = get_huge_size(1); + huge2 = get_huge_size(2); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(huge2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, huge2, 0, flags), huge2, + "Unexpected xallocx() behavior"); + /* Test size decrease with zero extra. */ + assert_zu_ge(xallocx(p, huge0, 0, flags), huge0, + "Unexpected xallocx() behavior"); + assert_zu_ge(xallocx(p, largemax, 0, flags), huge0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, huge2, 0, flags), huge2, + "Unexpected xallocx() behavior"); + /* Test size decrease with non-zero extra. */ + assert_zu_eq(xallocx(p, huge0, huge2 - huge0, flags), huge2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, huge0, huge1 - huge0, flags), huge1, + "Unexpected xallocx() behavior"); + assert_zu_ge(xallocx(p, largemax, huge0 - largemax, flags), huge0, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, huge0, 0, flags), huge0, + "Unexpected xallocx() behavior"); + /* Test size increase with zero extra. */ + assert_zu_le(xallocx(p, huge2, 0, flags), huge2, + "Unexpected xallocx() behavior"); + assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge2, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, huge0, 0, flags), huge0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, flags), hugemax, + "Unexpected xallocx() behavior"); + + assert_zu_ge(xallocx(p, huge0, 0, flags), huge0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. 
*/ + assert_zu_le(xallocx(p, huge0, huge2 - huge0, flags), huge2, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, huge2, 0, flags), huge2, + "Unexpected xallocx() behavior"); + /* Test size+extra overflow. */ + assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, flags), hugemax, + "Unexpected xallocx() behavior"); + + dallocx(p, flags); +} +TEST_END + +static void +print_filled_extents(const void *p, uint8_t c, size_t len) +{ + const uint8_t *pc = (const uint8_t *)p; + size_t i, range0; + uint8_t c0; + + malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); + range0 = 0; + c0 = pc[0]; + for (i = 0; i < len; i++) { + if (pc[i] != c0) { + malloc_printf(" %#x[%zu..%zu)", c0, range0, i); + range0 = i; + c0 = pc[i]; + } + } + malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i); +} + +static bool +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) +{ + const uint8_t *pc = (const uint8_t *)p; + bool err; + size_t i; + + for (i = offset, err = false; i < offset+len; i++) { + if (pc[i] != c) + err = true; + } + + if (err) + print_filled_extents(p, c, offset + len); + + return (err); +} + +static void +test_zero(size_t szmin, size_t szmax) +{ + int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; + size_t sz, nsz; + void *p; +#define FILL_BYTE 0x7aU + + sz = szmax; + p = mallocx(sz, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", + sz); + + /* + * Fill with non-zero so that non-debug builds are more likely to detect + * errors. + */ + memset(p, FILL_BYTE, sz); + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + + /* Shrink in place so that we can expect growing in place to succeed. */ + sz = szmin; + assert_zu_eq(xallocx(p, sz, 0, flags), sz, + "Unexpected xallocx() error"); + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + + for (sz = szmin; sz < szmax; sz = nsz) { + nsz = nallocx(sz+1, flags); + assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz, + "Unexpected xallocx() failure"); + assert_false(validate_fill(p, FILL_BYTE, 0, sz), + "Memory not filled: sz=%zu", sz); + assert_false(validate_fill(p, 0x00, sz, nsz-sz), + "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); + memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); + assert_false(validate_fill(p, FILL_BYTE, 0, nsz), + "Memory not filled: nsz=%zu", nsz); + } + + dallocx(p, flags); +} + +TEST_BEGIN(test_zero_large) +{ + size_t large0, largemax; + + /* Get size classes. */ + large0 = get_large_size(0); + largemax = get_large_size(get_nlarge()-1); + + test_zero(large0, largemax); +} +TEST_END + +TEST_BEGIN(test_zero_huge) +{ + size_t huge0, huge1; + + /* Get size classes. 
*/ + huge0 = get_huge_size(0); + huge1 = get_huge_size(1); + + test_zero(huge1, huge0 * 2); +} +TEST_END + +int +main(void) +{ + + return (test( + test_same_size, + test_extra_no_move, + test_no_move_fail, + test_size, + test_size_extra_overflow, + test_extra_small, + test_extra_large, + test_extra_huge, + test_zero_large, + test_zero_huge)); +} diff --git a/redis.submodule/jemalloc/test/src/SFMT.c b/redis.submodule/jemalloc/test/src/SFMT.c new file mode 100644 index 0000000..80cabe0 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/SFMT.c @@ -0,0 +1,719 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/** + * @file SFMT.c + * @brief SIMD oriented Fast Mersenne Twister(SFMT) + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. 
+ * + * The new BSD License is applied to this software, see LICENSE.txt + */ +#define SFMT_C_ +#include "test/jemalloc_test.h" +#include "test/SFMT-params.h" + +#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) +#define BIG_ENDIAN64 1 +#endif +#if defined(ONLY64) && !defined(BIG_ENDIAN64) + #if defined(__GNUC__) + #error "-DONLY64 must be specified with -DBIG_ENDIAN64" + #endif +#undef ONLY64 +#endif +/*------------------------------------------------------ + 128-bit SIMD data type for Altivec, SSE2 or standard C + ------------------------------------------------------*/ +#if defined(HAVE_ALTIVEC) +/** 128-bit data structure */ +union W128_T { + vector unsigned int s; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#elif defined(HAVE_SSE2) +/** 128-bit data structure */ +union W128_T { + __m128i si; + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef union W128_T w128_t; + +#else + +/** 128-bit data structure */ +struct W128_T { + uint32_t u[4]; +}; +/** 128-bit data type */ +typedef struct W128_T w128_t; + +#endif + +struct sfmt_s { + /** the 128-bit internal state array */ + w128_t sfmt[N]; + /** index counter to the 32-bit internal state array */ + int idx; + /** a flag: it is 0 if and only if the internal state is not yet + * initialized. */ + int initialized; +}; + +/*-------------------------------------- + FILE GLOBAL VARIABLES + internal state, index counter and flag + --------------------------------------*/ + +/** a parity check vector which certificate the period of 2^{MEXP} */ +static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; + +/*---------------- + STATIC FUNCTIONS + ----------------*/ +JEMALLOC_INLINE_C int idxof(int i); +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); +#endif +JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); +JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); +JEMALLOC_INLINE_C uint32_t func1(uint32_t x); +JEMALLOC_INLINE_C uint32_t func2(uint32_t x); +static void period_certification(sfmt_t *ctx); +#if defined(BIG_ENDIAN64) && !defined(ONLY64) +JEMALLOC_INLINE_C void swap(w128_t *array, int size); +#endif + +#if defined(HAVE_ALTIVEC) + #include "test/SFMT-alti.h" +#elif defined(HAVE_SSE2) + #include "test/SFMT-sse2.h" +#endif + +/** + * This function simulate a 64-bit index of LITTLE ENDIAN + * in BIG ENDIAN machine. + */ +#ifdef ONLY64 +JEMALLOC_INLINE_C int idxof(int i) { + return i ^ 1; +} +#else +JEMALLOC_INLINE_C int idxof(int i) { + return i; +} +#endif +/** + * This function simulates SIMD 128-bit right shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. 
+ * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th >> (shift * 8); + ol = tl >> (shift * 8); + ol |= th << (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +/** + * This function simulates SIMD 128-bit left shift by the standard C. + * The 128-bit integer given in in is shifted by (shift * 8) bits. + * This function simulates the LITTLE ENDIAN SIMD. + * @param out the output of this function + * @param in the 128-bit data to be shifted + * @param shift the shift value + */ +#ifdef ONLY64 +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); + tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[0] = (uint32_t)(ol >> 32); + out->u[1] = (uint32_t)ol; + out->u[2] = (uint32_t)(oh >> 32); + out->u[3] = (uint32_t)oh; +} +#else +JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { + uint64_t th, tl, oh, ol; + + th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); + tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); + + oh = th << (shift * 8); + ol = tl << (shift * 8); + oh |= tl >> (64 - shift * 8); + out->u[1] = (uint32_t)(ol >> 32); + out->u[0] = (uint32_t)ol; + out->u[3] = (uint32_t)(oh >> 32); + out->u[2] = (uint32_t)oh; +} +#endif +#endif + +/** + * This function represents the recursion formula. 
+ * @param r output + * @param a a 128-bit part of the internal state array + * @param b a 128-bit part of the internal state array + * @param c a 128-bit part of the internal state array + * @param d a 128-bit part of the internal state array + */ +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +#ifdef ONLY64 +JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#else +JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, + w128_t *d) { + w128_t x; + w128_t y; + + lshift128(&x, a, SL2); + rshift128(&y, c, SR2); + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] + ^ (d->u[0] << SL1); + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] + ^ (d->u[1] << SL1); + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] + ^ (d->u[2] << SL1); + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] + ^ (d->u[3] << SL1); +} +#endif +#endif + +#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) +/** + * This function fills the internal state array with pseudorandom + * integers. + */ +JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { + int i; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } + for (; i < N; i++) { + do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, + r2); + r1 = r2; + r2 = &ctx->sfmt[i]; + } +} + +/** + * This function fills the user-specified array with pseudorandom + * integers. + * + * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param size number of 128-bit pseudorandom numbers to be generated. 
+ */ +JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { + int i, j; + w128_t *r1, *r2; + + r1 = &ctx->sfmt[N - 2]; + r2 = &ctx->sfmt[N - 1]; + for (i = 0; i < N - POS1; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < N; i++) { + do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (; i < size - N; i++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + } + for (j = 0; j < 2 * N - size; j++) { + ctx->sfmt[j] = array[j + size - N]; + } + for (; i < size; i++, j++) { + do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); + r1 = r2; + r2 = &array[i]; + ctx->sfmt[j] = array[i]; + } +} +#endif + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) +JEMALLOC_INLINE_C void swap(w128_t *array, int size) { + int i; + uint32_t x, y; + + for (i = 0; i < size; i++) { + x = array[i].u[0]; + y = array[i].u[2]; + array[i].u[0] = array[i].u[1]; + array[i].u[2] = array[i].u[3]; + array[i].u[1] = x; + array[i].u[3] = y; + } +} +#endif +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func1(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1664525UL; +} + +/** + * This function represents a function used in the initialization + * by init_by_array + * @param x 32-bit integer + * @return 32-bit integer + */ +static uint32_t func2(uint32_t x) { + return (x ^ (x >> 27)) * (uint32_t)1566083941UL; +} + +/** + * This function certificate the period of 2^{MEXP} + */ +static void period_certification(sfmt_t *ctx) { + int inner = 0; + int i, j; + uint32_t work; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + for (i = 0; i < 4; i++) + inner ^= psfmt32[idxof(i)] & parity[i]; + for (i = 16; i > 0; i >>= 1) + inner ^= inner >> i; + inner &= 1; + /* check OK */ + if (inner == 1) { + return; + } + /* check NG, and modification */ + for (i = 0; i < 4; i++) { + work = 1; + for (j = 0; j < 32; j++) { + if ((work & parity[i]) != 0) { + psfmt32[idxof(i)] ^= work; + return; + } + work = work << 1; + } + } +} + +/*---------------- + PUBLIC FUNCTIONS + ----------------*/ +/** + * This function returns the identification string. + * The string shows the word size, the Mersenne exponent, + * and all parameters of this generator. + */ +const char *get_idstring(void) { + return IDSTR; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array32() function. + * @return minimum size of array used for fill_array32() function. + */ +int get_min_array_size32(void) { + return N32; +} + +/** + * This function returns the minimum size of array used for \b + * fill_array64() function. + * @return minimum size of array used for fill_array64() function. + */ +int get_min_array_size64(void) { + return N64; +} + +#ifndef ONLY64 +/** + * This function generates and returns 32-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * @return 32-bit pseudorandom number + */ +uint32_t gen_rand32(sfmt_t *ctx) { + uint32_t r; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; + + assert(ctx->initialized); + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } + r = psfmt32[ctx->idx++]; + return r; +} + +/* Generate a random integer in [0..limit). 
*/ +uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { + uint32_t ret, above; + + above = 0xffffffffU - (0xffffffffU % limit); + while (1) { + ret = gen_rand32(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} +#endif +/** + * This function generates and returns 64-bit pseudorandom number. + * init_gen_rand or init_by_array must be called before this function. + * The function gen_rand64 should not be called after gen_rand32, + * unless an initialization is again executed. + * @return 64-bit pseudorandom number + */ +uint64_t gen_rand64(sfmt_t *ctx) { +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + uint32_t r1, r2; + uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; +#else + uint64_t r; + uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; +#endif + + assert(ctx->initialized); + assert(ctx->idx % 2 == 0); + + if (ctx->idx >= N32) { + gen_rand_all(ctx); + ctx->idx = 0; + } +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + r1 = psfmt32[ctx->idx]; + r2 = psfmt32[ctx->idx + 1]; + ctx->idx += 2; + return ((uint64_t)r2 << 32) | r1; +#else + r = psfmt64[ctx->idx / 2]; + ctx->idx += 2; + return r; +#endif +} + +/* Generate a random integer in [0..limit). */ +uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { + uint64_t ret, above; + + above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); + while (1) { + ret = gen_rand64(ctx); + if (ret < above) { + ret %= limit; + break; + } + } + return ret; +} + +#ifndef ONLY64 +/** + * This function generates pseudorandom 32-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 624 and a + * multiple of four. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. + * + * @param array an array where pseudorandom 32-bit integers are filled + * by this function. The pointer to the array must be \b "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 32-bit pseudorandom integers to be + * generated. size must be a multiple of 4, and greater than or equal + * to (MEXP / 128 + 1) * 4. + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 4 == 0); + assert(size >= N32); + + gen_rand_array(ctx, (w128_t *)array, size / 4); + ctx->idx = N32; +} +#endif + +/** + * This function generates pseudorandom 64-bit integers in the + * specified array[] by one call. The number of pseudorandom integers + * is specified by the argument size, which must be at least 312 and a + * multiple of two. The generation by this function is much faster + * than the following gen_rand function. + * + * For initialization, init_gen_rand or init_by_array must be called + * before the first call of this function. This function can not be + * used after calling gen_rand function, without initialization. 
+ * + * @param array an array where pseudorandom 64-bit integers are filled + * by this function. The pointer to the array must be "aligned" + * (namely, must be a multiple of 16) in the SIMD version, since it + * refers to the address of a 128-bit integer. In the standard C + * version, the pointer is arbitrary. + * + * @param size the number of 64-bit pseudorandom integers to be + * generated. size must be a multiple of 2, and greater than or equal + * to (MEXP / 128 + 1) * 2 + * + * @note \b memalign or \b posix_memalign is available to get aligned + * memory. Mac OSX doesn't have these functions, but \b malloc of OSX + * returns the pointer to the aligned memory block. + */ +void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { + assert(ctx->initialized); + assert(ctx->idx == N32); + assert(size % 2 == 0); + assert(size >= N64); + + gen_rand_array(ctx, (w128_t *)array, size / 2); + ctx->idx = N32; + +#if defined(BIG_ENDIAN64) && !defined(ONLY64) + swap((w128_t *)array, size /2); +#endif +} + +/** + * This function initializes the internal state array with a 32-bit + * integer seed. + * + * @param seed a 32-bit integer used as the seed. + */ +sfmt_t *init_gen_rand(uint32_t seed) { + void *p; + sfmt_t *ctx; + int i; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + psfmt32[idxof(0)] = seed; + for (i = 1; i < N32; i++) { + psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] + ^ (psfmt32[idxof(i - 1)] >> 30)) + + i; + } + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +/** + * This function initializes the internal state array, + * with an array of 32-bit integers used as the seeds + * @param init_key the array of 32-bit integers, used as a seed. + * @param key_length the length of init_key. 
+ */ +sfmt_t *init_by_array(uint32_t *init_key, int key_length) { + void *p; + sfmt_t *ctx; + int i, j, count; + uint32_t r; + int lag; + int mid; + int size = N * 4; + uint32_t *psfmt32; + + if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { + return NULL; + } + ctx = (sfmt_t *)p; + psfmt32 = &ctx->sfmt[0].u[0]; + + if (size >= 623) { + lag = 11; + } else if (size >= 68) { + lag = 7; + } else if (size >= 39) { + lag = 5; + } else { + lag = 3; + } + mid = (size - lag) / 2; + + memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); + if (key_length + 1 > N32) { + count = key_length + 1; + } else { + count = N32; + } + r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] + ^ psfmt32[idxof(N32 - 1)]); + psfmt32[idxof(mid)] += r; + r += key_length; + psfmt32[idxof(mid + lag)] += r; + psfmt32[idxof(0)] = r; + + count--; + for (i = 1, j = 0; (j < count) && (j < key_length); j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += init_key[j] + i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (; j < count; j++) { + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + ^ psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] += r; + r += i; + psfmt32[idxof((i + mid + lag) % N32)] += r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + for (j = 0; j < N32; j++) { + r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + + psfmt32[idxof((i + N32 - 1) % N32)]); + psfmt32[idxof((i + mid) % N32)] ^= r; + r -= i; + psfmt32[idxof((i + mid + lag) % N32)] ^= r; + psfmt32[idxof(i)] = r; + i = (i + 1) % N32; + } + + ctx->idx = N32; + period_certification(ctx); + ctx->initialized = 1; + + return ctx; +} + +void fini_gen_rand(sfmt_t *ctx) { + assert(ctx != NULL); + + ctx->initialized = 0; + free(ctx); +} diff --git a/redis.submodule/jemalloc/test/src/btalloc.c b/redis.submodule/jemalloc/test/src/btalloc.c new file mode 100644 index 0000000..9a253d9 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/btalloc.c @@ -0,0 +1,8 @@ +#include "test/jemalloc_test.h" + +void * +btalloc(size_t size, unsigned bits) +{ + + return (btalloc_0(size, bits)); +} diff --git a/redis.submodule/jemalloc/test/src/btalloc_0.c b/redis.submodule/jemalloc/test/src/btalloc_0.c new file mode 100644 index 0000000..77d8904 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/btalloc_0.c @@ -0,0 +1,3 @@ +#include "test/jemalloc_test.h" + +btalloc_n_gen(0) diff --git a/redis.submodule/jemalloc/test/src/btalloc_1.c b/redis.submodule/jemalloc/test/src/btalloc_1.c new file mode 100644 index 0000000..4c126c3 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/btalloc_1.c @@ -0,0 +1,3 @@ +#include "test/jemalloc_test.h" + +btalloc_n_gen(1) diff --git a/redis.submodule/jemalloc/test/src/math.c b/redis.submodule/jemalloc/test/src/math.c new file mode 100644 index 0000000..887a363 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/math.c @@ -0,0 +1,2 @@ +#define MATH_C_ +#include "test/jemalloc_test.h" diff --git a/redis.submodule/jemalloc/test/src/mq.c b/redis.submodule/jemalloc/test/src/mq.c new file mode 100644 index 0000000..40b31c1 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/mq.c @@ -0,0 +1,29 @@ +#include "test/jemalloc_test.h" + +/* + * Sleep for approximately ns nanoseconds. No lower *nor* upper bound on sleep + * time is guaranteed. 
+ */ +void +mq_nanosleep(unsigned ns) +{ + + assert(ns <= 1000*1000*1000); + +#ifdef _WIN32 + Sleep(ns / 1000); +#else + { + struct timespec timeout; + + if (ns < 1000*1000*1000) { + timeout.tv_sec = 0; + timeout.tv_nsec = ns; + } else { + timeout.tv_sec = 1; + timeout.tv_nsec = 0; + } + nanosleep(&timeout, NULL); + } +#endif +} diff --git a/redis.submodule/jemalloc/test/src/mtx.c b/redis.submodule/jemalloc/test/src/mtx.c new file mode 100644 index 0000000..73bd02f --- /dev/null +++ b/redis.submodule/jemalloc/test/src/mtx.c @@ -0,0 +1,66 @@ +#include "test/jemalloc_test.h" + +#ifndef _CRT_SPINCOUNT +#define _CRT_SPINCOUNT 4000 +#endif + +bool +mtx_init(mtx_t *mtx) +{ + +#ifdef _WIN32 + if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) + return (true); +#elif (defined(JEMALLOC_OSSPIN)) + mtx->lock = 0; +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) + return (true); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); + if (pthread_mutex_init(&mtx->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return (true); + } + pthread_mutexattr_destroy(&attr); +#endif + return (false); +} + +void +mtx_fini(mtx_t *mtx) +{ + +#ifdef _WIN32 +#elif (defined(JEMALLOC_OSSPIN)) +#else + pthread_mutex_destroy(&mtx->lock); +#endif +} + +void +mtx_lock(mtx_t *mtx) +{ + +#ifdef _WIN32 + EnterCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockLock(&mtx->lock); +#else + pthread_mutex_lock(&mtx->lock); +#endif +} + +void +mtx_unlock(mtx_t *mtx) +{ + +#ifdef _WIN32 + LeaveCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLockUnlock(&mtx->lock); +#else + pthread_mutex_unlock(&mtx->lock); +#endif +} diff --git a/redis.submodule/jemalloc/test/src/test.c b/redis.submodule/jemalloc/test/src/test.c new file mode 100644 index 0000000..8173614 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/test.c @@ -0,0 +1,107 @@ +#include "test/jemalloc_test.h" + +static unsigned test_count = 0; +static test_status_t test_counts[test_status_count] = {0, 0, 0}; +static test_status_t test_status = test_status_pass; +static const char * test_name = ""; + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_skip(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_skip; +} + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_fail(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_fail; +} + +static const char * +test_status_string(test_status_t test_status) +{ + + switch (test_status) { + case test_status_pass: return "pass"; + case test_status_skip: return "skip"; + case test_status_fail: return "fail"; + default: not_reached(); + } +} + +void +p_test_init(const char *name) +{ + + test_count++; + test_status = test_status_pass; + test_name = name; +} + +void +p_test_fini(void) +{ + + test_counts[test_status]++; + malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); +} + +test_status_t +p_test(test_t *t, ...) +{ + test_status_t ret; + va_list ap; + + /* + * Make sure initialization occurs prior to running tests. Tests are + * special because they may use internal facilities prior to triggering + * initialization as a side effect of calling into the public API. This + * is a final safety that works even if jemalloc_constructor() doesn't + * run, as for MSVC builds. 
+ */ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return (test_status_fail); + } + + ret = test_status_pass; + va_start(ap, t); + for (; t != NULL; t = va_arg(ap, test_t *)) { + t(); + if (test_status > ret) + ret = test_status; + } + va_end(ap); + + malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", + test_status_string(test_status_pass), + test_counts[test_status_pass], test_count, + test_status_string(test_status_skip), + test_counts[test_status_skip], test_count, + test_status_string(test_status_fail), + test_counts[test_status_fail], test_count); + + return (ret); +} + +void +p_test_fail(const char *prefix, const char *message) +{ + + malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); + test_status = test_status_fail; +} diff --git a/redis.submodule/jemalloc/test/src/thd.c b/redis.submodule/jemalloc/test/src/thd.c new file mode 100644 index 0000000..c9d0065 --- /dev/null +++ b/redis.submodule/jemalloc/test/src/thd.c @@ -0,0 +1,39 @@ +#include "test/jemalloc_test.h" + +#ifdef _WIN32 +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) +{ + LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; + *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); + if (*thd == NULL) + test_fail("Error in CreateThread()\n"); +} + +void +thd_join(thd_t thd, void **ret) +{ + + if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { + DWORD exit_code; + GetExitCodeThread(thd, (LPDWORD) &exit_code); + *ret = (void *)(uintptr_t)exit_code; + } +} + +#else +void +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) +{ + + if (pthread_create(thd, NULL, proc, arg) != 0) + test_fail("Error in pthread_create()\n"); +} + +void +thd_join(thd_t thd, void **ret) +{ + + pthread_join(thd, ret); +} +#endif diff --git a/redis.submodule/jemalloc/test/src/timer.c b/redis.submodule/jemalloc/test/src/timer.c new file mode 100644 index 0000000..0c93aba --- /dev/null +++ b/redis.submodule/jemalloc/test/src/timer.c @@ -0,0 +1,85 @@ +#include "test/jemalloc_test.h" + +void +timer_start(timedelta_t *timer) +{ + +#ifdef _WIN32 + GetSystemTimeAsFileTime(&timer->ft0); +#elif JEMALLOC_CLOCK_GETTIME + if (sysconf(_SC_MONOTONIC_CLOCK) <= 0) + timer->clock_id = CLOCK_REALTIME; + else + timer->clock_id = CLOCK_MONOTONIC; + clock_gettime(timer->clock_id, &timer->ts0); +#else + gettimeofday(&timer->tv0, NULL); +#endif +} + +void +timer_stop(timedelta_t *timer) +{ + +#ifdef _WIN32 + GetSystemTimeAsFileTime(&timer->ft0); +#elif JEMALLOC_CLOCK_GETTIME + clock_gettime(timer->clock_id, &timer->ts1); +#else + gettimeofday(&timer->tv1, NULL); +#endif +} + +uint64_t +timer_usec(const timedelta_t *timer) +{ + +#ifdef _WIN32 + uint64_t t0, t1; + t0 = (((uint64_t)timer->ft0.dwHighDateTime) << 32) | + timer->ft0.dwLowDateTime; + t1 = (((uint64_t)timer->ft1.dwHighDateTime) << 32) | + timer->ft1.dwLowDateTime; + return ((t1 - t0) / 10); +#elif JEMALLOC_CLOCK_GETTIME + return (((timer->ts1.tv_sec - timer->ts0.tv_sec) * 1000000) + + (timer->ts1.tv_nsec - timer->ts0.tv_nsec) / 1000); +#else + return (((timer->tv1.tv_sec - timer->tv0.tv_sec) * 1000000) + + timer->tv1.tv_usec - timer->tv0.tv_usec); +#endif +} + +void +timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) +{ + uint64_t t0 = timer_usec(a); + uint64_t t1 = timer_usec(b); + uint64_t mult; + unsigned i = 0; + unsigned j; + int n; + + /* Whole. 
*/ + n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); + i += n; + if (i >= buflen) + return; + mult = 1; + for (j = 0; j < n; j++) + mult *= 10; + + /* Decimal. */ + n = malloc_snprintf(&buf[i], buflen-i, "."); + i += n; + + /* Fraction. */ + while (i < buflen-1) { + uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 + >= 5)) ? 1 : 0; + n = malloc_snprintf(&buf[i], buflen-i, + "%"FMTu64, (t0 * mult / t1) % 10 + round); + i += n; + mult *= 10; + } +} diff --git a/redis.submodule/jemalloc/test/stress/microbench.c b/redis.submodule/jemalloc/test/stress/microbench.c new file mode 100644 index 0000000..ee39fea --- /dev/null +++ b/redis.submodule/jemalloc/test/stress/microbench.c @@ -0,0 +1,181 @@ +#include "test/jemalloc_test.h" + +JEMALLOC_INLINE_C void +time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void)) +{ + uint64_t i; + + for (i = 0; i < nwarmup; i++) + func(); + timer_start(timer); + for (i = 0; i < niter; i++) + func(); + timer_stop(timer); +} + +void +compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, + void (*func_a), const char *name_b, void (*func_b)) +{ + timedelta_t timer_a, timer_b; + char ratio_buf[6]; + void *p; + + p = mallocx(1, 0); + if (p == NULL) { + test_fail("Unexpected mallocx() failure"); + return; + } + + time_func(&timer_a, nwarmup, niter, func_a); + time_func(&timer_b, nwarmup, niter, func_b); + + timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf)); + malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us, " + "%s=%"FMTu64"us, ratio=1:%s\n", + niter, name_a, timer_usec(&timer_a), name_b, timer_usec(&timer_b), + ratio_buf); + + dallocx(p, 0); +} + +static void +malloc_free(void) +{ + /* The compiler can optimize away free(malloc(1))! */ + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + free(p); +} + +static void +mallocx_free(void) +{ + void *p = mallocx(1, 0); + if (p == NULL) { + test_fail("Unexpected mallocx() failure"); + return; + } + free(p); +} + +TEST_BEGIN(test_malloc_vs_mallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "malloc", + malloc_free, "mallocx", mallocx_free); +} +TEST_END + +static void +malloc_dallocx(void) +{ + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + dallocx(p, 0); +} + +static void +malloc_sdallocx(void) +{ + void *p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + sdallocx(p, 1, 0); +} + +TEST_BEGIN(test_free_vs_dallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, + "dallocx", malloc_dallocx); +} +TEST_END + +TEST_BEGIN(test_dallocx_vs_sdallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, + "sdallocx", malloc_sdallocx); +} +TEST_END + +static void +malloc_mus_free(void) +{ + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + malloc_usable_size(p); + free(p); +} + +static void +malloc_sallocx_free(void) +{ + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() failure"); + return; + } + if (sallocx(p, 0) < 1) + test_fail("Unexpected sallocx() failure"); + free(p); +} + +TEST_BEGIN(test_mus_vs_sallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", + malloc_mus_free, "sallocx", malloc_sallocx_free); +} +TEST_END + +static void +malloc_nallocx_free(void) +{ + void *p; + + p = malloc(1); + if (p == NULL) { + test_fail("Unexpected malloc() 
failure"); + return; + } + if (nallocx(1, 0) < 1) + test_fail("Unexpected nallocx() failure"); + free(p); +} + +TEST_BEGIN(test_sallocx_vs_nallocx) +{ + + compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", + malloc_sallocx_free, "nallocx", malloc_nallocx_free); +} +TEST_END + +int +main(void) +{ + + return (test( + test_malloc_vs_mallocx, + test_free_vs_dallocx, + test_dallocx_vs_sdallocx, + test_mus_vs_sallocx, + test_sallocx_vs_nallocx)); +} diff --git a/redis.submodule/jemalloc/test/test.sh.in b/redis.submodule/jemalloc/test/test.sh.in new file mode 100644 index 0000000..a39f99f --- /dev/null +++ b/redis.submodule/jemalloc/test/test.sh.in @@ -0,0 +1,53 @@ +#!/bin/sh + +case @abi@ in + macho) + export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" + ;; + pecoff) + export PATH="${PATH}:@objroot@lib" + ;; + *) + ;; +esac + +# Corresponds to test_status_t. +pass_code=0 +skip_code=1 +fail_code=2 + +pass_count=0 +skip_count=0 +fail_count=0 +for t in $@; do + if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then + echo + fi + echo "=== ${t} ===" + ${t}@exe@ @abs_srcroot@ @abs_objroot@ + result_code=$? + case ${result_code} in + ${pass_code}) + pass_count=$((pass_count+1)) + ;; + ${skip_code}) + skip_count=$((skip_count+1)) + ;; + ${fail_code}) + fail_count=$((fail_count+1)) + ;; + *) + echo "Test harness error" 1>&2 + exit 1 + esac +done + +total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` +echo +echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" + +if [ ${fail_count} -eq 0 ] ; then + exit 0 +else + exit 1 +fi diff --git a/redis.submodule/jemalloc/test/unit/SFMT.c b/redis.submodule/jemalloc/test/unit/SFMT.c new file mode 100644 index 0000000..ba4be87 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/SFMT.c @@ -0,0 +1,1605 @@ +/* + * This file derives from SFMT 1.3.3 + * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was + * released under the terms of the following license: + * + * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "test/jemalloc_test.h" + +#define BLOCK_SIZE 10000 +#define BLOCK_SIZE64 (BLOCK_SIZE / 2) +#define COUNT_1 1000 +#define COUNT_2 700 + +static const uint32_t init_gen_rand_32_expected[] = { + 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, + 3796268453U, 423124208U, 2143818589U, 3827219408U, 2987036003U, + 2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U, + 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U, + 1764869045U, 824597505U, 862581900U, 2469764249U, 812862514U, + 359318673U, 116957936U, 3367389672U, 2327178354U, 1898245200U, + 3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U, + 1417665896U, 964324147U, 2282797708U, 2456269299U, 313400376U, + 2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U, + 463679346U, 3721104591U, 3475064196U, 856141236U, 1499559719U, + 3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U, + 1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U, + 462466863U, 1037679449U, 1228140306U, 922298197U, 1205109853U, + 1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U, + 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U, + 2997311961U, 1189931652U, 3981543765U, 2826166703U, 87159245U, + 1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U, + 2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U, + 3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U, + 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U, + 3212265810U, 984692259U, 346590253U, 1804179199U, 3298543443U, + 750108141U, 2880257022U, 243310542U, 1869036465U, 1588062513U, + 2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U, + 2539522841U, 127965585U, 3992448871U, 913388237U, 559130076U, + 1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U, + 1013913783U, 1155864921U, 2715773730U, 915061862U, 1948766573U, + 2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U, + 3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U, + 3237090058U, 3880063844U, 862416318U, 4039923869U, 2303788317U, + 3073590536U, 701653667U, 2131530884U, 3169309950U, 2028486980U, + 747196777U, 3620218225U, 432016035U, 1449580595U, 2772266392U, + 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U, + 1104864179U, 342430307U, 1350510923U, 3024656237U, 1028417492U, + 2870772950U, 290847558U, 3675663500U, 508431529U, 4264340390U, + 2263569913U, 1669302976U, 519511383U, 2706411211U, 3764615828U, + 3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U, + 2348767304U, 2664054906U, 3763609282U, 593943581U, 3757090046U, + 2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U, + 2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U, + 4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U, + 3038334120U, 547099465U, 771105860U, 3199983734U, 4282046461U, + 2298388363U, 934810218U, 2837827901U, 3952500708U, 2095130248U, + 3083335297U, 26885281U, 
3932155283U, 1531751116U, 1425227133U, + 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U, + 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U, + 1729880440U, 805571298U, 448971099U, 2726757106U, 2749436461U, + 2485987104U, 175337042U, 3235477922U, 3882114302U, 2020970972U, + 943926109U, 2762587195U, 1904195558U, 3452650564U, 108432281U, + 3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U, + 4258854744U, 980047703U, 4057175418U, 3890008292U, 145653646U, + 3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U, + 2868313360U, 2886032750U, 1110873820U, 279553524U, 3007258565U, + 1104807822U, 3186961098U, 315764646U, 2163680838U, 3574508994U, + 3099755655U, 191957684U, 3642656737U, 3317946149U, 3522087636U, + 444526410U, 779157624U, 1088229627U, 1092460223U, 1856013765U, + 3659877367U, 368270451U, 503570716U, 3000984671U, 2742789647U, + 928097709U, 2914109539U, 308843566U, 2816161253U, 3667192079U, + 2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U, + 3787782576U, 2894104823U, 1296880455U, 1253636503U, 989959407U, + 2291560361U, 2776790436U, 1913178042U, 1584677829U, 689637520U, + 1898406878U, 688391508U, 3385234998U, 845493284U, 1943591856U, + 2720472050U, 222695101U, 1653320868U, 2904632120U, 4084936008U, + 1080720688U, 3938032556U, 387896427U, 2650839632U, 99042991U, + 1720913794U, 1047186003U, 1877048040U, 2090457659U, 517087501U, + 4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U, + 1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U, + 3576797776U, 2074552772U, 832002644U, 3097122623U, 2464859298U, + 2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U, + 2306631119U, 639727358U, 3369698270U, 226902796U, 2099920751U, + 1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U, + 841660320U, 3974501451U, 3360949056U, 1676829340U, 728899254U, + 2047809627U, 2390948962U, 670165943U, 3412951831U, 4189320049U, + 1911595255U, 2055363086U, 507170575U, 418219594U, 4141495280U, + 2692088692U, 4203630654U, 3540093932U, 791986533U, 2237921051U, + 2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U, + 591653646U, 228432437U, 1611046598U, 3007736357U, 1040040725U, + 2726180733U, 2789804360U, 4263568405U, 829098158U, 3847722805U, + 1123578029U, 1804276347U, 997971319U, 4203797076U, 4185199713U, + 2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U, + 1967077982U, 1585223204U, 1097475516U, 1903944948U, 740382444U, + 1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U, + 3519754455U, 2866243923U, 341865437U, 645498576U, 2690735853U, + 1046963033U, 2493178460U, 1187604696U, 1619577821U, 488503634U, + 3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U, + 3794467088U, 1796415981U, 3657173746U, 409136296U, 1387122342U, + 1297726519U, 219544855U, 4270285558U, 437578827U, 1444698679U, + 2258519491U, 963109892U, 3982244073U, 3351535275U, 385328496U, + 1804784013U, 698059346U, 3920535147U, 708331212U, 784338163U, + 785678147U, 1238376158U, 1557298846U, 2037809321U, 271576218U, + 4145155269U, 1913481602U, 2763691931U, 588981080U, 1201098051U, + 3717640232U, 1509206239U, 662536967U, 3180523616U, 1133105435U, + 2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U, + 3034720222U, 1090798544U, 2942170004U, 4036187520U, 686972531U, + 2610990302U, 2641437026U, 1837562420U, 722096247U, 1315333033U, + 2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U, + 814384596U, 385649582U, 1916643285U, 1924625106U, 2512905582U, + 2501170304U, 
4275223366U, 2841225246U, 1467663688U, 3563567847U, + 2969208552U, 884750901U, 102992576U, 227844301U, 3681442994U, + 3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U, + 1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U, + 1629323443U, 3233815U, 2003823032U, 3083834263U, 2379264872U, + 3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U, + 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U, + 4018728324U, 983712955U, 440071928U, 3710838677U, 2001027698U, + 3994702151U, 22493119U, 3584400918U, 3446253670U, 4254789085U, + 1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U, + 3403623451U, 67092802U, 2609352193U, 3914150340U, 1814842761U, + 3610830847U, 591531412U, 3880232807U, 1673505890U, 2585326991U, + 1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U, + 154025329U, 121678860U, 1164915738U, 973873761U, 269116100U, + 52087970U, 744015362U, 498556057U, 94298882U, 1563271621U, + 2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U, + 1097742433U, 3924840517U, 264557272U, 2292287003U, 3203307984U, + 4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U, + 3412254904U, 583538222U, 2390557166U, 4140459427U, 2810357445U, + 226777499U, 2496151295U, 2207301712U, 3283683112U, 611630281U, + 1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U, + 1313998161U, 2365383016U, 3146941060U, 1801206260U, 796124080U, + 2076248581U, 1747472464U, 3254365145U, 595543130U, 3573909503U, + 3758250204U, 2020768540U, 2439254210U, 93368951U, 3155792250U, + 2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U, + 1443493395U, 2581621665U, 3086506297U, 2443465861U, 558107211U, + 1519367835U, 249149686U, 908102264U, 2588765675U, 1232743965U, + 1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U, + 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U, + 1633861986U, 1636979509U, 1438500431U, 58463278U, 2823485629U, + 2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U, + 1444292075U, 437703973U, 1060414512U, 189705863U, 910018135U, + 4077357964U, 884213423U, 2644986052U, 3973488374U, 1187906116U, + 2331207875U, 780463700U, 3713351662U, 3854611290U, 412805574U, + 2978462572U, 2176222820U, 829424696U, 2790788332U, 2750819108U, + 1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U, + 14130042U, 192292500U, 947227076U, 90719497U, 3854230320U, + 784028434U, 2142399787U, 1563449646U, 2844400217U, 819143172U, + 2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U, + 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U, + 1412424497U, 2981395985U, 1418359660U, 2925902456U, 52752784U, + 3713667988U, 3924669405U, 648975707U, 1145520213U, 4018650664U, + 3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U, + 1114540067U, 3728768081U, 2396958768U, 590672271U, 904818725U, + 4263660715U, 700754408U, 1042601829U, 4094111823U, 4274838909U, + 2512692617U, 2774300207U, 2057306915U, 3470942453U, 99333088U, + 1142661026U, 2889931380U, 14316674U, 2201179167U, 415289459U, + 448265759U, 3515142743U, 3254903683U, 246633281U, 1184307224U, + 2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U, + 1544150531U, 399010405U, 1513946097U, 499682937U, 461167460U, + 3045570638U, 1633669705U, 851492362U, 4052801922U, 2055266765U, + 635556996U, 368266356U, 2385737383U, 3218202352U, 2603772408U, + 349178792U, 226482567U, 3102426060U, 3575998268U, 2103001871U, + 3243137071U, 225500688U, 1634718593U, 4283311431U, 4292122923U, + 3842802787U, 811735523U, 
105712518U, 663434053U, 1855889273U, + 2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U, + 3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U, + 353001539U, 459496439U, 3799690868U, 1293777660U, 2761079737U, + 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U, + 4134660419U, 3903444024U, 3576494993U, 203682175U, 3321164857U, + 2747963611U, 79749085U, 2992890370U, 1240278549U, 1772175713U, + 2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U, + 2489884874U, 756853961U, 2298874501U, 4013448667U, 4143996022U, + 2948306858U, 4132920035U, 1283299272U, 995592228U, 3450508595U, + 1027845759U, 1766942720U, 3861411826U, 1446861231U, 95974993U, + 3502263554U, 1487532194U, 601502472U, 4129619129U, 250131773U, + 2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U, + 2276901713U, 365637751U, 2260718426U, 1394775634U, 1791172338U, + 2690503163U, 2952737846U, 1568710462U, 732623190U, 2980358000U, + 1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U, + 3204031934U, 735775531U, 107468620U, 3734611984U, 631009402U, + 3083622457U, 4109580626U, 159373458U, 1301970201U, 4132389302U, + 1293255004U, 847182752U, 4170022737U, 96712900U, 2641406755U, + 1381727755U, 405608287U, 4287919625U, 1703554290U, 3589580244U, + 2911403488U, 2166565U, 2647306451U, 2330535117U, 1200815358U, + 1165916754U, 245060911U, 4040679071U, 3684908771U, 2452834126U, + 2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U, + 4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U, + 2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U, + 4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U, + 1046178638U, 144496770U, 230725846U, 3349637149U, 17065717U, + 2809932048U, 2054581785U, 3608424964U, 3259628808U, 134897388U, + 3743067463U, 257685904U, 3795656590U, 1562468719U, 3589103904U, + 3120404710U, 254684547U, 2653661580U, 3663904795U, 2631942758U, + 1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U, + 1935868586U, 4110970440U, 296706371U, 2128666368U, 1319875791U, + 1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U, + 1244491489U, 3533770203U, 567496053U, 2757924305U, 2781639343U, + 2818420107U, 560404889U, 2619609724U, 4176035430U, 2511289753U, + 2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U, + 330725126U, 367400677U, 888239854U, 545570454U, 4259590525U, + 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U, + 3631537963U, 3342671457U, 1301549147U, 2083739356U, 146593792U, + 3217959080U, 652755743U, 2032187193U, 3898758414U, 1021358093U, + 4037409230U, 2176407931U, 3427391950U, 2883553603U, 985613827U, + 3105265092U, 3423168427U, 3387507672U, 467170288U, 2141266163U, + 3723870208U, 916410914U, 1293987799U, 2652584950U, 769160137U, + 3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U, + 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U, + 2352279820U, 401870217U, 2625822463U, 1529125296U, 2981801895U, + 1191285226U, 4027725437U, 3432700217U, 4098835661U, 971182783U, + 2443861173U, 3881457123U, 3874386651U, 457276199U, 2638294160U, + 4002809368U, 421169044U, 1112642589U, 3076213779U, 3387033971U, + 2499610950U, 3057240914U, 1662679783U, 461224431U, 1168395933U +}; +static const uint32_t init_by_array_32_expected[] = { + 2920711183U, 3885745737U, 3501893680U, 856470934U, 1421864068U, + 277361036U, 1518638004U, 2328404353U, 3355513634U, 64329189U, + 1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U, + 2913275699U, 882658412U, 
384641219U, 422202002U, 1873384891U, + 2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U, + 4195470535U, 779207191U, 1577721373U, 1390469554U, 2928648150U, + 121399709U, 3170839019U, 4044347501U, 953953814U, 3821710850U, + 3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U, + 4044408017U, 433600965U, 1637785608U, 1798509764U, 860770589U, + 3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U, + 3357389386U, 2154596123U, 496568176U, 2650035164U, 2472361850U, + 3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U, + 4078331588U, 3706103141U, 170391138U, 3806085154U, 1680970100U, + 1961637521U, 3316029766U, 890610272U, 1453751581U, 1430283664U, + 3051057411U, 3597003186U, 542563954U, 3796490244U, 1690016688U, + 3448752238U, 440702173U, 347290497U, 1121336647U, 2540588620U, + 280881896U, 2495136428U, 213707396U, 15104824U, 2946180358U, + 659000016U, 566379385U, 2614030979U, 2855760170U, 334526548U, + 2315569495U, 2729518615U, 564745877U, 1263517638U, 3157185798U, + 1604852056U, 1011639885U, 2950579535U, 2524219188U, 312951012U, + 1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U, + 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U, + 3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U, + 2631577923U, 3027156164U, 842334259U, 3353446843U, 1226432104U, + 1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U, + 2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U, + 1625156629U, 3669701987U, 615211810U, 3294791649U, 4131143784U, + 2590843588U, 3207422808U, 3275066464U, 561592872U, 3957205738U, + 3396578098U, 48410678U, 3505556445U, 1005764855U, 3920606528U, + 2936980473U, 2378918600U, 2404449845U, 1649515163U, 701203563U, + 3705256349U, 83714199U, 3586854132U, 922978446U, 2863406304U, + 3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U, + 3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U, + 3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U, + 980754888U, 1302782359U, 1765252468U, 459245755U, 3728923860U, + 1512894209U, 2046491914U, 207860527U, 514188684U, 2288713615U, + 1597354672U, 3349636117U, 2357291114U, 3995796221U, 945364213U, + 1893326518U, 3770814016U, 1691552714U, 2397527410U, 967486361U, + 776416472U, 4197661421U, 951150819U, 1852770983U, 4044624181U, + 1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U, + 1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U, + 2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U, + 20815612U, 3727008608U, 3871996229U, 824061249U, 1932503978U, + 3404541726U, 758428924U, 2609331364U, 1223966026U, 1299179808U, + 648499352U, 2180134401U, 880821170U, 3781130950U, 113491270U, + 1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U, + 4165586898U, 1629813212U, 2887821158U, 415045333U, 628926856U, + 2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U, + 1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U, + 2052479531U, 1695809164U, 3176587306U, 2632770465U, 81634404U, + 1603220563U, 644238487U, 302857763U, 897352968U, 2613146653U, + 1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U, + 3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U, + 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U, + 3111154986U, 2929478371U, 668346391U, 1152241381U, 2632029711U, + 3004150659U, 2135025926U, 948690501U, 2799119116U, 4228829406U, + 1981197489U, 4209064138U, 684318751U, 3459397845U, 201790843U, + 4022541136U, 
3043635877U, 492509624U, 3263466772U, 1509148086U, + 921459029U, 3198857146U, 705479721U, 3835966910U, 3603356465U, + 576159741U, 1742849431U, 594214882U, 2055294343U, 3634861861U, + 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U, + 2464815318U, 3960178104U, 1784261920U, 18311476U, 3627135050U, + 644609697U, 424968996U, 919890700U, 2986824110U, 816423214U, + 4003562844U, 1392714305U, 1757384428U, 2569030598U, 995949559U, + 3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U, + 2770783427U, 2775406005U, 2777781742U, 1931292655U, 472147933U, + 3865853827U, 2726470545U, 2668412860U, 2887008249U, 408979190U, + 3578063323U, 3242082049U, 1778193530U, 27981909U, 2362826515U, + 389875677U, 1043878156U, 581653903U, 3830568952U, 389535942U, + 3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U, + 3951172488U, 1098005003U, 906275699U, 3446228002U, 2220677963U, + 2059306445U, 132199571U, 476838790U, 1868039399U, 3097344807U, + 857300945U, 396345050U, 2835919916U, 1782168828U, 1419519470U, + 4288137521U, 819087232U, 596301494U, 872823172U, 1526888217U, + 805161465U, 1116186205U, 2829002754U, 2352620120U, 620121516U, + 354159268U, 3601949785U, 209568138U, 1352371732U, 2145977349U, + 4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U, + 3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U, + 1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U, + 3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U, + 3574970923U, 4057131421U, 589224178U, 1000098193U, 171190718U, + 2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U, + 3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U, + 2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U, + 1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U, + 2419700818U, 971242709U, 1361975763U, 1096842482U, 3271045537U, + 81165449U, 612438025U, 3912966678U, 1356929810U, 733545735U, + 537003843U, 1282953084U, 884458241U, 588930090U, 3930269801U, + 2961472450U, 1219535534U, 3632251943U, 268183903U, 1441240533U, + 3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U, + 2105543911U, 1764085217U, 3246183186U, 482438805U, 888317895U, + 2628314765U, 2466219854U, 717546004U, 2322237039U, 416725234U, + 1544049923U, 1797944973U, 3398652364U, 3111909456U, 485742908U, + 2277491072U, 1056355088U, 3181001278U, 129695079U, 2693624550U, + 1764438564U, 3797785470U, 195503713U, 3266519725U, 2053389444U, + 1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U, + 4094406648U, 2171410785U, 1781151386U, 1378577117U, 654643266U, + 3424024173U, 3385813322U, 679385799U, 479380913U, 681715441U, + 3096225905U, 276813409U, 3854398070U, 2721105350U, 831263315U, + 3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U, + 1412672743U, 820330404U, 3491501010U, 942735832U, 710652807U, + 3972652090U, 679881088U, 40577009U, 3705286397U, 2815423480U, + 3566262429U, 663396513U, 3777887429U, 4016670678U, 404539370U, + 1142712925U, 1140173408U, 2913248352U, 2872321286U, 263751841U, + 3175196073U, 3162557581U, 2878996619U, 75498548U, 3836833140U, + 3284664959U, 1157523805U, 112847376U, 207855609U, 1337979698U, + 1222578451U, 157107174U, 901174378U, 3883717063U, 1618632639U, + 1767889440U, 4264698824U, 1582999313U, 884471997U, 2508825098U, + 3756370771U, 2457213553U, 3565776881U, 3709583214U, 915609601U, + 460833524U, 1091049576U, 85522880U, 2553251U, 132102809U, + 2429882442U, 2562084610U, 1386507633U, 4112471229U, 21965213U, + 1981516006U, 2418435617U, 
3054872091U, 4251511224U, 2025783543U, + 1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U, + 3463052265U, 802340101U, 1912886800U, 4031997367U, 3550640406U, + 1596096923U, 610150600U, 431464457U, 2541325046U, 486478003U, + 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U, + 1228993498U, 510075548U, 3424962587U, 2458689681U, 818934833U, + 4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U, + 3317854285U, 631986188U, 3008214764U, 613826412U, 3567358221U, + 3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U, + 565267881U, 768644821U, 198310105U, 2396688616U, 1837659011U, + 203429334U, 854539004U, 4235811518U, 3338304926U, 3730418692U, + 3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U, + 3894665932U, 145835667U, 249563655U, 1932210840U, 2431696407U, + 3312636759U, 214962629U, 2092026914U, 3020145527U, 4073039873U, + 2739105705U, 1308336752U, 855104522U, 2391715321U, 67448785U, + 547989482U, 854411802U, 3608633740U, 431731530U, 537375589U, + 3888005760U, 696099141U, 397343236U, 1864511780U, 44029739U, + 1729526891U, 1993398655U, 2010173426U, 2591546756U, 275223291U, + 1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U, + 681707194U, 3260479338U, 933579397U, 2983029282U, 2505504587U, + 2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U, + 2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U, + 3917185129U, 2155660174U, 3280524475U, 1503827867U, 674380765U, + 1918468193U, 3843983676U, 634358221U, 2538335643U, 1873351298U, + 3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U, + 2516284287U, 24437745U, 1118381474U, 2816314867U, 2448576035U, + 4281989654U, 217287825U, 165872888U, 2628995722U, 3533525116U, + 2721669106U, 872340568U, 3429930655U, 3309047304U, 3916704967U, + 3270160355U, 1348884255U, 1634797670U, 881214967U, 4259633554U, + 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U, + 3853082619U, 4073196549U, 1189620777U, 637238656U, 930241537U, + 4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U, + 1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U, + 3307479609U, 3849322257U, 3507445699U, 4247696636U, 758393720U, + 967665141U, 1095244571U, 1319812152U, 407678762U, 2640605208U, + 2170766134U, 3663594275U, 4039329364U, 2512175520U, 725523154U, + 2249807004U, 3312617979U, 2414634172U, 1278482215U, 349206484U, + 1573063308U, 1196429124U, 3873264116U, 2400067801U, 268795167U, + 226175489U, 2961367263U, 1968719665U, 42656370U, 1010790699U, + 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U, + 3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U, + 1087362407U, 3625200291U, 361937537U, 580441897U, 1520043666U, + 2270875402U, 1009161260U, 2502355842U, 4278769785U, 473902412U, + 1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U, + 1806991954U, 2194674403U, 3455972205U, 807207678U, 3655655687U, + 674112918U, 195425752U, 3917890095U, 1874364234U, 1837892715U, + 3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U, + 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U, + 2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U, + 1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U, + 3494770452U, 3332626671U, 55327267U, 478030603U, 411080625U, + 3419529010U, 1604767823U, 3513468014U, 570668510U, 913790824U, + 2283967995U, 695159462U, 3825542932U, 4150698144U, 1829758699U, + 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U, + 2477423819U, 3932081579U, 
900879979U, 2145588390U, 2670007504U, + 580819444U, 1864996828U, 2526325979U, 1019124258U, 815508628U, + 2765933989U, 1277301341U, 3006021786U, 855540956U, 288025710U, + 1919594237U, 2331223864U, 177452412U, 2475870369U, 2689291749U, + 865194284U, 253432152U, 2628531804U, 2861208555U, 2361597573U, + 1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U, + 2596878672U, 2041442161U, 31164696U, 2662962485U, 3665637339U, + 1678115244U, 2699839832U, 3651968520U, 3521595541U, 458433303U, + 2423096824U, 21831741U, 380011703U, 2498168716U, 861806087U, + 1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U, + 4171949898U, 3886039621U, 1661344005U, 3424285243U, 992588372U, + 2500984144U, 2993248497U, 3590193895U, 1535327365U, 515645636U, + 131633450U, 3729760261U, 1613045101U, 3254194278U, 15889678U, + 1493590689U, 244148718U, 2991472662U, 1401629333U, 777349878U, + 2501401703U, 4285518317U, 3794656178U, 955526526U, 3442142820U, + 3970298374U, 736025417U, 2737370764U, 1271509744U, 440570731U, + 136141826U, 1596189518U, 923399175U, 257541519U, 3505774281U, + 2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U, + 1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U, + 1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U, + 2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U, + 353294540U, 1609232588U, 243926648U, 2332803291U, 507996832U, + 2392838793U, 4075145196U, 2060984340U, 4287475136U, 88232602U, + 2491531140U, 4159725633U, 2272075455U, 759298618U, 201384554U, + 838356250U, 1416268324U, 674476934U, 90795364U, 141672229U, + 3660399588U, 4196417251U, 3249270244U, 3774530247U, 59587265U, + 3683164208U, 19392575U, 1463123697U, 1882205379U, 293780489U, + 2553160622U, 2933904694U, 675638239U, 2851336944U, 1435238743U, + 2448730183U, 804436302U, 2119845972U, 322560608U, 4097732704U, + 2987802540U, 641492617U, 2575442710U, 4217822703U, 3271835300U, + 2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U, + 3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U, + 584489368U, 3431165766U, 897384869U, 2062537737U, 2847889234U, + 3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U, + 2750138839U, 3518055702U, 733072558U, 4169325400U, 788493625U +}; +static const uint64_t init_gen_rand_64_expected[] = { + KQU(16924766246869039260), KQU( 8201438687333352714), + KQU( 2265290287015001750), KQU(18397264611805473832), + KQU( 3375255223302384358), KQU( 6345559975416828796), + KQU(18229739242790328073), KQU( 7596792742098800905), + KQU( 255338647169685981), KQU( 2052747240048610300), + KQU(18328151576097299343), KQU(12472905421133796567), + KQU(11315245349717600863), KQU(16594110197775871209), + KQU(15708751964632456450), KQU(10452031272054632535), + KQU(11097646720811454386), KQU( 4556090668445745441), + KQU(17116187693090663106), KQU(14931526836144510645), + KQU( 9190752218020552591), KQU( 9625800285771901401), + KQU(13995141077659972832), KQU( 5194209094927829625), + KQU( 4156788379151063303), KQU( 8523452593770139494), + KQU(14082382103049296727), KQU( 2462601863986088483), + KQU( 3030583461592840678), KQU( 5221622077872827681), + KQU( 3084210671228981236), KQU(13956758381389953823), + KQU(13503889856213423831), KQU(15696904024189836170), + KQU( 4612584152877036206), KQU( 6231135538447867881), + KQU(10172457294158869468), KQU( 6452258628466708150), + KQU(14044432824917330221), KQU( 370168364480044279), + KQU(10102144686427193359), KQU( 667870489994776076), + KQU( 2732271956925885858), KQU(18027788905977284151), + 
KQU(15009842788582923859), KQU( 7136357960180199542), + KQU(15901736243475578127), KQU(16951293785352615701), + KQU(10551492125243691632), KQU(17668869969146434804), + KQU(13646002971174390445), KQU( 9804471050759613248), + KQU( 5511670439655935493), KQU(18103342091070400926), + KQU(17224512747665137533), KQU(15534627482992618168), + KQU( 1423813266186582647), KQU(15821176807932930024), + KQU( 30323369733607156), KQU(11599382494723479403), + KQU( 653856076586810062), KQU( 3176437395144899659), + KQU(14028076268147963917), KQU(16156398271809666195), + KQU( 3166955484848201676), KQU( 5746805620136919390), + KQU(17297845208891256593), KQU(11691653183226428483), + KQU(17900026146506981577), KQU(15387382115755971042), + KQU(16923567681040845943), KQU( 8039057517199388606), + KQU(11748409241468629263), KQU( 794358245539076095), + KQU(13438501964693401242), KQU(14036803236515618962), + KQU( 5252311215205424721), KQU(17806589612915509081), + KQU( 6802767092397596006), KQU(14212120431184557140), + KQU( 1072951366761385712), KQU(13098491780722836296), + KQU( 9466676828710797353), KQU(12673056849042830081), + KQU(12763726623645357580), KQU(16468961652999309493), + KQU(15305979875636438926), KQU(17444713151223449734), + KQU( 5692214267627883674), KQU(13049589139196151505), + KQU( 880115207831670745), KQU( 1776529075789695498), + KQU(16695225897801466485), KQU(10666901778795346845), + KQU( 6164389346722833869), KQU( 2863817793264300475), + KQU( 9464049921886304754), KQU( 3993566636740015468), + KQU( 9983749692528514136), KQU(16375286075057755211), + KQU(16042643417005440820), KQU(11445419662923489877), + KQU( 7999038846885158836), KQU( 6721913661721511535), + KQU( 5363052654139357320), KQU( 1817788761173584205), + KQU(13290974386445856444), KQU( 4650350818937984680), + KQU( 8219183528102484836), KQU( 1569862923500819899), + KQU( 4189359732136641860), KQU(14202822961683148583), + KQU( 4457498315309429058), KQU(13089067387019074834), + KQU(11075517153328927293), KQU(10277016248336668389), + KQU( 7070509725324401122), KQU(17808892017780289380), + KQU(13143367339909287349), KQU( 1377743745360085151), + KQU( 5749341807421286485), KQU(14832814616770931325), + KQU( 7688820635324359492), KQU(10960474011539770045), + KQU( 81970066653179790), KQU(12619476072607878022), + KQU( 4419566616271201744), KQU(15147917311750568503), + KQU( 5549739182852706345), KQU( 7308198397975204770), + KQU(13580425496671289278), KQU(17070764785210130301), + KQU( 8202832846285604405), KQU( 6873046287640887249), + KQU( 6927424434308206114), KQU( 6139014645937224874), + KQU(10290373645978487639), KQU(15904261291701523804), + KQU( 9628743442057826883), KQU(18383429096255546714), + KQU( 4977413265753686967), KQU( 7714317492425012869), + KQU( 9025232586309926193), KQU(14627338359776709107), + KQU(14759849896467790763), KQU(10931129435864423252), + KQU( 4588456988775014359), KQU(10699388531797056724), + KQU( 468652268869238792), KQU( 5755943035328078086), + KQU( 2102437379988580216), KQU( 9986312786506674028), + KQU( 2654207180040945604), KQU( 8726634790559960062), + KQU( 100497234871808137), KQU( 2800137176951425819), + KQU( 6076627612918553487), KQU( 5780186919186152796), + KQU( 8179183595769929098), KQU( 6009426283716221169), + KQU( 2796662551397449358), KQU( 1756961367041986764), + KQU( 6972897917355606205), KQU(14524774345368968243), + KQU( 2773529684745706940), KQU( 4853632376213075959), + KQU( 4198177923731358102), KQU( 8271224913084139776), + KQU( 2741753121611092226), KQU(16782366145996731181), + KQU(15426125238972640790), 
KQU(13595497100671260342), + KQU( 3173531022836259898), KQU( 6573264560319511662), + KQU(18041111951511157441), KQU( 2351433581833135952), + KQU( 3113255578908173487), KQU( 1739371330877858784), + KQU(16046126562789165480), KQU( 8072101652214192925), + KQU(15267091584090664910), KQU( 9309579200403648940), + KQU( 5218892439752408722), KQU(14492477246004337115), + KQU(17431037586679770619), KQU( 7385248135963250480), + KQU( 9580144956565560660), KQU( 4919546228040008720), + KQU(15261542469145035584), KQU(18233297270822253102), + KQU( 5453248417992302857), KQU( 9309519155931460285), + KQU(10342813012345291756), KQU(15676085186784762381), + KQU(15912092950691300645), KQU( 9371053121499003195), + KQU( 9897186478226866746), KQU(14061858287188196327), + KQU( 122575971620788119), KQU(12146750969116317754), + KQU( 4438317272813245201), KQU( 8332576791009527119), + KQU(13907785691786542057), KQU(10374194887283287467), + KQU( 2098798755649059566), KQU( 3416235197748288894), + KQU( 8688269957320773484), KQU( 7503964602397371571), + KQU(16724977015147478236), KQU( 9461512855439858184), + KQU(13259049744534534727), KQU( 3583094952542899294), + KQU( 8764245731305528292), KQU(13240823595462088985), + KQU(13716141617617910448), KQU(18114969519935960955), + KQU( 2297553615798302206), KQU( 4585521442944663362), + KQU(17776858680630198686), KQU( 4685873229192163363), + KQU( 152558080671135627), KQU(15424900540842670088), + KQU(13229630297130024108), KQU(17530268788245718717), + KQU(16675633913065714144), KQU( 3158912717897568068), + KQU(15399132185380087288), KQU( 7401418744515677872), + KQU(13135412922344398535), KQU( 6385314346100509511), + KQU(13962867001134161139), KQU(10272780155442671999), + KQU(12894856086597769142), KQU(13340877795287554994), + KQU(12913630602094607396), KQU(12543167911119793857), + KQU(17343570372251873096), KQU(10959487764494150545), + KQU( 6966737953093821128), KQU(13780699135496988601), + KQU( 4405070719380142046), KQU(14923788365607284982), + KQU( 2869487678905148380), KQU( 6416272754197188403), + KQU(15017380475943612591), KQU( 1995636220918429487), + KQU( 3402016804620122716), KQU(15800188663407057080), + KQU(11362369990390932882), KQU(15262183501637986147), + KQU(10239175385387371494), KQU( 9352042420365748334), + KQU( 1682457034285119875), KQU( 1724710651376289644), + KQU( 2038157098893817966), KQU( 9897825558324608773), + KQU( 1477666236519164736), KQU(16835397314511233640), + KQU(10370866327005346508), KQU(10157504370660621982), + KQU(12113904045335882069), KQU(13326444439742783008), + KQU(11302769043000765804), KQU(13594979923955228484), + KQU(11779351762613475968), KQU( 3786101619539298383), + KQU( 8021122969180846063), KQU(15745904401162500495), + KQU(10762168465993897267), KQU(13552058957896319026), + KQU(11200228655252462013), KQU( 5035370357337441226), + KQU( 7593918984545500013), KQU( 5418554918361528700), + KQU( 4858270799405446371), KQU( 9974659566876282544), + KQU(18227595922273957859), KQU( 2772778443635656220), + KQU(14285143053182085385), KQU( 9939700992429600469), + KQU(12756185904545598068), KQU( 2020783375367345262), + KQU( 57026775058331227), KQU( 950827867930065454), + KQU( 6602279670145371217), KQU( 2291171535443566929), + KQU( 5832380724425010313), KQU( 1220343904715982285), + KQU(17045542598598037633), KQU(15460481779702820971), + KQU(13948388779949365130), KQU(13975040175430829518), + KQU(17477538238425541763), KQU(11104663041851745725), + KQU(15860992957141157587), KQU(14529434633012950138), + KQU( 2504838019075394203), KQU( 7512113882611121886), + 
KQU( 4859973559980886617), KQU( 1258601555703250219), + KQU(15594548157514316394), KQU( 4516730171963773048), + KQU(11380103193905031983), KQU( 6809282239982353344), + KQU(18045256930420065002), KQU( 2453702683108791859), + KQU( 977214582986981460), KQU( 2006410402232713466), + KQU( 6192236267216378358), KQU( 3429468402195675253), + KQU(18146933153017348921), KQU(17369978576367231139), + KQU( 1246940717230386603), KQU(11335758870083327110), + KQU(14166488801730353682), KQU( 9008573127269635732), + KQU(10776025389820643815), KQU(15087605441903942962), + KQU( 1359542462712147922), KQU(13898874411226454206), + KQU(17911176066536804411), KQU( 9435590428600085274), + KQU( 294488509967864007), KQU( 8890111397567922046), + KQU( 7987823476034328778), KQU(13263827582440967651), + KQU( 7503774813106751573), KQU(14974747296185646837), + KQU( 8504765037032103375), KQU(17340303357444536213), + KQU( 7704610912964485743), KQU( 8107533670327205061), + KQU( 9062969835083315985), KQU(16968963142126734184), + KQU(12958041214190810180), KQU( 2720170147759570200), + KQU( 2986358963942189566), KQU(14884226322219356580), + KQU( 286224325144368520), KQU(11313800433154279797), + KQU(18366849528439673248), KQU(17899725929482368789), + KQU( 3730004284609106799), KQU( 1654474302052767205), + KQU( 5006698007047077032), KQU( 8196893913601182838), + KQU(15214541774425211640), KQU(17391346045606626073), + KQU( 8369003584076969089), KQU( 3939046733368550293), + KQU(10178639720308707785), KQU( 2180248669304388697), + KQU( 62894391300126322), KQU( 9205708961736223191), + KQU( 6837431058165360438), KQU( 3150743890848308214), + KQU(17849330658111464583), KQU(12214815643135450865), + KQU(13410713840519603402), KQU( 3200778126692046802), + KQU(13354780043041779313), KQU( 800850022756886036), + KQU(15660052933953067433), KQU( 6572823544154375676), + KQU(11030281857015819266), KQU(12682241941471433835), + KQU(11654136407300274693), KQU( 4517795492388641109), + KQU( 9757017371504524244), KQU(17833043400781889277), + KQU(12685085201747792227), KQU(10408057728835019573), + KQU( 98370418513455221), KQU( 6732663555696848598), + KQU(13248530959948529780), KQU( 3530441401230622826), + KQU(18188251992895660615), KQU( 1847918354186383756), + KQU( 1127392190402660921), KQU(11293734643143819463), + KQU( 3015506344578682982), KQU(13852645444071153329), + KQU( 2121359659091349142), KQU( 1294604376116677694), + KQU( 5616576231286352318), KQU( 7112502442954235625), + KQU(11676228199551561689), KQU(12925182803007305359), + KQU( 7852375518160493082), KQU( 1136513130539296154), + KQU( 5636923900916593195), KQU( 3221077517612607747), + KQU(17784790465798152513), KQU( 3554210049056995938), + KQU(17476839685878225874), KQU( 3206836372585575732), + KQU( 2765333945644823430), KQU(10080070903718799528), + KQU( 5412370818878286353), KQU( 9689685887726257728), + KQU( 8236117509123533998), KQU( 1951139137165040214), + KQU( 4492205209227980349), KQU(16541291230861602967), + KQU( 1424371548301437940), KQU( 9117562079669206794), + KQU(14374681563251691625), KQU(13873164030199921303), + KQU( 6680317946770936731), KQU(15586334026918276214), + KQU(10896213950976109802), KQU( 9506261949596413689), + KQU( 9903949574308040616), KQU( 6038397344557204470), + KQU( 174601465422373648), KQU(15946141191338238030), + KQU(17142225620992044937), KQU( 7552030283784477064), + KQU( 2947372384532947997), KQU( 510797021688197711), + KQU( 4962499439249363461), KQU( 23770320158385357), + KQU( 959774499105138124), KQU( 1468396011518788276), + KQU( 2015698006852312308), KQU( 
4149400718489980136), + KQU( 5992916099522371188), KQU(10819182935265531076), + KQU(16189787999192351131), KQU( 342833961790261950), + KQU(12470830319550495336), KQU(18128495041912812501), + KQU( 1193600899723524337), KQU( 9056793666590079770), + KQU( 2154021227041669041), KQU( 4963570213951235735), + KQU( 4865075960209211409), KQU( 2097724599039942963), + KQU( 2024080278583179845), KQU(11527054549196576736), + KQU(10650256084182390252), KQU( 4808408648695766755), + KQU( 1642839215013788844), KQU(10607187948250398390), + KQU( 7076868166085913508), KQU( 730522571106887032), + KQU(12500579240208524895), KQU( 4484390097311355324), + KQU(15145801330700623870), KQU( 8055827661392944028), + KQU( 5865092976832712268), KQU(15159212508053625143), + KQU( 3560964582876483341), KQU( 4070052741344438280), + KQU( 6032585709886855634), KQU(15643262320904604873), + KQU( 2565119772293371111), KQU( 318314293065348260), + KQU(15047458749141511872), KQU( 7772788389811528730), + KQU( 7081187494343801976), KQU( 6465136009467253947), + KQU(10425940692543362069), KQU( 554608190318339115), + KQU(14796699860302125214), KQU( 1638153134431111443), + KQU(10336967447052276248), KQU( 8412308070396592958), + KQU( 4004557277152051226), KQU( 8143598997278774834), + KQU(16413323996508783221), KQU(13139418758033994949), + KQU( 9772709138335006667), KQU( 2818167159287157659), + KQU(17091740573832523669), KQU(14629199013130751608), + KQU(18268322711500338185), KQU( 8290963415675493063), + KQU( 8830864907452542588), KQU( 1614839084637494849), + KQU(14855358500870422231), KQU( 3472996748392519937), + KQU(15317151166268877716), KQU( 5825895018698400362), + KQU(16730208429367544129), KQU(10481156578141202800), + KQU( 4746166512382823750), KQU(12720876014472464998), + KQU( 8825177124486735972), KQU(13733447296837467838), + KQU( 6412293741681359625), KQU( 8313213138756135033), + KQU(11421481194803712517), KQU( 7997007691544174032), + KQU( 6812963847917605930), KQU( 9683091901227558641), + KQU(14703594165860324713), KQU( 1775476144519618309), + KQU( 2724283288516469519), KQU( 717642555185856868), + KQU( 8736402192215092346), KQU(11878800336431381021), + KQU( 4348816066017061293), KQU( 6115112756583631307), + KQU( 9176597239667142976), KQU(12615622714894259204), + KQU(10283406711301385987), KQU( 5111762509485379420), + KQU( 3118290051198688449), KQU( 7345123071632232145), + KQU( 9176423451688682359), KQU( 4843865456157868971), + KQU(12008036363752566088), KQU(12058837181919397720), + KQU( 2145073958457347366), KQU( 1526504881672818067), + KQU( 3488830105567134848), KQU(13208362960674805143), + KQU( 4077549672899572192), KQU( 7770995684693818365), + KQU( 1398532341546313593), KQU(12711859908703927840), + KQU( 1417561172594446813), KQU(17045191024194170604), + KQU( 4101933177604931713), KQU(14708428834203480320), + KQU(17447509264469407724), KQU(14314821973983434255), + KQU(17990472271061617265), KQU( 5087756685841673942), + KQU(12797820586893859939), KQU( 1778128952671092879), + KQU( 3535918530508665898), KQU( 9035729701042481301), + KQU(14808661568277079962), KQU(14587345077537747914), + KQU(11920080002323122708), KQU( 6426515805197278753), + KQU( 3295612216725984831), KQU(11040722532100876120), + KQU(12305952936387598754), KQU(16097391899742004253), + KQU( 4908537335606182208), KQU(12446674552196795504), + KQU(16010497855816895177), KQU( 9194378874788615551), + KQU( 3382957529567613384), KQU( 5154647600754974077), + KQU( 9801822865328396141), KQU( 9023662173919288143), + KQU(17623115353825147868), KQU( 8238115767443015816), + 
KQU(15811444159859002560), KQU( 9085612528904059661), + KQU( 6888601089398614254), KQU( 258252992894160189), + KQU( 6704363880792428622), KQU( 6114966032147235763), + KQU(11075393882690261875), KQU( 8797664238933620407), + KQU( 5901892006476726920), KQU( 5309780159285518958), + KQU(14940808387240817367), KQU(14642032021449656698), + KQU( 9808256672068504139), KQU( 3670135111380607658), + KQU(11211211097845960152), KQU( 1474304506716695808), + KQU(15843166204506876239), KQU( 7661051252471780561), + KQU(10170905502249418476), KQU( 7801416045582028589), + KQU( 2763981484737053050), KQU( 9491377905499253054), + KQU(16201395896336915095), KQU( 9256513756442782198), + KQU( 5411283157972456034), KQU( 5059433122288321676), + KQU( 4327408006721123357), KQU( 9278544078834433377), + KQU( 7601527110882281612), KQU(11848295896975505251), + KQU(12096998801094735560), KQU(14773480339823506413), + KQU(15586227433895802149), KQU(12786541257830242872), + KQU( 6904692985140503067), KQU( 5309011515263103959), + KQU(12105257191179371066), KQU(14654380212442225037), + KQU( 2556774974190695009), KQU( 4461297399927600261), + KQU(14888225660915118646), KQU(14915459341148291824), + KQU( 2738802166252327631), KQU( 6047155789239131512), + KQU(12920545353217010338), KQU(10697617257007840205), + KQU( 2751585253158203504), KQU(13252729159780047496), + KQU(14700326134672815469), KQU(14082527904374600529), + KQU(16852962273496542070), KQU(17446675504235853907), + KQU(15019600398527572311), KQU(12312781346344081551), + KQU(14524667935039810450), KQU( 5634005663377195738), + KQU(11375574739525000569), KQU( 2423665396433260040), + KQU( 5222836914796015410), KQU( 4397666386492647387), + KQU( 4619294441691707638), KQU( 665088602354770716), + KQU(13246495665281593610), KQU( 6564144270549729409), + KQU(10223216188145661688), KQU( 3961556907299230585), + KQU(11543262515492439914), KQU(16118031437285993790), + KQU( 7143417964520166465), KQU(13295053515909486772), + KQU( 40434666004899675), KQU(17127804194038347164), + KQU( 8599165966560586269), KQU( 8214016749011284903), + KQU(13725130352140465239), KQU( 5467254474431726291), + KQU( 7748584297438219877), KQU(16933551114829772472), + KQU( 2169618439506799400), KQU( 2169787627665113463), + KQU(17314493571267943764), KQU(18053575102911354912), + KQU(11928303275378476973), KQU(11593850925061715550), + KQU(17782269923473589362), KQU( 3280235307704747039), + KQU( 6145343578598685149), KQU(17080117031114086090), + KQU(18066839902983594755), KQU( 6517508430331020706), + KQU( 8092908893950411541), KQU(12558378233386153732), + KQU( 4476532167973132976), KQU(16081642430367025016), + KQU( 4233154094369139361), KQU( 8693630486693161027), + KQU(11244959343027742285), KQU(12273503967768513508), + KQU(14108978636385284876), KQU( 7242414665378826984), + KQU( 6561316938846562432), KQU( 8601038474994665795), + KQU(17532942353612365904), KQU(17940076637020912186), + KQU( 7340260368823171304), KQU( 7061807613916067905), + KQU(10561734935039519326), KQU(17990796503724650862), + KQU( 6208732943911827159), KQU( 359077562804090617), + KQU(14177751537784403113), KQU(10659599444915362902), + KQU(15081727220615085833), KQU(13417573895659757486), + KQU(15513842342017811524), KQU(11814141516204288231), + KQU( 1827312513875101814), KQU( 2804611699894603103), + KQU(17116500469975602763), KQU(12270191815211952087), + KQU(12256358467786024988), KQU(18435021722453971267), + KQU( 671330264390865618), KQU( 476504300460286050), + KQU(16465470901027093441), KQU( 4047724406247136402), + KQU( 1322305451411883346), KQU( 
1388308688834322280), + KQU( 7303989085269758176), KQU( 9323792664765233642), + KQU( 4542762575316368936), KQU(17342696132794337618), + KQU( 4588025054768498379), KQU(13415475057390330804), + KQU(17880279491733405570), KQU(10610553400618620353), + KQU( 3180842072658960139), KQU(13002966655454270120), + KQU( 1665301181064982826), KQU( 7083673946791258979), + KQU( 190522247122496820), KQU(17388280237250677740), + KQU( 8430770379923642945), KQU(12987180971921668584), + KQU( 2311086108365390642), KQU( 2870984383579822345), + KQU(14014682609164653318), KQU(14467187293062251484), + KQU( 192186361147413298), KQU(15171951713531796524), + KQU( 9900305495015948728), KQU(17958004775615466344), + KQU(14346380954498606514), KQU(18040047357617407096), + KQU( 5035237584833424532), KQU(15089555460613972287), + KQU( 4131411873749729831), KQU( 1329013581168250330), + KQU(10095353333051193949), KQU(10749518561022462716), + KQU( 9050611429810755847), KQU(15022028840236655649), + KQU( 8775554279239748298), KQU(13105754025489230502), + KQU(15471300118574167585), KQU( 89864764002355628), + KQU( 8776416323420466637), KQU( 5280258630612040891), + KQU( 2719174488591862912), KQU( 7599309137399661994), + KQU(15012887256778039979), KQU(14062981725630928925), + KQU(12038536286991689603), KQU( 7089756544681775245), + KQU(10376661532744718039), KQU( 1265198725901533130), + KQU(13807996727081142408), KQU( 2935019626765036403), + KQU( 7651672460680700141), KQU( 3644093016200370795), + KQU( 2840982578090080674), KQU(17956262740157449201), + KQU(18267979450492880548), KQU(11799503659796848070), + KQU( 9942537025669672388), KQU(11886606816406990297), + KQU( 5488594946437447576), KQU( 7226714353282744302), + KQU( 3784851653123877043), KQU( 878018453244803041), + KQU(12110022586268616085), KQU( 734072179404675123), + KQU(11869573627998248542), KQU( 469150421297783998), + KQU( 260151124912803804), KQU(11639179410120968649), + KQU( 9318165193840846253), KQU(12795671722734758075), + KQU(15318410297267253933), KQU( 691524703570062620), + KQU( 5837129010576994601), KQU(15045963859726941052), + KQU( 5850056944932238169), KQU(12017434144750943807), + KQU( 7447139064928956574), KQU( 3101711812658245019), + KQU(16052940704474982954), KQU(18195745945986994042), + KQU( 8932252132785575659), KQU(13390817488106794834), + KQU(11582771836502517453), KQU( 4964411326683611686), + KQU( 2195093981702694011), KQU(14145229538389675669), + KQU(16459605532062271798), KQU( 866316924816482864), + KQU( 4593041209937286377), KQU( 8415491391910972138), + KQU( 4171236715600528969), KQU(16637569303336782889), + KQU( 2002011073439212680), KQU(17695124661097601411), + KQU( 4627687053598611702), KQU( 7895831936020190403), + KQU( 8455951300917267802), KQU( 2923861649108534854), + KQU( 8344557563927786255), KQU( 6408671940373352556), + KQU(12210227354536675772), KQU(14294804157294222295), + KQU(10103022425071085127), KQU(10092959489504123771), + KQU( 6554774405376736268), KQU(12629917718410641774), + KQU( 6260933257596067126), KQU( 2460827021439369673), + KQU( 2541962996717103668), KQU( 597377203127351475), + KQU( 5316984203117315309), KQU( 4811211393563241961), + KQU(13119698597255811641), KQU( 8048691512862388981), + KQU(10216818971194073842), KQU( 4612229970165291764), + KQU(10000980798419974770), KQU( 6877640812402540687), + KQU( 1488727563290436992), KQU( 2227774069895697318), + KQU(11237754507523316593), KQU(13478948605382290972), + KQU( 1963583846976858124), KQU( 5512309205269276457), + KQU( 3972770164717652347), KQU( 3841751276198975037), + 
KQU(10283343042181903117), KQU( 8564001259792872199), + KQU(16472187244722489221), KQU( 8953493499268945921), + KQU( 3518747340357279580), KQU( 4003157546223963073), + KQU( 3270305958289814590), KQU( 3966704458129482496), + KQU( 8122141865926661939), KQU(14627734748099506653), + KQU(13064426990862560568), KQU( 2414079187889870829), + KQU( 5378461209354225306), KQU(10841985740128255566), + KQU( 538582442885401738), KQU( 7535089183482905946), + KQU(16117559957598879095), KQU( 8477890721414539741), + KQU( 1459127491209533386), KQU(17035126360733620462), + KQU( 8517668552872379126), KQU(10292151468337355014), + KQU(17081267732745344157), KQU(13751455337946087178), + KQU(14026945459523832966), KQU( 6653278775061723516), + KQU(10619085543856390441), KQU( 2196343631481122885), + KQU(10045966074702826136), KQU(10082317330452718282), + KQU( 5920859259504831242), KQU( 9951879073426540617), + KQU( 7074696649151414158), KQU(15808193543879464318), + KQU( 7385247772746953374), KQU( 3192003544283864292), + KQU(18153684490917593847), KQU(12423498260668568905), + KQU(10957758099756378169), KQU(11488762179911016040), + KQU( 2099931186465333782), KQU(11180979581250294432), + KQU( 8098916250668367933), KQU( 3529200436790763465), + KQU(12988418908674681745), KQU( 6147567275954808580), + KQU( 3207503344604030989), KQU(10761592604898615360), + KQU( 229854861031893504), KQU( 8809853962667144291), + KQU(13957364469005693860), KQU( 7634287665224495886), + KQU(12353487366976556874), KQU( 1134423796317152034), + KQU( 2088992471334107068), KQU( 7393372127190799698), + KQU( 1845367839871058391), KQU( 207922563987322884), + KQU(11960870813159944976), KQU(12182120053317317363), + KQU(17307358132571709283), KQU(13871081155552824936), + KQU(18304446751741566262), KQU( 7178705220184302849), + KQU(10929605677758824425), KQU(16446976977835806844), + KQU(13723874412159769044), KQU( 6942854352100915216), + KQU( 1726308474365729390), KQU( 2150078766445323155), + KQU(15345558947919656626), KQU(12145453828874527201), + KQU( 2054448620739726849), KQU( 2740102003352628137), + KQU(11294462163577610655), KQU( 756164283387413743), + KQU(17841144758438810880), KQU(10802406021185415861), + KQU( 8716455530476737846), KQU( 6321788834517649606), + KQU(14681322910577468426), KQU(17330043563884336387), + KQU(12701802180050071614), KQU(14695105111079727151), + KQU( 5112098511654172830), KQU( 4957505496794139973), + KQU( 8270979451952045982), KQU(12307685939199120969), + KQU(12425799408953443032), KQU( 8376410143634796588), + KQU(16621778679680060464), KQU( 3580497854566660073), + KQU( 1122515747803382416), KQU( 857664980960597599), + KQU( 6343640119895925918), KQU(12878473260854462891), + KQU(10036813920765722626), KQU(14451335468363173812), + KQU( 5476809692401102807), KQU(16442255173514366342), + KQU(13060203194757167104), KQU(14354124071243177715), + KQU(15961249405696125227), KQU(13703893649690872584), + KQU( 363907326340340064), KQU( 6247455540491754842), + KQU(12242249332757832361), KQU( 156065475679796717), + KQU( 9351116235749732355), KQU( 4590350628677701405), + KQU( 1671195940982350389), KQU(13501398458898451905), + KQU( 6526341991225002255), KQU( 1689782913778157592), + KQU( 7439222350869010334), KQU(13975150263226478308), + KQU(11411961169932682710), KQU(17204271834833847277), + KQU( 541534742544435367), KQU( 6591191931218949684), + KQU( 2645454775478232486), KQU( 4322857481256485321), + KQU( 8477416487553065110), KQU(12902505428548435048), + KQU( 971445777981341415), KQU(14995104682744976712), + KQU( 4243341648807158063), KQU( 
8695061252721927661), + KQU( 5028202003270177222), KQU( 2289257340915567840), + KQU(13870416345121866007), KQU(13994481698072092233), + KQU( 6912785400753196481), KQU( 2278309315841980139), + KQU( 4329765449648304839), KQU( 5963108095785485298), + KQU( 4880024847478722478), KQU(16015608779890240947), + KQU( 1866679034261393544), KQU( 914821179919731519), + KQU( 9643404035648760131), KQU( 2418114953615593915), + KQU( 944756836073702374), KQU(15186388048737296834), + KQU( 7723355336128442206), KQU( 7500747479679599691), + KQU(18013961306453293634), KQU( 2315274808095756456), + KQU(13655308255424029566), KQU(17203800273561677098), + KQU( 1382158694422087756), KQU( 5090390250309588976), + KQU( 517170818384213989), KQU( 1612709252627729621), + KQU( 1330118955572449606), KQU( 300922478056709885), + KQU(18115693291289091987), KQU(13491407109725238321), + KQU(15293714633593827320), KQU( 5151539373053314504), + KQU( 5951523243743139207), KQU(14459112015249527975), + KQU( 5456113959000700739), KQU( 3877918438464873016), + KQU(12534071654260163555), KQU(15871678376893555041), + KQU(11005484805712025549), KQU(16353066973143374252), + KQU( 4358331472063256685), KQU( 8268349332210859288), + KQU(12485161590939658075), KQU(13955993592854471343), + KQU( 5911446886848367039), KQU(14925834086813706974), + KQU( 6590362597857994805), KQU( 1280544923533661875), + KQU( 1637756018947988164), KQU( 4734090064512686329), + KQU(16693705263131485912), KQU( 6834882340494360958), + KQU( 8120732176159658505), KQU( 2244371958905329346), + KQU(10447499707729734021), KQU( 7318742361446942194), + KQU( 8032857516355555296), KQU(14023605983059313116), + KQU( 1032336061815461376), KQU( 9840995337876562612), + KQU( 9869256223029203587), KQU(12227975697177267636), + KQU(12728115115844186033), KQU( 7752058479783205470), + KQU( 729733219713393087), KQU(12954017801239007622) +}; +static const uint64_t init_by_array_64_expected[] = { + KQU( 2100341266307895239), KQU( 8344256300489757943), + KQU(15687933285484243894), KQU( 8268620370277076319), + KQU(12371852309826545459), KQU( 8800491541730110238), + KQU(18113268950100835773), KQU( 2886823658884438119), + KQU( 3293667307248180724), KQU( 9307928143300172731), + KQU( 7688082017574293629), KQU( 900986224735166665), + KQU( 9977972710722265039), KQU( 6008205004994830552), + KQU( 546909104521689292), KQU( 7428471521869107594), + KQU(14777563419314721179), KQU(16116143076567350053), + KQU( 5322685342003142329), KQU( 4200427048445863473), + KQU( 4693092150132559146), KQU(13671425863759338582), + KQU( 6747117460737639916), KQU( 4732666080236551150), + KQU( 5912839950611941263), KQU( 3903717554504704909), + KQU( 2615667650256786818), KQU(10844129913887006352), + KQU(13786467861810997820), KQU(14267853002994021570), + KQU(13767807302847237439), KQU(16407963253707224617), + KQU( 4802498363698583497), KQU( 2523802839317209764), + KQU( 3822579397797475589), KQU( 8950320572212130610), + KQU( 3745623504978342534), KQU(16092609066068482806), + KQU( 9817016950274642398), KQU(10591660660323829098), + KQU(11751606650792815920), KQU( 5122873818577122211), + KQU(17209553764913936624), KQU( 6249057709284380343), + KQU(15088791264695071830), KQU(15344673071709851930), + KQU( 4345751415293646084), KQU( 2542865750703067928), + KQU(13520525127852368784), KQU(18294188662880997241), + KQU( 3871781938044881523), KQU( 2873487268122812184), + KQU(15099676759482679005), KQU(15442599127239350490), + KQU( 6311893274367710888), KQU( 3286118760484672933), + KQU( 4146067961333542189), KQU(13303942567897208770), + KQU( 
8196013722255630418), KQU( 4437815439340979989), + KQU(15433791533450605135), KQU( 4254828956815687049), + KQU( 1310903207708286015), KQU(10529182764462398549), + KQU(14900231311660638810), KQU( 9727017277104609793), + KQU( 1821308310948199033), KQU(11628861435066772084), + KQU( 9469019138491546924), KQU( 3145812670532604988), + KQU( 9938468915045491919), KQU( 1562447430672662142), + KQU(13963995266697989134), KQU( 3356884357625028695), + KQU( 4499850304584309747), KQU( 8456825817023658122), + KQU(10859039922814285279), KQU( 8099512337972526555), + KQU( 348006375109672149), KQU(11919893998241688603), + KQU( 1104199577402948826), KQU(16689191854356060289), + KQU(10992552041730168078), KQU( 7243733172705465836), + KQU( 5668075606180319560), KQU(18182847037333286970), + KQU( 4290215357664631322), KQU( 4061414220791828613), + KQU(13006291061652989604), KQU( 7140491178917128798), + KQU(12703446217663283481), KQU( 5500220597564558267), + KQU(10330551509971296358), KQU(15958554768648714492), + KQU( 5174555954515360045), KQU( 1731318837687577735), + KQU( 3557700801048354857), KQU(13764012341928616198), + KQU(13115166194379119043), KQU( 7989321021560255519), + KQU( 2103584280905877040), KQU( 9230788662155228488), + KQU(16396629323325547654), KQU( 657926409811318051), + KQU(15046700264391400727), KQU( 5120132858771880830), + KQU( 7934160097989028561), KQU( 6963121488531976245), + KQU(17412329602621742089), KQU(15144843053931774092), + KQU(17204176651763054532), KQU(13166595387554065870), + KQU( 8590377810513960213), KQU( 5834365135373991938), + KQU( 7640913007182226243), KQU( 3479394703859418425), + KQU(16402784452644521040), KQU( 4993979809687083980), + KQU(13254522168097688865), KQU(15643659095244365219), + KQU( 5881437660538424982), KQU(11174892200618987379), + KQU( 254409966159711077), KQU(17158413043140549909), + KQU( 3638048789290376272), KQU( 1376816930299489190), + KQU( 4622462095217761923), KQU(15086407973010263515), + KQU(13253971772784692238), KQU( 5270549043541649236), + KQU(11182714186805411604), KQU(12283846437495577140), + KQU( 5297647149908953219), KQU(10047451738316836654), + KQU( 4938228100367874746), KQU(12328523025304077923), + KQU( 3601049438595312361), KQU( 9313624118352733770), + KQU(13322966086117661798), KQU(16660005705644029394), + KQU(11337677526988872373), KQU(13869299102574417795), + KQU(15642043183045645437), KQU( 3021755569085880019), + KQU( 4979741767761188161), KQU(13679979092079279587), + KQU( 3344685842861071743), KQU(13947960059899588104), + KQU( 305806934293368007), KQU( 5749173929201650029), + KQU(11123724852118844098), KQU(15128987688788879802), + KQU(15251651211024665009), KQU( 7689925933816577776), + KQU(16732804392695859449), KQU(17087345401014078468), + KQU(14315108589159048871), KQU( 4820700266619778917), + KQU(16709637539357958441), KQU( 4936227875177351374), + KQU( 2137907697912987247), KQU(11628565601408395420), + KQU( 2333250549241556786), KQU( 5711200379577778637), + KQU( 5170680131529031729), KQU(12620392043061335164), + KQU( 95363390101096078), KQU( 5487981914081709462), + KQU( 1763109823981838620), KQU( 3395861271473224396), + KQU( 1300496844282213595), KQU( 6894316212820232902), + KQU(10673859651135576674), KQU( 5911839658857903252), + KQU(17407110743387299102), KQU( 8257427154623140385), + KQU(11389003026741800267), KQU( 4070043211095013717), + KQU(11663806997145259025), KQU(15265598950648798210), + KQU( 630585789434030934), KQU( 3524446529213587334), + KQU( 7186424168495184211), KQU(10806585451386379021), + KQU(11120017753500499273), KQU( 
1586837651387701301), + KQU(17530454400954415544), KQU( 9991670045077880430), + KQU( 7550997268990730180), KQU( 8640249196597379304), + KQU( 3522203892786893823), KQU(10401116549878854788), + KQU(13690285544733124852), KQU( 8295785675455774586), + KQU(15535716172155117603), KQU( 3112108583723722511), + KQU(17633179955339271113), KQU(18154208056063759375), + KQU( 1866409236285815666), KQU(13326075895396412882), + KQU( 8756261842948020025), KQU( 6281852999868439131), + KQU(15087653361275292858), KQU(10333923911152949397), + KQU( 5265567645757408500), KQU(12728041843210352184), + KQU( 6347959327507828759), KQU( 154112802625564758), + KQU(18235228308679780218), KQU( 3253805274673352418), + KQU( 4849171610689031197), KQU(17948529398340432518), + KQU(13803510475637409167), KQU(13506570190409883095), + KQU(15870801273282960805), KQU( 8451286481299170773), + KQU( 9562190620034457541), KQU( 8518905387449138364), + KQU(12681306401363385655), KQU( 3788073690559762558), + KQU( 5256820289573487769), KQU( 2752021372314875467), + KQU( 6354035166862520716), KQU( 4328956378309739069), + KQU( 449087441228269600), KQU( 5533508742653090868), + KQU( 1260389420404746988), KQU(18175394473289055097), + KQU( 1535467109660399420), KQU( 8818894282874061442), + KQU(12140873243824811213), KQU(15031386653823014946), + KQU( 1286028221456149232), KQU( 6329608889367858784), + KQU( 9419654354945132725), KQU( 6094576547061672379), + KQU(17706217251847450255), KQU( 1733495073065878126), + KQU(16918923754607552663), KQU( 8881949849954945044), + KQU(12938977706896313891), KQU(14043628638299793407), + KQU(18393874581723718233), KQU( 6886318534846892044), + KQU(14577870878038334081), KQU(13541558383439414119), + KQU(13570472158807588273), KQU(18300760537910283361), + KQU( 818368572800609205), KQU( 1417000585112573219), + KQU(12337533143867683655), KQU(12433180994702314480), + KQU( 778190005829189083), KQU(13667356216206524711), + KQU( 9866149895295225230), KQU(11043240490417111999), + KQU( 1123933826541378598), KQU( 6469631933605123610), + KQU(14508554074431980040), KQU(13918931242962026714), + KQU( 2870785929342348285), KQU(14786362626740736974), + KQU(13176680060902695786), KQU( 9591778613541679456), + KQU( 9097662885117436706), KQU( 749262234240924947), + KQU( 1944844067793307093), KQU( 4339214904577487742), + KQU( 8009584152961946551), KQU(16073159501225501777), + KQU( 3335870590499306217), KQU(17088312653151202847), + KQU( 3108893142681931848), KQU(16636841767202792021), + KQU(10423316431118400637), KQU( 8008357368674443506), + KQU(11340015231914677875), KQU(17687896501594936090), + KQU(15173627921763199958), KQU( 542569482243721959), + KQU(15071714982769812975), KQU( 4466624872151386956), + KQU( 1901780715602332461), KQU( 9822227742154351098), + KQU( 1479332892928648780), KQU( 6981611948382474400), + KQU( 7620824924456077376), KQU(14095973329429406782), + KQU( 7902744005696185404), KQU(15830577219375036920), + KQU(10287076667317764416), KQU(12334872764071724025), + KQU( 4419302088133544331), KQU(14455842851266090520), + KQU(12488077416504654222), KQU( 7953892017701886766), + KQU( 6331484925529519007), KQU( 4902145853785030022), + KQU(17010159216096443073), KQU(11945354668653886087), + KQU(15112022728645230829), KQU(17363484484522986742), + KQU( 4423497825896692887), KQU( 8155489510809067471), + KQU( 258966605622576285), KQU( 5462958075742020534), + KQU( 6763710214913276228), KQU( 2368935183451109054), + KQU(14209506165246453811), KQU( 2646257040978514881), + KQU( 3776001911922207672), KQU( 1419304601390147631), + 
KQU(14987366598022458284), KQU( 3977770701065815721), + KQU( 730820417451838898), KQU( 3982991703612885327), + KQU( 2803544519671388477), KQU(17067667221114424649), + KQU( 2922555119737867166), KQU( 1989477584121460932), + KQU(15020387605892337354), KQU( 9293277796427533547), + KQU(10722181424063557247), KQU(16704542332047511651), + KQU( 5008286236142089514), KQU(16174732308747382540), + KQU(17597019485798338402), KQU(13081745199110622093), + KQU( 8850305883842258115), KQU(12723629125624589005), + KQU( 8140566453402805978), KQU(15356684607680935061), + KQU(14222190387342648650), KQU(11134610460665975178), + KQU( 1259799058620984266), KQU(13281656268025610041), + KQU( 298262561068153992), KQU(12277871700239212922), + KQU(13911297774719779438), KQU(16556727962761474934), + KQU(17903010316654728010), KQU( 9682617699648434744), + KQU(14757681836838592850), KQU( 1327242446558524473), + KQU(11126645098780572792), KQU( 1883602329313221774), + KQU( 2543897783922776873), KQU(15029168513767772842), + KQU(12710270651039129878), KQU(16118202956069604504), + KQU(15010759372168680524), KQU( 2296827082251923948), + KQU(10793729742623518101), KQU(13829764151845413046), + KQU(17769301223184451213), KQU( 3118268169210783372), + KQU(17626204544105123127), KQU( 7416718488974352644), + KQU(10450751996212925994), KQU( 9352529519128770586), + KQU( 259347569641110140), KQU( 8048588892269692697), + KQU( 1774414152306494058), KQU(10669548347214355622), + KQU(13061992253816795081), KQU(18432677803063861659), + KQU( 8879191055593984333), KQU(12433753195199268041), + KQU(14919392415439730602), KQU( 6612848378595332963), + KQU( 6320986812036143628), KQU(10465592420226092859), + KQU( 4196009278962570808), KQU( 3747816564473572224), + KQU(17941203486133732898), KQU( 2350310037040505198), + KQU( 5811779859134370113), KQU(10492109599506195126), + KQU( 7699650690179541274), KQU( 1954338494306022961), + KQU(14095816969027231152), KQU( 5841346919964852061), + KQU(14945969510148214735), KQU( 3680200305887550992), + KQU( 6218047466131695792), KQU( 8242165745175775096), + KQU(11021371934053307357), KQU( 1265099502753169797), + KQU( 4644347436111321718), KQU( 3609296916782832859), + KQU( 8109807992218521571), KQU(18387884215648662020), + KQU(14656324896296392902), KQU(17386819091238216751), + KQU(17788300878582317152), KQU( 7919446259742399591), + KQU( 4466613134576358004), KQU(12928181023667938509), + KQU(13147446154454932030), KQU(16552129038252734620), + KQU( 8395299403738822450), KQU(11313817655275361164), + KQU( 434258809499511718), KQU( 2074882104954788676), + KQU( 7929892178759395518), KQU( 9006461629105745388), + KQU( 5176475650000323086), KQU(11128357033468341069), + KQU(12026158851559118955), KQU(14699716249471156500), + KQU( 448982497120206757), KQU( 4156475356685519900), + KQU( 6063816103417215727), KQU(10073289387954971479), + KQU( 8174466846138590962), KQU( 2675777452363449006), + KQU( 9090685420572474281), KQU( 6659652652765562060), + KQU(12923120304018106621), KQU(11117480560334526775), + KQU( 937910473424587511), KQU( 1838692113502346645), + KQU(11133914074648726180), KQU( 7922600945143884053), + KQU(13435287702700959550), KQU( 5287964921251123332), + KQU(11354875374575318947), KQU(17955724760748238133), + KQU(13728617396297106512), KQU( 4107449660118101255), + KQU( 1210269794886589623), KQU(11408687205733456282), + KQU( 4538354710392677887), KQU(13566803319341319267), + KQU(17870798107734050771), KQU( 3354318982568089135), + KQU( 9034450839405133651), KQU(13087431795753424314), + KQU( 950333102820688239), KQU( 
1968360654535604116), + KQU(16840551645563314995), KQU( 8867501803892924995), + KQU(11395388644490626845), KQU( 1529815836300732204), + KQU(13330848522996608842), KQU( 1813432878817504265), + KQU( 2336867432693429560), KQU(15192805445973385902), + KQU( 2528593071076407877), KQU( 128459777936689248), + KQU( 9976345382867214866), KQU( 6208885766767996043), + KQU(14982349522273141706), KQU( 3099654362410737822), + KQU(13776700761947297661), KQU( 8806185470684925550), + KQU( 8151717890410585321), KQU( 640860591588072925), + KQU(14592096303937307465), KQU( 9056472419613564846), + KQU(14861544647742266352), KQU(12703771500398470216), + KQU( 3142372800384138465), KQU( 6201105606917248196), + KQU(18337516409359270184), KQU(15042268695665115339), + KQU(15188246541383283846), KQU(12800028693090114519), + KQU( 5992859621101493472), KQU(18278043971816803521), + KQU( 9002773075219424560), KQU( 7325707116943598353), + KQU( 7930571931248040822), KQU( 5645275869617023448), + KQU( 7266107455295958487), KQU( 4363664528273524411), + KQU(14313875763787479809), KQU(17059695613553486802), + KQU( 9247761425889940932), KQU(13704726459237593128), + KQU( 2701312427328909832), KQU(17235532008287243115), + KQU(14093147761491729538), KQU( 6247352273768386516), + KQU( 8268710048153268415), KQU( 7985295214477182083), + KQU(15624495190888896807), KQU( 3772753430045262788), + KQU( 9133991620474991698), KQU( 5665791943316256028), + KQU( 7551996832462193473), KQU(13163729206798953877), + KQU( 9263532074153846374), KQU( 1015460703698618353), + KQU(17929874696989519390), KQU(18257884721466153847), + KQU(16271867543011222991), KQU( 3905971519021791941), + KQU(16814488397137052085), KQU( 1321197685504621613), + KQU( 2870359191894002181), KQU(14317282970323395450), + KQU(13663920845511074366), KQU( 2052463995796539594), + KQU(14126345686431444337), KQU( 1727572121947022534), + KQU(17793552254485594241), KQU( 6738857418849205750), + KQU( 1282987123157442952), KQU(16655480021581159251), + KQU( 6784587032080183866), KQU(14726758805359965162), + KQU( 7577995933961987349), KQU(12539609320311114036), + KQU(10789773033385439494), KQU( 8517001497411158227), + KQU(10075543932136339710), KQU(14838152340938811081), + KQU( 9560840631794044194), KQU(17445736541454117475), + KQU(10633026464336393186), KQU(15705729708242246293), + KQU( 1117517596891411098), KQU( 4305657943415886942), + KQU( 4948856840533979263), KQU(16071681989041789593), + KQU(13723031429272486527), KQU( 7639567622306509462), + KQU(12670424537483090390), KQU( 9715223453097197134), + KQU( 5457173389992686394), KQU( 289857129276135145), + KQU(17048610270521972512), KQU( 692768013309835485), + KQU(14823232360546632057), KQU(18218002361317895936), + KQU( 3281724260212650204), KQU(16453957266549513795), + KQU( 8592711109774511881), KQU( 929825123473369579), + KQU(15966784769764367791), KQU( 9627344291450607588), + KQU(10849555504977813287), KQU( 9234566913936339275), + KQU( 6413807690366911210), KQU(10862389016184219267), + KQU(13842504799335374048), KQU( 1531994113376881174), + KQU( 2081314867544364459), KQU(16430628791616959932), + KQU( 8314714038654394368), KQU( 9155473892098431813), + KQU(12577843786670475704), KQU( 4399161106452401017), + KQU( 1668083091682623186), KQU( 1741383777203714216), + KQU( 2162597285417794374), KQU(15841980159165218736), + KQU( 1971354603551467079), KQU( 1206714764913205968), + KQU( 4790860439591272330), KQU(14699375615594055799), + KQU( 8374423871657449988), KQU(10950685736472937738), + KQU( 697344331343267176), KQU(10084998763118059810), + 
KQU(12897369539795983124), KQU(12351260292144383605), + KQU( 1268810970176811234), KQU( 7406287800414582768), + KQU( 516169557043807831), KQU( 5077568278710520380), + KQU( 3828791738309039304), KQU( 7721974069946943610), + KQU( 3534670260981096460), KQU( 4865792189600584891), + KQU(16892578493734337298), KQU( 9161499464278042590), + KQU(11976149624067055931), KQU(13219479887277343990), + KQU(14161556738111500680), KQU(14670715255011223056), + KQU( 4671205678403576558), KQU(12633022931454259781), + KQU(14821376219869187646), KQU( 751181776484317028), + KQU( 2192211308839047070), KQU(11787306362361245189), + KQU(10672375120744095707), KQU( 4601972328345244467), + KQU(15457217788831125879), KQU( 8464345256775460809), + KQU(10191938789487159478), KQU( 6184348739615197613), + KQU(11425436778806882100), KQU( 2739227089124319793), + KQU( 461464518456000551), KQU( 4689850170029177442), + KQU( 6120307814374078625), KQU(11153579230681708671), + KQU( 7891721473905347926), KQU(10281646937824872400), + KQU( 3026099648191332248), KQU( 8666750296953273818), + KQU(14978499698844363232), KQU(13303395102890132065), + KQU( 8182358205292864080), KQU(10560547713972971291), + KQU(11981635489418959093), KQU( 3134621354935288409), + KQU(11580681977404383968), KQU(14205530317404088650), + KQU( 5997789011854923157), KQU(13659151593432238041), + KQU(11664332114338865086), KQU( 7490351383220929386), + KQU( 7189290499881530378), KQU(15039262734271020220), + KQU( 2057217285976980055), KQU( 555570804905355739), + KQU(11235311968348555110), KQU(13824557146269603217), + KQU(16906788840653099693), KQU( 7222878245455661677), + KQU( 5245139444332423756), KQU( 4723748462805674292), + KQU(12216509815698568612), KQU(17402362976648951187), + KQU(17389614836810366768), KQU( 4880936484146667711), + KQU( 9085007839292639880), KQU(13837353458498535449), + KQU(11914419854360366677), KQU(16595890135313864103), + KQU( 6313969847197627222), KQU(18296909792163910431), + KQU(10041780113382084042), KQU( 2499478551172884794), + KQU(11057894246241189489), KQU( 9742243032389068555), + KQU(12838934582673196228), KQU(13437023235248490367), + KQU(13372420669446163240), KQU( 6752564244716909224), + KQU( 7157333073400313737), KQU(12230281516370654308), + KQU( 1182884552219419117), KQU( 2955125381312499218), + KQU(10308827097079443249), KQU( 1337648572986534958), + KQU(16378788590020343939), KQU( 108619126514420935), + KQU( 3990981009621629188), KQU( 5460953070230946410), + KQU( 9703328329366531883), KQU(13166631489188077236), + KQU( 1104768831213675170), KQU( 3447930458553877908), + KQU( 8067172487769945676), KQU( 5445802098190775347), + KQU( 3244840981648973873), KQU(17314668322981950060), + KQU( 5006812527827763807), KQU(18158695070225526260), + KQU( 2824536478852417853), KQU(13974775809127519886), + KQU( 9814362769074067392), KQU(17276205156374862128), + KQU(11361680725379306967), KQU( 3422581970382012542), + KQU(11003189603753241266), KQU(11194292945277862261), + KQU( 6839623313908521348), KQU(11935326462707324634), + KQU( 1611456788685878444), KQU(13112620989475558907), + KQU( 517659108904450427), KQU(13558114318574407624), + KQU(15699089742731633077), KQU( 4988979278862685458), + KQU( 8111373583056521297), KQU( 3891258746615399627), + KQU( 8137298251469718086), KQU(12748663295624701649), + KQU( 4389835683495292062), KQU( 5775217872128831729), + KQU( 9462091896405534927), KQU( 8498124108820263989), + KQU( 8059131278842839525), KQU(10503167994254090892), + KQU(11613153541070396656), KQU(18069248738504647790), + KQU( 570657419109768508), KQU( 
3950574167771159665), + KQU( 5514655599604313077), KQU( 2908460854428484165), + KQU(10777722615935663114), KQU(12007363304839279486), + KQU( 9800646187569484767), KQU( 8795423564889864287), + KQU(14257396680131028419), KQU( 6405465117315096498), + KQU( 7939411072208774878), KQU(17577572378528990006), + KQU(14785873806715994850), KQU(16770572680854747390), + KQU(18127549474419396481), KQU(11637013449455757750), + KQU(14371851933996761086), KQU( 3601181063650110280), + KQU( 4126442845019316144), KQU(10198287239244320669), + KQU(18000169628555379659), KQU(18392482400739978269), + KQU( 6219919037686919957), KQU( 3610085377719446052), + KQU( 2513925039981776336), KQU(16679413537926716955), + KQU(12903302131714909434), KQU( 5581145789762985009), + KQU(12325955044293303233), KQU(17216111180742141204), + KQU( 6321919595276545740), KQU( 3507521147216174501), + KQU( 9659194593319481840), KQU(11473976005975358326), + KQU(14742730101435987026), KQU( 492845897709954780), + KQU(16976371186162599676), KQU(17712703422837648655), + KQU( 9881254778587061697), KQU( 8413223156302299551), + KQU( 1563841828254089168), KQU( 9996032758786671975), + KQU( 138877700583772667), KQU(13003043368574995989), + KQU( 4390573668650456587), KQU( 8610287390568126755), + KQU(15126904974266642199), KQU( 6703637238986057662), + KQU( 2873075592956810157), KQU( 6035080933946049418), + KQU(13382846581202353014), KQU( 7303971031814642463), + KQU(18418024405307444267), KQU( 5847096731675404647), + KQU( 4035880699639842500), KQU(11525348625112218478), + KQU( 3041162365459574102), KQU( 2604734487727986558), + KQU(15526341771636983145), KQU(14556052310697370254), + KQU(12997787077930808155), KQU( 9601806501755554499), + KQU(11349677952521423389), KQU(14956777807644899350), + KQU(16559736957742852721), KQU(12360828274778140726), + KQU( 6685373272009662513), KQU(16932258748055324130), + KQU(15918051131954158508), KQU( 1692312913140790144), + KQU( 546653826801637367), KQU( 5341587076045986652), + KQU(14975057236342585662), KQU(12374976357340622412), + KQU(10328833995181940552), KQU(12831807101710443149), + KQU(10548514914382545716), KQU( 2217806727199715993), + KQU(12627067369242845138), KQU( 4598965364035438158), + KQU( 150923352751318171), KQU(14274109544442257283), + KQU( 4696661475093863031), KQU( 1505764114384654516), + KQU(10699185831891495147), KQU( 2392353847713620519), + KQU( 3652870166711788383), KQU( 8640653276221911108), + KQU( 3894077592275889704), KQU( 4918592872135964845), + KQU(16379121273281400789), KQU(12058465483591683656), + KQU(11250106829302924945), KQU( 1147537556296983005), + KQU( 6376342756004613268), KQU(14967128191709280506), + KQU(18007449949790627628), KQU( 9497178279316537841), + KQU( 7920174844809394893), KQU(10037752595255719907), + KQU(15875342784985217697), KQU(15311615921712850696), + KQU( 9552902652110992950), KQU(14054979450099721140), + KQU( 5998709773566417349), KQU(18027910339276320187), + KQU( 8223099053868585554), KQU( 7842270354824999767), + KQU( 4896315688770080292), KQU(12969320296569787895), + KQU( 2674321489185759961), KQU( 4053615936864718439), + KQU(11349775270588617578), KQU( 4743019256284553975), + KQU( 5602100217469723769), KQU(14398995691411527813), + KQU( 7412170493796825470), KQU( 836262406131744846), + KQU( 8231086633845153022), KQU( 5161377920438552287), + KQU( 8828731196169924949), KQU(16211142246465502680), + KQU( 3307990879253687818), KQU( 5193405406899782022), + KQU( 8510842117467566693), KQU( 6070955181022405365), + KQU(14482950231361409799), KQU(12585159371331138077), + KQU( 
3511537678933588148), KQU( 2041849474531116417), + KQU(10944936685095345792), KQU(18303116923079107729), + KQU( 2720566371239725320), KQU( 4958672473562397622), + KQU( 3032326668253243412), KQU(13689418691726908338), + KQU( 1895205511728843996), KQU( 8146303515271990527), + KQU(16507343500056113480), KQU( 473996939105902919), + KQU( 9897686885246881481), KQU(14606433762712790575), + KQU( 6732796251605566368), KQU( 1399778120855368916), + KQU( 935023885182833777), KQU(16066282816186753477), + KQU( 7291270991820612055), KQU(17530230393129853844), + KQU(10223493623477451366), KQU(15841725630495676683), + KQU(17379567246435515824), KQU( 8588251429375561971), + KQU(18339511210887206423), KQU(17349587430725976100), + KQU(12244876521394838088), KQU( 6382187714147161259), + KQU(12335807181848950831), KQU(16948885622305460665), + KQU(13755097796371520506), KQU(14806740373324947801), + KQU( 4828699633859287703), KQU( 8209879281452301604), + KQU(12435716669553736437), KQU(13970976859588452131), + KQU( 6233960842566773148), KQU(12507096267900505759), + KQU( 1198713114381279421), KQU(14989862731124149015), + KQU(15932189508707978949), KQU( 2526406641432708722), + KQU( 29187427817271982), KQU( 1499802773054556353), + KQU(10816638187021897173), KQU( 5436139270839738132), + KQU( 6659882287036010082), KQU( 2154048955317173697), + KQU(10887317019333757642), KQU(16281091802634424955), + KQU(10754549879915384901), KQU(10760611745769249815), + KQU( 2161505946972504002), KQU( 5243132808986265107), + KQU(10129852179873415416), KQU( 710339480008649081), + KQU( 7802129453068808528), KQU(17967213567178907213), + KQU(15730859124668605599), KQU(13058356168962376502), + KQU( 3701224985413645909), KQU(14464065869149109264), + KQU( 9959272418844311646), KQU(10157426099515958752), + KQU(14013736814538268528), KQU(17797456992065653951), + KQU(17418878140257344806), KQU(15457429073540561521), + KQU( 2184426881360949378), KQU( 2062193041154712416), + KQU( 8553463347406931661), KQU( 4913057625202871854), + KQU( 2668943682126618425), KQU(17064444737891172288), + KQU( 4997115903913298637), KQU(12019402608892327416), + KQU(17603584559765897352), KQU(11367529582073647975), + KQU( 8211476043518436050), KQU( 8676849804070323674), + KQU(18431829230394475730), KQU(10490177861361247904), + KQU( 9508720602025651349), KQU( 7409627448555722700), + KQU( 5804047018862729008), KQU(11943858176893142594), + KQU(11908095418933847092), KQU( 5415449345715887652), + KQU( 1554022699166156407), KQU( 9073322106406017161), + KQU( 7080630967969047082), KQU(18049736940860732943), + KQU(12748714242594196794), KQU( 1226992415735156741), + KQU(17900981019609531193), KQU(11720739744008710999), + KQU( 3006400683394775434), KQU(11347974011751996028), + KQU( 3316999628257954608), KQU( 8384484563557639101), + KQU(18117794685961729767), KQU( 1900145025596618194), + KQU(17459527840632892676), KQU( 5634784101865710994), + KQU( 7918619300292897158), KQU( 3146577625026301350), + KQU( 9955212856499068767), KQU( 1873995843681746975), + KQU( 1561487759967972194), KQU( 8322718804375878474), + KQU(11300284215327028366), KQU( 4667391032508998982), + KQU( 9820104494306625580), KQU(17922397968599970610), + KQU( 1784690461886786712), KQU(14940365084341346821), + KQU( 5348719575594186181), KQU(10720419084507855261), + KQU(14210394354145143274), KQU( 2426468692164000131), + KQU(16271062114607059202), KQU(14851904092357070247), + KQU( 6524493015693121897), KQU( 9825473835127138531), + KQU(14222500616268569578), KQU(15521484052007487468), + KQU(14462579404124614699), 
KQU(11012375590820665520), + KQU(11625327350536084927), KQU(14452017765243785417), + KQU( 9989342263518766305), KQU( 3640105471101803790), + KQU( 4749866455897513242), KQU(13963064946736312044), + KQU(10007416591973223791), KQU(18314132234717431115), + KQU( 3286596588617483450), KQU( 7726163455370818765), + KQU( 7575454721115379328), KQU( 5308331576437663422), + KQU(18288821894903530934), KQU( 8028405805410554106), + KQU(15744019832103296628), KQU( 149765559630932100), + KQU( 6137705557200071977), KQU(14513416315434803615), + KQU(11665702820128984473), KQU( 218926670505601386), + KQU( 6868675028717769519), KQU(15282016569441512302), + KQU( 5707000497782960236), KQU( 6671120586555079567), + KQU( 2194098052618985448), KQU(16849577895477330978), + KQU(12957148471017466283), KQU( 1997805535404859393), + KQU( 1180721060263860490), KQU(13206391310193756958), + KQU(12980208674461861797), KQU( 3825967775058875366), + KQU(17543433670782042631), KQU( 1518339070120322730), + KQU(16344584340890991669), KQU( 2611327165318529819), + KQU(11265022723283422529), KQU( 4001552800373196817), + KQU(14509595890079346161), KQU( 3528717165416234562), + KQU(18153222571501914072), KQU( 9387182977209744425), + KQU(10064342315985580021), KQU(11373678413215253977), + KQU( 2308457853228798099), KQU( 9729042942839545302), + KQU( 7833785471140127746), KQU( 6351049900319844436), + KQU(14454610627133496067), KQU(12533175683634819111), + KQU(15570163926716513029), KQU(13356980519185762498) +}; + +TEST_BEGIN(test_gen_rand_32) +{ + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_gen_rand(1234); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_gen_rand(1234); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_gen_rand_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_32) +{ + uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); + int i; + uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0}; + uint32_t r32; + sfmt_t *ctx; + + assert_d_le(get_min_array_size32(), BLOCK_SIZE, + "Array size too small"); + ctx = init_by_array(ini, 4); + fill_array32(ctx, array32, BLOCK_SIZE); + fill_array32(ctx, array32_2, BLOCK_SIZE); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 4); + for (i = 0; i < BLOCK_SIZE; i++) { + if (i < COUNT_1) { + assert_u32_eq(array32[i], init_by_array_32_expected[i], + "Output mismatch for i=%d", i); + } + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32[i], + "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32); + } + for (i = 0; i < COUNT_2; i++) { + r32 = gen_rand32(ctx); + assert_u32_eq(r32, array32_2[i], + "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i], + r32); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_gen_rand_64) +{ + uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + 
uint64_t r; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_gen_rand(4321); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_gen_rand(4321); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_gen_rand_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +TEST_BEGIN(test_by_array_64) +{ + uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); + int i; + uint64_t r; + uint32_t ini[] = {5, 4, 3, 2, 1}; + sfmt_t *ctx; + + assert_d_le(get_min_array_size64(), BLOCK_SIZE64, + "Array size too small"); + ctx = init_by_array(ini, 5); + fill_array64(ctx, array64, BLOCK_SIZE64); + fill_array64(ctx, array64_2, BLOCK_SIZE64); + fini_gen_rand(ctx); + + ctx = init_by_array(ini, 5); + for (i = 0; i < BLOCK_SIZE64; i++) { + if (i < COUNT_1) { + assert_u64_eq(array64[i], init_by_array_64_expected[i], + "Output mismatch for i=%d", i); + } + r = gen_rand64(ctx); + assert_u64_eq(r, array64[i], + "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i, + array64[i], r); + } + for (i = 0; i < COUNT_2; i++) { + r = gen_rand64(ctx); + assert_u64_eq(r, array64_2[i], + "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i, + array64_2[i], r); + } + fini_gen_rand(ctx); +} +TEST_END + +int +main(void) +{ + + return (test( + test_gen_rand_32, + test_by_array_32, + test_gen_rand_64, + test_by_array_64)); +} diff --git a/redis.submodule/jemalloc/test/unit/atomic.c b/redis.submodule/jemalloc/test/unit/atomic.c new file mode 100644 index 0000000..bdd74f6 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/atomic.c @@ -0,0 +1,122 @@ +#include "test/jemalloc_test.h" + +#define TEST_STRUCT(p, t) \ +struct p##_test_s { \ + t accum0; \ + t x; \ + t s; \ +}; \ +typedef struct p##_test_s p##_test_t; + +#define TEST_BODY(p, t, tc, ta, FMT) do { \ + const p##_test_t tests[] = { \ + {(t)-1, (t)-1, (t)-2}, \ + {(t)-1, (t) 0, (t)-2}, \ + {(t)-1, (t) 1, (t)-2}, \ + \ + {(t) 0, (t)-1, (t)-2}, \ + {(t) 0, (t) 0, (t)-2}, \ + {(t) 0, (t) 1, (t)-2}, \ + \ + {(t) 1, (t)-1, (t)-2}, \ + {(t) 1, (t) 0, (t)-2}, \ + {(t) 1, (t) 1, (t)-2}, \ + \ + {(t)0, (t)-(1 << 22), (t)-2}, \ + {(t)0, (t)(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)(1 << 22), (t)-2} \ + }; \ + unsigned i; \ + \ + for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \ + bool err; \ + t accum = tests[i].accum0; \ + assert_##ta##_eq(atomic_read_##p(&accum), \ + tests[i].accum0, \ + "Erroneous read, i=%u", i); \ + \ + assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \ + (t)((tc)tests[i].accum0 + (tc)tests[i].x), \ + "i=%u, accum=%"FMT", x=%"FMT, \ + i, tests[i].accum0, tests[i].x); \ + assert_##ta##_eq(atomic_read_##p(&accum), accum, \ + "Erroneous add, i=%u", i); \ + \ + accum = tests[i].accum0; \ + assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \ + (t)((tc)tests[i].accum0 - (tc)tests[i].x), \ + "i=%u, accum=%"FMT", x=%"FMT, \ + i, tests[i].accum0, tests[i].x); \ + assert_##ta##_eq(atomic_read_##p(&accum), accum, \ + "Erroneous sub, i=%u", i); \ + \ + 
accum = tests[i].accum0; \ + err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \ + assert_b_eq(err, tests[i].accum0 != tests[i].x, \ + "Erroneous cas success/failure result"); \ + assert_##ta##_eq(accum, err ? tests[i].accum0 : \ + tests[i].s, "Erroneous cas effect, i=%u", i); \ + \ + accum = tests[i].accum0; \ + atomic_write_##p(&accum, tests[i].s); \ + assert_##ta##_eq(accum, tests[i].s, \ + "Erroneous write, i=%u", i); \ + } \ +} while (0) + +TEST_STRUCT(uint64, uint64_t) +TEST_BEGIN(test_atomic_uint64) +{ + +#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) + test_skip("64-bit atomic operations not supported"); +#else + TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64); +#endif +} +TEST_END + +TEST_STRUCT(uint32, uint32_t) +TEST_BEGIN(test_atomic_uint32) +{ + + TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32); +} +TEST_END + +TEST_STRUCT(p, void *) +TEST_BEGIN(test_atomic_p) +{ + + TEST_BODY(p, void *, uintptr_t, ptr, "p"); +} +TEST_END + +TEST_STRUCT(z, size_t) +TEST_BEGIN(test_atomic_z) +{ + + TEST_BODY(z, size_t, size_t, zu, "#zx"); +} +TEST_END + +TEST_STRUCT(u, unsigned) +TEST_BEGIN(test_atomic_u) +{ + + TEST_BODY(u, unsigned, unsigned, u, "#x"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_atomic_uint64, + test_atomic_uint32, + test_atomic_p, + test_atomic_z, + test_atomic_u)); +} diff --git a/redis.submodule/jemalloc/test/unit/bitmap.c b/redis.submodule/jemalloc/test/unit/bitmap.c new file mode 100644 index 0000000..7da583d --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/bitmap.c @@ -0,0 +1,159 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_bitmap_size) +{ + size_t i, prev_size; + + prev_size = 0; + for (i = 1; i <= BITMAP_MAXBITS; i++) { + size_t size = bitmap_size(i); + assert_true(size >= prev_size, + "Bitmap size is smaller than expected"); + prev_size = size; + } +} +TEST_END + +TEST_BEGIN(test_bitmap_init) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + size_t j; + bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * + bitmap_info_ngroups(&binfo)); + bitmap_init(bitmap, &binfo); + + for (j = 0; j < i; j++) { + assert_false(bitmap_get(bitmap, &binfo, j), + "Bit should be unset"); + } + free(bitmap); + } + } +} +TEST_END + +TEST_BEGIN(test_bitmap_set) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + size_t j; + bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * + bitmap_info_ngroups(&binfo)); + bitmap_init(bitmap, &binfo); + + for (j = 0; j < i; j++) + bitmap_set(bitmap, &binfo, j); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + free(bitmap); + } + } +} +TEST_END + +TEST_BEGIN(test_bitmap_unset) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + size_t j; + bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * + bitmap_info_ngroups(&binfo)); + bitmap_init(bitmap, &binfo); + + for (j = 0; j < i; j++) + bitmap_set(bitmap, &binfo, j); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + for (j = 0; j < i; j++) + bitmap_unset(bitmap, &binfo, j); + for (j = 0; j < i; j++) + bitmap_set(bitmap, &binfo, j); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + free(bitmap); + } + } +} +TEST_END + +TEST_BEGIN(test_bitmap_sfu) +{ + size_t i; + + for (i = 1; i <= BITMAP_MAXBITS; i++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, i); + { + ssize_t j; + 
bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * + bitmap_info_ngroups(&binfo)); + bitmap_init(bitmap, &binfo); + + /* Iteratively set bits starting at the beginning. */ + for (j = 0; j < i; j++) { + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should be just after " + "previous first unset bit"); + } + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + + /* + * Iteratively unset bits starting at the end, and + * verify that bitmap_sfu() reaches the unset bits. + */ + for (j = i - 1; j >= 0; j--) { + bitmap_unset(bitmap, &binfo, j); + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should the bit previously " + "unset"); + bitmap_unset(bitmap, &binfo, j); + } + assert_false(bitmap_get(bitmap, &binfo, 0), + "Bit should be unset"); + + /* + * Iteratively set bits starting at the beginning, and + * verify that bitmap_sfu() looks past them. + */ + for (j = 1; j < i; j++) { + bitmap_set(bitmap, &binfo, j - 1); + assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, + "First unset bit should be just after the " + "bit previously set"); + bitmap_unset(bitmap, &binfo, j); + } + assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1, + "First unset bit should be the last bit"); + assert_true(bitmap_full(bitmap, &binfo), + "All bits should be set"); + free(bitmap); + } + } +} +TEST_END + +int +main(void) +{ + + return (test( + test_bitmap_size, + test_bitmap_init, + test_bitmap_set, + test_bitmap_unset, + test_bitmap_sfu)); +} diff --git a/redis.submodule/jemalloc/test/unit/ckh.c b/redis.submodule/jemalloc/test/unit/ckh.c new file mode 100644 index 0000000..b117595 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/ckh.c @@ -0,0 +1,214 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_new_delete) +{ + tsd_t *tsd; + ckh_t ckh; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), + "Unexpected ckh_new() error"); + ckh_delete(tsd, &ckh); + + assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, + ckh_pointer_keycomp), "Unexpected ckh_new() error"); + ckh_delete(tsd, &ckh); +} +TEST_END + +TEST_BEGIN(test_count_insert_search_remove) +{ + tsd_t *tsd; + ckh_t ckh; + const char *strs[] = { + "a string", + "A string", + "a string.", + "A string." + }; + const char *missing = "A string not in the hash table."; + size_t i; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), + "Unexpected ckh_new() error"); + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", ZU(0), + ckh_count(&ckh)); + + /* Insert. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + ckh_insert(tsd, &ckh, strs[i], strs[i]); + assert_zu_eq(ckh_count(&ckh), i+1, + "ckh_count() should return %zu, but it returned %zu", i+1, + ckh_count(&ckh)); + } + + /* Search. */ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? &k.p : NULL; + vp = (i & 2) ? &v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_search(&ckh, strs[i], kp, vp), + "Unexpected ckh_search() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", + i); + assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", + i); + } + assert_true(ckh_search(&ckh, missing, NULL, NULL), + "Unexpected ckh_search() success"); + + /* Remove. 
*/ + for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { + union { + void *p; + const char *s; + } k, v; + void **kp, **vp; + const char *ks, *vs; + + kp = (i & 1) ? &k.p : NULL; + vp = (i & 2) ? &v.p : NULL; + k.p = NULL; + v.p = NULL; + assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp), + "Unexpected ckh_remove() error"); + + ks = (i & 1) ? strs[i] : (const char *)NULL; + vs = (i & 2) ? strs[i] : (const char *)NULL; + assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", + i); + assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", + i); + assert_zu_eq(ckh_count(&ckh), + sizeof(strs)/sizeof(const char *) - i - 1, + "ckh_count() should return %zu, but it returned %zu", + sizeof(strs)/sizeof(const char *) - i - 1, + ckh_count(&ckh)); + } + + ckh_delete(tsd, &ckh); +} +TEST_END + +TEST_BEGIN(test_insert_iter_remove) +{ +#define NITEMS ZU(1000) + tsd_t *tsd; + ckh_t ckh; + void **p[NITEMS]; + void *q, *r; + size_t i; + + tsd = tsd_fetch(); + + assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, + ckh_pointer_keycomp), "Unexpected ckh_new() error"); + + for (i = 0; i < NITEMS; i++) { + p[i] = mallocx(i+1, 0); + assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); + } + + for (i = 0; i < NITEMS; i++) { + size_t j; + + for (j = i; j < NITEMS; j++) { + assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), + "Unexpected ckh_insert() failure"); + assert_false(ckh_search(&ckh, p[j], &q, &r), + "Unexpected ckh_search() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + } + + assert_zu_eq(ckh_count(&ckh), NITEMS, + "ckh_count() should return %zu, but it returned %zu", + NITEMS, ckh_count(&ckh)); + + for (j = i + 1; j < NITEMS; j++) { + assert_false(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[j], q, "Key pointer mismatch"); + assert_ptr_eq(p[j], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[j], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), + "Unexpected ckh_remove() success"); + } + + { + bool seen[NITEMS]; + size_t tabind; + + memset(seen, 0, sizeof(seen)); + + for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) { + size_t k; + + assert_ptr_eq(q, r, "Key and val not equal"); + + for (k = 0; k < NITEMS; k++) { + if (p[k] == q) { + assert_false(seen[k], + "Item %zu already seen", k); + seen[k] = true; + break; + } + } + } + + for (j = 0; j < i + 1; j++) + assert_true(seen[j], "Item %zu not seen", j); + for (; j < NITEMS; j++) + assert_false(seen[j], "Item %zu seen", j); + } + } + + for (i = 0; i < NITEMS; i++) { + assert_false(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() failure"); + assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), + "Unexpected ckh_remove() failure"); + assert_ptr_eq(p[i], q, "Key pointer mismatch"); + assert_ptr_eq(p[i], r, "Value pointer mismatch"); + assert_true(ckh_search(&ckh, p[i], NULL, NULL), + "Unexpected ckh_search() success"); + assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), + "Unexpected ckh_remove() success"); + dallocx(p[i], 0); + } + + assert_zu_eq(ckh_count(&ckh), 0, + "ckh_count() should return %zu, but it returned %zu", + ZU(0), ckh_count(&ckh)); + ckh_delete(tsd, &ckh); +#undef NITEMS +} +TEST_END + +int +main(void) +{ + + return (test( + test_new_delete, + test_count_insert_search_remove, + test_insert_iter_remove)); +} diff --git 
a/redis.submodule/jemalloc/test/unit/hash.c b/redis.submodule/jemalloc/test/unit/hash.c new file mode 100644 index 0000000..77a8ced --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/hash.c @@ -0,0 +1,171 @@ +/* + * This file is based on code that is part of SMHasher + * (https://code.google.com/p/smhasher/), and is subject to the MIT license + * (http://www.opensource.org/licenses/mit-license.php). Both email addresses + * associated with the source code's revision history belong to Austin Appleby, + * and the revision history ranges from 2010 to 2012. Therefore the copyright + * and license are here taken to be: + * + * Copyright (c) 2010-2012 Austin Appleby + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "test/jemalloc_test.h" + +typedef enum { + hash_variant_x86_32, + hash_variant_x86_128, + hash_variant_x64_128 +} hash_variant_t; + +static size_t +hash_variant_bits(hash_variant_t variant) +{ + + switch (variant) { + case hash_variant_x86_32: return (32); + case hash_variant_x86_128: return (128); + case hash_variant_x64_128: return (128); + default: not_reached(); + } +} + +static const char * +hash_variant_string(hash_variant_t variant) +{ + + switch (variant) { + case hash_variant_x86_32: return ("hash_x86_32"); + case hash_variant_x86_128: return ("hash_x86_128"); + case hash_variant_x64_128: return ("hash_x64_128"); + default: not_reached(); + } +} + +static void +hash_variant_verify(hash_variant_t variant) +{ + const size_t hashbytes = hash_variant_bits(variant) / 8; + uint8_t key[256]; + VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256); + VARIABLE_ARRAY(uint8_t, final, hashbytes); + unsigned i; + uint32_t computed, expected; + + memset(key, 0, sizeof(key)); + memset(hashes, 0, sizeof(hashes)); + memset(final, 0, sizeof(final)); + + /* + * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the + * seed. + */ + for (i = 0; i < 256; i++) { + key[i] = (uint8_t)i; + switch (variant) { + case hash_variant_x86_32: { + uint32_t out; + out = hash_x86_32(key, i, 256-i); + memcpy(&hashes[i*hashbytes], &out, hashbytes); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(key, i, 256-i, out); + memcpy(&hashes[i*hashbytes], out, hashbytes); + break; + } default: not_reached(); + } + } + + /* Hash the result array. 
*/ + switch (variant) { + case hash_variant_x86_32: { + uint32_t out = hash_x86_32(hashes, hashbytes*256, 0); + memcpy(final, &out, sizeof(out)); + break; + } case hash_variant_x86_128: { + uint64_t out[2]; + hash_x86_128(hashes, hashbytes*256, 0, out); + memcpy(final, out, sizeof(out)); + break; + } case hash_variant_x64_128: { + uint64_t out[2]; + hash_x64_128(hashes, hashbytes*256, 0, out); + memcpy(final, out, sizeof(out)); + break; + } default: not_reached(); + } + + computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | + (final[3] << 24); + + switch (variant) { +#ifdef JEMALLOC_BIG_ENDIAN + case hash_variant_x86_32: expected = 0x6213303eU; break; + case hash_variant_x86_128: expected = 0x266820caU; break; + case hash_variant_x64_128: expected = 0xcc622b6fU; break; +#else + case hash_variant_x86_32: expected = 0xb0f57ee3U; break; + case hash_variant_x86_128: expected = 0xb3ece62aU; break; + case hash_variant_x64_128: expected = 0x6384ba69U; break; +#endif + default: not_reached(); + } + + assert_u32_eq(computed, expected, + "Hash mismatch for %s(): expected %#x but got %#x", + hash_variant_string(variant), expected, computed); +} + +TEST_BEGIN(test_hash_x86_32) +{ + + hash_variant_verify(hash_variant_x86_32); +} +TEST_END + +TEST_BEGIN(test_hash_x86_128) +{ + + hash_variant_verify(hash_variant_x86_128); +} +TEST_END + +TEST_BEGIN(test_hash_x64_128) +{ + + hash_variant_verify(hash_variant_x64_128); +} +TEST_END + +int +main(void) +{ + + return (test( + test_hash_x86_32, + test_hash_x86_128, + test_hash_x64_128)); +} diff --git a/redis.submodule/jemalloc/test/unit/junk.c b/redis.submodule/jemalloc/test/unit/junk.c new file mode 100644 index 0000000..b23dd1e --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/junk.c @@ -0,0 +1,254 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +# ifndef JEMALLOC_TEST_JUNK_OPT +# define JEMALLOC_TEST_JUNK_OPT "junk:true" +# endif +const char *malloc_conf = + "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT; +#endif + +static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; +static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; +static huge_dalloc_junk_t *huge_dalloc_junk_orig; +static void *watch_for_junking; +static bool saw_junking; + +static void +watch_junking(void *p) +{ + + watch_for_junking = p; + saw_junking = false; +} + +static void +arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) +{ + size_t i; + + arena_dalloc_junk_small_orig(ptr, bin_info); + for (i = 0; i < bin_info->reg_size; i++) { + assert_c_eq(((char *)ptr)[i], 0x5a, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, bin_info->reg_size); + } + if (ptr == watch_for_junking) + saw_junking = true; +} + +static void +arena_dalloc_junk_large_intercept(void *ptr, size_t usize) +{ + size_t i; + + arena_dalloc_junk_large_orig(ptr, usize); + for (i = 0; i < usize; i++) { + assert_c_eq(((char *)ptr)[i], 0x5a, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, usize); + } + if (ptr == watch_for_junking) + saw_junking = true; +} + +static void +huge_dalloc_junk_intercept(void *ptr, size_t usize) +{ + + huge_dalloc_junk_orig(ptr, usize); + /* + * The conditions under which junk filling actually occurs are nuanced + * enough that it doesn't make sense to duplicate the decision logic in + * test code, so don't actually check that the region is junk-filled. 
+ */ + if (ptr == watch_for_junking) + saw_junking = true; +} + +static void +test_junk(size_t sz_min, size_t sz_max) +{ + char *s; + size_t sz_prev, sz, i; + + if (opt_junk_free) { + arena_dalloc_junk_small_orig = arena_dalloc_junk_small; + arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; + arena_dalloc_junk_large_orig = arena_dalloc_junk_large; + arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; + huge_dalloc_junk_orig = huge_dalloc_junk; + huge_dalloc_junk = huge_dalloc_junk_intercept; + } + + sz_prev = 0; + s = (char *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_c_eq(s[0], 'a', + "Previously allocated byte %zu/%zu is corrupted", + ZU(0), sz_prev); + assert_c_eq(s[sz_prev-1], 'a', + "Previously allocated byte %zu/%zu is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + if (opt_junk_alloc) { + assert_c_eq(s[i], 0xa5, + "Newly allocated byte %zu/%zu isn't " + "junk-filled", i, sz); + } + s[i] = 'a'; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + watch_junking(s); + s = (char *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)s, + "Unexpected rallocx() failure"); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be junk-filled", + sz); + } + } + + watch_junking(s); + dallocx(s, 0); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be junk-filled", sz); + + if (opt_junk_free) { + arena_dalloc_junk_small = arena_dalloc_junk_small_orig; + arena_dalloc_junk_large = arena_dalloc_junk_large_orig; + huge_dalloc_junk = huge_dalloc_junk_orig; + } +} + +TEST_BEGIN(test_junk_small) +{ + + test_skip_if(!config_fill); + test_junk(1, SMALL_MAXCLASS-1); +} +TEST_END + +TEST_BEGIN(test_junk_large) +{ + + test_skip_if(!config_fill); + test_junk(SMALL_MAXCLASS+1, large_maxclass); +} +TEST_END + +TEST_BEGIN(test_junk_huge) +{ + + test_skip_if(!config_fill); + test_junk(large_maxclass+1, chunksize*2); +} +TEST_END + +arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; +static void *most_recently_trimmed; + +static size_t +shrink_size(size_t size) +{ + size_t shrink_size; + + for (shrink_size = size - 1; nallocx(shrink_size, 0) == size; + shrink_size--) + ; /* Do nothing. 
*/ + + return (shrink_size); +} + +static void +arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) +{ + + arena_ralloc_junk_large_orig(ptr, old_usize, usize); + assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize"); + assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize"); + most_recently_trimmed = ptr; +} + +TEST_BEGIN(test_junk_large_ralloc_shrink) +{ + void *p1, *p2; + + p1 = mallocx(large_maxclass, 0); + assert_ptr_not_null(p1, "Unexpected mallocx() failure"); + + arena_ralloc_junk_large_orig = arena_ralloc_junk_large; + arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; + + p2 = rallocx(p1, shrink_size(large_maxclass), 0); + assert_ptr_eq(p1, p2, "Unexpected move during shrink"); + + arena_ralloc_junk_large = arena_ralloc_junk_large_orig; + + assert_ptr_eq(most_recently_trimmed, p1, + "Expected trimmed portion of region to be junk-filled"); +} +TEST_END + +static bool detected_redzone_corruption; + +static void +arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + detected_redzone_corruption = true; +} + +TEST_BEGIN(test_junk_redzone) +{ + char *s; + arena_redzone_corruption_t *arena_redzone_corruption_orig; + + test_skip_if(!config_fill); + test_skip_if(!opt_junk_alloc || !opt_junk_free); + + arena_redzone_corruption_orig = arena_redzone_corruption; + arena_redzone_corruption = arena_redzone_corruption_replacement; + + /* Test underflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[-1] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + /* Test overflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[sallocx(s, 0)] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + arena_redzone_corruption = arena_redzone_corruption_orig; +} +TEST_END + +int +main(void) +{ + + assert(!config_fill || opt_junk_alloc || opt_junk_free); + return (test( + test_junk_small, + test_junk_large, + test_junk_huge, + test_junk_large_ralloc_shrink, + test_junk_redzone)); +} diff --git a/redis.submodule/jemalloc/test/unit/junk_alloc.c b/redis.submodule/jemalloc/test/unit/junk_alloc.c new file mode 100644 index 0000000..8db3331 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/junk_alloc.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TEST_JUNK_OPT "junk:alloc" +#include "junk.c" +#undef JEMALLOC_TEST_JUNK_OPT diff --git a/redis.submodule/jemalloc/test/unit/junk_free.c b/redis.submodule/jemalloc/test/unit/junk_free.c new file mode 100644 index 0000000..482a61d --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/junk_free.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TEST_JUNK_OPT "junk:free" +#include "junk.c" +#undef JEMALLOC_TEST_JUNK_OPT diff --git a/redis.submodule/jemalloc/test/unit/lg_chunk.c b/redis.submodule/jemalloc/test/unit/lg_chunk.c new file mode 100644 index 0000000..7e5df38 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/lg_chunk.c @@ -0,0 +1,26 @@ +#include "test/jemalloc_test.h" + +/* + * Make sure that opt.lg_chunk clamping is sufficient. In practice, this test + * program will fail a debug assertion during initialization and abort (rather + * than the test soft-failing) if clamping is insufficient. 
+ */ +const char *malloc_conf = "lg_chunk:0"; + +TEST_BEGIN(test_lg_chunk_clamp) +{ + void *p; + + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_lg_chunk_clamp)); +} diff --git a/redis.submodule/jemalloc/test/unit/mallctl.c b/redis.submodule/jemalloc/test/unit/mallctl.c new file mode 100644 index 0000000..31e354c --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/mallctl.c @@ -0,0 +1,633 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_mallctl_errors) +{ + uint64_t epoch; + size_t sz; + + assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, + "mallctl() should return ENOENT for non-existent names"); + + assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), + EPERM, "mallctl() should return EPERM on attempt to write " + "read-only value"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1), + EINVAL, "mallctl() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_errors) +{ + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, + "mallctlnametomib() should return ENOENT for non-existent names"); +} +TEST_END + +TEST_BEGIN(test_mallctlbymib_errors) +{ + uint64_t epoch; + size_t sz; + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", + strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " + "attempt to write read-only value"); + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + sizeof(epoch)-1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, + sizeof(epoch)+1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctl_read_write) +{ + uint64_t old_epoch, new_epoch; + size_t sz = sizeof(old_epoch); + + /* Blind. */ + assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read. */ + assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Write. 
*/ + assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)), + 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read+write. */ + assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch, + sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_short_mib) +{ + size_t mib[4]; + size_t miblen; + + miblen = 3; + mib[3] = 42; + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + assert_zu_eq(miblen, 3, "Unexpected mib output length"); + assert_zu_eq(mib[3], 42, + "mallctlnametomib() wrote past the end of the input mib"); +} +TEST_END + +TEST_BEGIN(test_mallctl_config) +{ + +#define TEST_MALLCTL_CONFIG(config) do { \ + bool oldval; \ + size_t sz = sizeof(oldval); \ + assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + assert_b_eq(oldval, config_##config, "Incorrect config value"); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_CONFIG(cache_oblivious); + TEST_MALLCTL_CONFIG(debug); + TEST_MALLCTL_CONFIG(fill); + TEST_MALLCTL_CONFIG(lazy_lock); + TEST_MALLCTL_CONFIG(munmap); + TEST_MALLCTL_CONFIG(prof); + TEST_MALLCTL_CONFIG(prof_libgcc); + TEST_MALLCTL_CONFIG(prof_libunwind); + TEST_MALLCTL_CONFIG(stats); + TEST_MALLCTL_CONFIG(tcache); + TEST_MALLCTL_CONFIG(tls); + TEST_MALLCTL_CONFIG(utrace); + TEST_MALLCTL_CONFIG(valgrind); + TEST_MALLCTL_CONFIG(xmalloc); + +#undef TEST_MALLCTL_CONFIG +} +TEST_END + +TEST_BEGIN(test_mallctl_opt) +{ + bool config_always = true; + +#define TEST_MALLCTL_OPT(t, opt, config) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + int expected = config_##config ? 
0 : ENOENT; \ + int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \ + assert_d_eq(result, expected, \ + "Unexpected mallctl() result for opt."#opt); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_OPT(bool, abort, always); + TEST_MALLCTL_OPT(size_t, lg_chunk, always); + TEST_MALLCTL_OPT(const char *, dss, always); + TEST_MALLCTL_OPT(size_t, narenas, always); + TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); + TEST_MALLCTL_OPT(bool, stats_print, always); + TEST_MALLCTL_OPT(const char *, junk, fill); + TEST_MALLCTL_OPT(size_t, quarantine, fill); + TEST_MALLCTL_OPT(bool, redzone, fill); + TEST_MALLCTL_OPT(bool, zero, fill); + TEST_MALLCTL_OPT(bool, utrace, utrace); + TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); + TEST_MALLCTL_OPT(bool, tcache, tcache); + TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); + TEST_MALLCTL_OPT(bool, prof, prof); + TEST_MALLCTL_OPT(const char *, prof_prefix, prof); + TEST_MALLCTL_OPT(bool, prof_active, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); + TEST_MALLCTL_OPT(bool, prof_accum, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); + TEST_MALLCTL_OPT(bool, prof_gdump, prof); + TEST_MALLCTL_OPT(bool, prof_final, prof); + TEST_MALLCTL_OPT(bool, prof_leak, prof); + +#undef TEST_MALLCTL_OPT +} +TEST_END + +TEST_BEGIN(test_manpage_example) +{ + unsigned nbins, i; + size_t mib[4]; + size_t len, miblen; + + len = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, + "Unexpected mallctl() failure"); + + miblen = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < nbins; i++) { + size_t bin_size; + + mib[2] = i; + len = sizeof(bin_size); + assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0), + 0, "Unexpected mallctlbymib() failure"); + /* Do something with bin_size... */ + } +} +TEST_END + +TEST_BEGIN(test_tcache_none) +{ + void *p0, *q, *p1; + + test_skip_if(!config_tcache); + + /* Allocate p and q. */ + p0 = mallocx(42, 0); + assert_ptr_not_null(p0, "Unexpected mallocx() failure"); + q = mallocx(42, 0); + assert_ptr_not_null(q, "Unexpected mallocx() failure"); + + /* Deallocate p and q, but bypass the tcache for q. */ + dallocx(p0, 0); + dallocx(q, MALLOCX_TCACHE_NONE); + + /* Make sure that tcache-based allocation returns p, not q. */ + p1 = mallocx(42, 0); + assert_ptr_not_null(p1, "Unexpected mallocx() failure"); + assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); + + /* Clean up. */ + dallocx(p1, MALLOCX_TCACHE_NONE); +} +TEST_END + +TEST_BEGIN(test_tcache) +{ +#define NTCACHES 10 + unsigned tis[NTCACHES]; + void *ps[NTCACHES]; + void *qs[NTCACHES]; + unsigned i; + size_t sz, psz, qsz; + + test_skip_if(!config_tcache); + + psz = 42; + qsz = nallocx(psz, 0) + 1; + + /* Create tcaches. */ + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, + "Unexpected mallctl() failure, i=%u", i); + } + + /* Exercise tcache ID recycling. */ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, + "Unexpected mallctl() failure, i=%u", i); + } + + /* Flush empty tcaches. 
*/ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Cache some allocations. */ + for (i = 0; i < NTCACHES; i++) { + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(ps[i], MALLOCX_TCACHE(tis[i])); + + qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + } + + /* Verify that tcaches allocate cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *p0 = ps[i]; + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + assert_ptr_eq(ps[i], p0, + "Expected mallocx() to allocate cached region, i=%u", i); + } + + /* Verify that reallocation uses cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *q0 = qs[i]; + qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", + i); + assert_ptr_eq(qs[i], q0, + "Expected rallocx() to allocate cached region, i=%u", i); + /* Avoid undefined behavior in case of test failure. */ + if (qs[i] == NULL) + qs[i] = ps[i]; + } + for (i = 0; i < NTCACHES; i++) + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + + /* Flush some non-empty tcaches. */ + for (i = 0; i < NTCACHES/2; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Destroy tcaches. */ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } +} +TEST_END + +TEST_BEGIN(test_thread_arena) +{ + unsigned arena_old, arena_new, narenas; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); + arena_new = narenas - 1; + assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, + sizeof(unsigned)), 0, "Unexpected mallctl() failure"); + arena_new = 0; + assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, + sizeof(unsigned)), 0, "Unexpected mallctl() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_lg_dirty_mult) +{ + ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + + lg_dirty_mult = -2; + assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + lg_dirty_mult = (sizeof(size_t) << 3); + assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; + lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult + = lg_dirty_mult, lg_dirty_mult++) { + ssize_t old_lg_dirty_mult; + + assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult, + &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, + "Unexpected old arena.0.lg_dirty_mult"); + } +} +TEST_END + +TEST_BEGIN(test_arena_i_purge) +{ + unsigned narenas; + size_t sz = 
sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_dss) +{ + const char *dss_prec_old, *dss_prec_new; + size_t sz = sizeof(dss_prec_old); + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, + sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, + sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); + + mib[1] = narenas_total_get(); + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, + sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, + sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); +} +TEST_END + +TEST_BEGIN(test_arenas_initialized) +{ + unsigned narenas; + size_t sz = sizeof(narenas); + + assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + { + VARIABLE_ARRAY(bool, initialized, narenas); + + sz = narenas * sizeof(bool); + assert_d_eq(mallctl("arenas.initialized", initialized, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_lg_dirty_mult) +{ + ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + + lg_dirty_mult = -2; + assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + lg_dirty_mult = (sizeof(size_t) << 3); + assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, + &lg_dirty_mult, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; + lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = + lg_dirty_mult, lg_dirty_mult++) { + ssize_t old_lg_dirty_mult; + + assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult, + &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, + "Unexpected old arenas.lg_dirty_mult"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_constants) +{ + +#define 
TEST_ARENAS_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \ + "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); + TEST_ARENAS_CONSTANT(size_t, page, PAGE); + TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); + TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses); + TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses); + +#undef TEST_ARENAS_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_bin_constants) +{ + +#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \ + 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); + TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); + TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); + +#undef TEST_ARENAS_BIN_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_lrun_constants) +{ + +#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS); + +#undef TEST_ARENAS_LRUN_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_hchunk_constants) +{ + +#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize); + +#undef TEST_ARENAS_HCHUNK_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_extend) +{ + unsigned narenas_before, arena, narenas_after; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_u_eq(narenas_before+1, narenas_after, + "Unexpected number of arenas before versus after extension"); + assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); +} +TEST_END + +TEST_BEGIN(test_stats_arenas) +{ + +#define TEST_STATS_ARENAS(t, name) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ +} while (0) + + TEST_STATS_ARENAS(const char *, dss); + TEST_STATS_ARENAS(unsigned, nthreads); + TEST_STATS_ARENAS(size_t, pactive); + TEST_STATS_ARENAS(size_t, pdirty); + +#undef TEST_STATS_ARENAS +} +TEST_END + +int +main(void) +{ + + return (test( + test_mallctl_errors, + test_mallctlnametomib_errors, + test_mallctlbymib_errors, + test_mallctl_read_write, + test_mallctlnametomib_short_mib, + test_mallctl_config, + test_mallctl_opt, + test_manpage_example, + test_tcache_none, + test_tcache, + test_thread_arena, + test_arena_i_lg_dirty_mult, + test_arena_i_purge, + test_arena_i_dss, + test_arenas_initialized, + test_arenas_lg_dirty_mult, + 
test_arenas_constants, + test_arenas_bin_constants, + test_arenas_lrun_constants, + test_arenas_hchunk_constants, + test_arenas_extend, + test_stats_arenas)); +} diff --git a/redis.submodule/jemalloc/test/unit/math.c b/redis.submodule/jemalloc/test/unit/math.c new file mode 100644 index 0000000..ebec77a --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/math.c @@ -0,0 +1,394 @@ +#include "test/jemalloc_test.h" + +#define MAX_REL_ERR 1.0e-9 +#define MAX_ABS_ERR 1.0e-9 + +#include + +#ifndef INFINITY +#define INFINITY (DBL_MAX + DBL_MAX) +#endif + +static bool +double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) +{ + double rel_err; + + if (fabs(a - b) < max_abs_err) + return (true); + rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); + return (rel_err < max_rel_err); +} + +static uint64_t +factorial(unsigned x) +{ + uint64_t ret = 1; + unsigned i; + + for (i = 2; i <= x; i++) + ret *= (uint64_t)i; + + return (ret); +} + +TEST_BEGIN(test_ln_gamma_factorial) +{ + unsigned x; + + /* exp(ln_gamma(x)) == (x-1)! for integer x. */ + for (x = 1; x <= 21; x++) { + assert_true(double_eq_rel(exp(ln_gamma(x)), + (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect factorial result for x=%u", x); + } +} +TEST_END + +/* Expected ln_gamma([0.0..100.0] increment=0.25). */ +static const double ln_gamma_misc_expected[] = { + INFINITY, + 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, + 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, + -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, + 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, + 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, + 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, + 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, + 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, + 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, + 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, + 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, + 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, + 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, + 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, + 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, + 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, + 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, + 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, + 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, + 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, + 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, + 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, + 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, + 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, + 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, + 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, + 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, + 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, + 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, + 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, + 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, + 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, + 
52.39726942748592364, 53.19049452616926743, 53.98632346204390586, + 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, + 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, + 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, + 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, + 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, + 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, + 69.56908092082363737, 70.41199165894616385, 71.25703896716800045, + 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, + 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, + 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, + 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, + 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, + 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, + 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, + 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, + 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, + 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, + 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, + 101.14611615586458981, 102.05634043243354370, 102.96819861451382394, + 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, + 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, + 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, + 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, + 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, + 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, + 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, + 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, + 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, + 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, + 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, + 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, + 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, + 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, + 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, + 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, + 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, + 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, + 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, + 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, + 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, + 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, + 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, + 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, + 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, + 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, + 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, + 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, + 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, + 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, + 191.70979404894376330, 192.73904728784492590, 
193.76931676731820176, + 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, + 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, + 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, + 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, + 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, + 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, + 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, + 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, + 219.84484974781133815, 220.90018791517996988, 221.95644181913033322, + 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, + 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, + 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, + 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, + 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, + 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, + 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, + 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, + 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, + 251.89040220972316320, 252.97185064629374551, 254.05412415488834199, + 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, + 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, + 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, + 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, + 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, + 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, + 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, + 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, + 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, + 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, + 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, + 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, + 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, + 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, + 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, + 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, + 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, + 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, + 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, + 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, + 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, + 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, + 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, + 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, + 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, + 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, + 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, + 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, + 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, + 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, + 355.68691771819692349, 356.83538282361303118, 
357.98447923746385868, + 359.13420536957539753 +}; + +TEST_BEGIN(test_ln_gamma_misc) +{ + unsigned i; + + for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { + double x = (double)i * 0.25; + assert_true(double_eq_rel(ln_gamma(x), + ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect ln_gamma result for i=%u", i); + } +} +TEST_END + +/* Expected pt_norm([0.01..0.99] increment=0.01). */ +static const double pt_norm_expected[] = { + -INFINITY, + -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, + -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, + -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, + -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, + -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, + -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, + -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, + -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, + -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, + -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, + -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, + -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, + -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, + -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, + -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, + -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, + -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, + 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, + 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, + 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, + 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, + 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, + 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, + 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, + 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, + 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, + 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, + 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, + 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, + 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, + 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, + 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, + 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 +}; + +TEST_BEGIN(test_pt_norm) +{ + unsigned i; + + for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { + double p = (double)i * 0.01; + assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], + MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_norm result for i=%u", i); + } +} +TEST_END + +/* + * Expected pt_chi2(p=[0.01..0.99] increment=0.07, + * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
+ */ +static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; +static const double pt_chi2_expected[] = { + 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, + 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, + 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, + 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, + 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, + + 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, + 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, + 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, + 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, + 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, + + 2.606673548632508, 4.602913725294877, 5.646152813924212, + 6.488971315540869, 7.249823275816285, 7.977314231410841, + 8.700354939944047, 9.441728024225892, 10.224338321374127, + 11.076435368801061, 12.039320937038386, 13.183878752697167, + 14.657791935084575, 16.885728216339373, 23.361991680031817, + + 70.14844087392152, 80.92379498849355, 85.53325420085891, + 88.94433120715347, 91.83732712857017, 94.46719943606301, + 96.96896479994635, 99.43412843510363, 101.94074719829733, + 104.57228644307247, 107.43900093448734, 110.71844673417287, + 114.76616819871325, 120.57422505959563, 135.92318818757556, + + 899.0072447849649, 937.9271278858220, 953.8117189560207, + 965.3079371501154, 974.8974061207954, 983.4936235182347, + 991.5691170518946, 999.4334123954690, 1007.3391826856553, + 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, + 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 +}; + +TEST_BEGIN(test_pt_chi2) +{ + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { + double df = pt_chi2_df[i]; + double ln_gamma_df = ln_gamma(df * 0.5); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), + pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), + "Incorrect pt_chi2 result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +/* + * Expected pt_gamma(p=[0.1..0.99] increment=0.07, + * shape=[0.5..3.0] increment=0.5). 
+ */ +static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; +static const double pt_gamma_expected[] = { + 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, + 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, + 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, + 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, + 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, + + 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, + 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, + 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, + 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, + 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, + + 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, + 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, + 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, + 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, + 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, + + 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, + 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, + 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, + 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, + 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, + + 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, + 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, + 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, + 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, + 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, + + 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, + 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, + 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, + 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, + 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 +}; + +TEST_BEGIN(test_pt_gamma_shape) +{ + unsigned i, j; + unsigned e = 0; + + for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { + double shape = pt_gamma_shape[i]; + double ln_gamma_shape = ln_gamma(shape); + for (j = 1; j < 100; j += 7) { + double p = (double)j * 0.01; + assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, + ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, + MAX_ABS_ERR), + "Incorrect pt_gamma result for i=%u, j=%u", i, j); + e++; + } + } +} +TEST_END + +TEST_BEGIN(test_pt_gamma_scale) +{ + double shape = 1.0; + double ln_gamma_shape = ln_gamma(shape); + + assert_true(double_eq_rel( + pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, + pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, + MAX_ABS_ERR), + "Scale should be trivially equivalent to external multiplication"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_ln_gamma_factorial, + test_ln_gamma_misc, + test_pt_norm, + test_pt_chi2, + test_pt_gamma_shape, + test_pt_gamma_scale)); +} diff --git a/redis.submodule/jemalloc/test/unit/mq.c b/redis.submodule/jemalloc/test/unit/mq.c new file mode 100644 index 0000000..bde2a48 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/mq.c @@ -0,0 +1,93 @@ +#include "test/jemalloc_test.h" + +#define NSENDERS 3 +#define NMSGS 100000 + +typedef struct mq_msg_s mq_msg_t; +struct mq_msg_s { + mq_msg(mq_msg_t) link; +}; +mq_gen(static, mq_, mq_t, mq_msg_t, link) + +TEST_BEGIN(test_mq_basic) +{ + mq_t mq; + mq_msg_t msg; + + 
assert_false(mq_init(&mq), "Unexpected mq_init() failure"); + assert_u_eq(mq_count(&mq), 0, "mq should be empty"); + assert_ptr_null(mq_tryget(&mq), + "mq_tryget() should fail when the queue is empty"); + + mq_put(&mq, &msg); + assert_u_eq(mq_count(&mq), 1, "mq should contain one message"); + assert_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg"); + + mq_put(&mq, &msg); + assert_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg"); + + mq_fini(&mq); +} +TEST_END + +static void * +thd_receiver_start(void *arg) +{ + mq_t *mq = (mq_t *)arg; + unsigned i; + + for (i = 0; i < (NSENDERS * NMSGS); i++) { + mq_msg_t *msg = mq_get(mq); + assert_ptr_not_null(msg, "mq_get() should never return NULL"); + dallocx(msg, 0); + } + return (NULL); +} + +static void * +thd_sender_start(void *arg) +{ + mq_t *mq = (mq_t *)arg; + unsigned i; + + for (i = 0; i < NMSGS; i++) { + mq_msg_t *msg; + void *p; + p = mallocx(sizeof(mq_msg_t), 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + msg = (mq_msg_t *)p; + mq_put(mq, msg); + } + return (NULL); +} + +TEST_BEGIN(test_mq_threaded) +{ + mq_t mq; + thd_t receiver; + thd_t senders[NSENDERS]; + unsigned i; + + assert_false(mq_init(&mq), "Unexpected mq_init() failure"); + + thd_create(&receiver, thd_receiver_start, (void *)&mq); + for (i = 0; i < NSENDERS; i++) + thd_create(&senders[i], thd_sender_start, (void *)&mq); + + thd_join(receiver, NULL); + for (i = 0; i < NSENDERS; i++) + thd_join(senders[i], NULL); + + mq_fini(&mq); +} +TEST_END + +int +main(void) +{ + + return (test( + test_mq_basic, + test_mq_threaded)); +} + diff --git a/redis.submodule/jemalloc/test/unit/mtx.c b/redis.submodule/jemalloc/test/unit/mtx.c new file mode 100644 index 0000000..96ff694 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/mtx.c @@ -0,0 +1,60 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 2 +#define NINCRS 2000000 + +TEST_BEGIN(test_mtx_basic) +{ + mtx_t mtx; + + assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); + mtx_lock(&mtx); + mtx_unlock(&mtx); + mtx_fini(&mtx); +} +TEST_END + +typedef struct { + mtx_t mtx; + unsigned x; +} thd_start_arg_t; + +static void * +thd_start(void *varg) +{ + thd_start_arg_t *arg = (thd_start_arg_t *)varg; + unsigned i; + + for (i = 0; i < NINCRS; i++) { + mtx_lock(&arg->mtx); + arg->x++; + mtx_unlock(&arg->mtx); + } + return (NULL); +} + +TEST_BEGIN(test_mtx_race) +{ + thd_start_arg_t arg; + thd_t thds[NTHREADS]; + unsigned i; + + assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); + arg.x = 0; + for (i = 0; i < NTHREADS; i++) + thd_create(&thds[i], thd_start, (void *)&arg); + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); + assert_u_eq(arg.x, NTHREADS * NINCRS, + "Race-related counter corruption"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_mtx_basic, + test_mtx_race)); +} diff --git a/redis.submodule/jemalloc/test/unit/prof_accum.c b/redis.submodule/jemalloc/test/unit/prof_accum.c new file mode 100644 index 0000000..fd229e0 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/prof_accum.c @@ -0,0 +1,91 @@ +#include "test/jemalloc_test.h" + +#define NTHREADS 4 +#define NALLOCS_PER_THREAD 50 +#define DUMP_INTERVAL 1 +#define BT_COUNT_CHECK_INTERVAL 5 + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"; +#endif + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() 
failure"); + + return (fd); +} + +static void * +alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) +{ + + return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration)); +} + +static void * +thd_start(void *varg) +{ + unsigned thd_ind = *(unsigned *)varg; + size_t bt_count_prev, bt_count; + unsigned i_prev, i; + + i_prev = 0; + bt_count_prev = 0; + for (i = 0; i < NALLOCS_PER_THREAD; i++) { + void *p = alloc_from_permuted_backtrace(thd_ind, i); + dallocx(p, 0); + if (i % DUMP_INTERVAL == 0) { + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + } + + if (i % BT_COUNT_CHECK_INTERVAL == 0 || + i+1 == NALLOCS_PER_THREAD) { + bt_count = prof_bt_count(); + assert_zu_le(bt_count_prev+(i-i_prev), bt_count, + "Expected larger backtrace count increase"); + i_prev = i; + bt_count_prev = bt_count; + } + } + + return (NULL); +} + +TEST_BEGIN(test_idump) +{ + bool active; + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + + test_skip_if(!config_prof); + + active = true; + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure while activating profiling"); + + prof_dump_open = prof_dump_open_intercept; + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END + +int +main(void) +{ + + return (test( + test_idump)); +} diff --git a/redis.submodule/jemalloc/test/unit/prof_active.c b/redis.submodule/jemalloc/test/unit/prof_active.c new file mode 100644 index 0000000..8149095 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/prof_active.c @@ -0,0 +1,136 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_thread_active_init:false,lg_prof_sample:0"; +#endif + +static void +mallctl_bool_get(const char *name, bool expected, const char *func, int line) +{ + bool old; + size_t sz; + + sz = sizeof(old); + assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0, + "%s():%d: Unexpected mallctl failure reading %s", func, line, name); + assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, + name); +} + +static void +mallctl_bool_set(const char *name, bool old_expected, bool val_new, + const char *func, int line) +{ + bool old; + size_t sz; + + sz = sizeof(old); + assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0, + "%s():%d: Unexpected mallctl failure reading/writing %s", func, + line, name); + assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, + line, name); +} + +static void +mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, + int line) +{ + + mallctl_bool_get("prof.active", prof_active_old_expected, func, line); +} +#define mallctl_prof_active_get(a) \ + mallctl_prof_active_get_impl(a, __func__, __LINE__) + +static void +mallctl_prof_active_set_impl(bool prof_active_old_expected, + bool prof_active_new, const char *func, int line) +{ + + mallctl_bool_set("prof.active", prof_active_old_expected, + prof_active_new, func, line); +} +#define mallctl_prof_active_set(a, b) \ + mallctl_prof_active_set_impl(a, b, __func__, __LINE__) + +static void +mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, + const char *func, int line) +{ + + mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, + func, line); +} +#define mallctl_thread_prof_active_get(a) \ + 
mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) + +static void +mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, + bool thread_prof_active_new, const char *func, int line) +{ + + mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, + thread_prof_active_new, func, line); +} +#define mallctl_thread_prof_active_set(a, b) \ + mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) + +static void +prof_sampling_probe_impl(bool expect_sample, const char *func, int line) +{ + void *p; + size_t expected_backtraces = expect_sample ? 1 : 0; + + assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, + line); + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_zu_eq(prof_bt_count(), expected_backtraces, + "%s():%d: Unexpected backtrace count", func, line); + dallocx(p, 0); +} +#define prof_sampling_probe(a) \ + prof_sampling_probe_impl(a, __func__, __LINE__) + +TEST_BEGIN(test_prof_active) +{ + + test_skip_if(!config_prof); + + mallctl_prof_active_get(true); + mallctl_thread_prof_active_get(false); + + mallctl_prof_active_set(true, true); + mallctl_thread_prof_active_set(false, false); + /* prof.active, !thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(true, false); + mallctl_thread_prof_active_set(false, false); + /* !prof.active, !thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(false, false); + mallctl_thread_prof_active_set(false, true); + /* !prof.active, thread.prof.active. */ + prof_sampling_probe(false); + + mallctl_prof_active_set(false, true); + mallctl_thread_prof_active_set(true, true); + /* prof.active, thread.prof.active. */ + prof_sampling_probe(true); + + /* Restore settings. 
*/
+ mallctl_prof_active_set(true, true);
+ mallctl_thread_prof_active_set(true, false);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_prof_active));
+}
diff --git a/redis.submodule/jemalloc/test/unit/prof_gdump.c b/redis.submodule/jemalloc/test/unit/prof_gdump.c
new file mode 100644
index 0000000..a0e6ee9
--- /dev/null
+++ b/redis.submodule/jemalloc/test/unit/prof_gdump.c
@@ -0,0 +1,81 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true";
+#endif
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+TEST_BEGIN(test_gdump)
+{
+ bool active, gdump, gdump_old;
+ void *p, *q, *r, *s;
+ size_t sz;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx(chunksize, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ did_prof_dump_open = false;
+ q = mallocx(chunksize, 0);
+ assert_ptr_not_null(q, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ gdump = false;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
+ "Unexpected mallctl failure while disabling prof.gdump");
+ assert(gdump_old);
+ did_prof_dump_open = false;
+ r = mallocx(chunksize, 0);
+ assert_ptr_not_null(r, "Unexpected mallocx() failure");
+ assert_false(did_prof_dump_open, "Unexpected profile dump");
+
+ gdump = true;
+ sz = sizeof(gdump_old);
+ assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump,
+ sizeof(gdump)), 0,
+ "Unexpected mallctl failure while enabling prof.gdump");
+ assert(!gdump_old);
+ did_prof_dump_open = false;
+ s = mallocx(chunksize, 0);
+ assert_ptr_not_null(s, "Unexpected mallocx() failure");
+ assert_true(did_prof_dump_open, "Expected a profile dump");
+
+ dallocx(p, 0);
+ dallocx(q, 0);
+ dallocx(r, 0);
+ dallocx(s, 0);
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_gdump));
+}
diff --git a/redis.submodule/jemalloc/test/unit/prof_idump.c b/redis.submodule/jemalloc/test/unit/prof_idump.c
new file mode 100644
index 0000000..bdea53e
--- /dev/null
+++ b/redis.submodule/jemalloc/test/unit/prof_idump.c
@@ -0,0 +1,51 @@
+#include "test/jemalloc_test.h"
+
+#ifdef JEMALLOC_PROF
+const char *malloc_conf =
+ "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
+ "lg_prof_interval:0";
+#endif
+
+static bool did_prof_dump_open;
+
+static int
+prof_dump_open_intercept(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ did_prof_dump_open = true;
+
+ fd = open("/dev/null", O_WRONLY);
+ assert_d_ne(fd, -1, "Unexpected open() failure");
+
+ return (fd);
+}
+
+TEST_BEGIN(test_idump)
+{
+ bool active;
+ void *p;
+
+ test_skip_if(!config_prof);
+
+ active = true;
+ assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+ 0, "Unexpected mallctl failure while activating profiling");
+
+ prof_dump_open = prof_dump_open_intercept;
+
+ did_prof_dump_open = false;
+ p = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+ dallocx(p, 0);
+
assert_true(did_prof_dump_open, "Expected a profile dump"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_idump)); +} diff --git a/redis.submodule/jemalloc/test/unit/prof_reset.c b/redis.submodule/jemalloc/test/unit/prof_reset.c new file mode 100644 index 0000000..69983e5 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/prof_reset.c @@ -0,0 +1,302 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = + "prof:true,prof_active:false,lg_prof_sample:0"; +#endif + +static int +prof_dump_open_intercept(bool propagate_err, const char *filename) +{ + int fd; + + fd = open("/dev/null", O_WRONLY); + assert_d_ne(fd, -1, "Unexpected open() failure"); + + return (fd); +} + +static void +set_prof_active(bool active) +{ + + assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), + 0, "Unexpected mallctl failure"); +} + +static size_t +get_lg_prof_sample(void) +{ + size_t lg_prof_sample; + size_t sz = sizeof(size_t); + + assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, + "Unexpected mallctl failure while reading profiling sample rate"); + return (lg_prof_sample); +} + +static void +do_prof_reset(size_t lg_prof_sample) +{ + assert_d_eq(mallctl("prof.reset", NULL, NULL, + &lg_prof_sample, sizeof(size_t)), 0, + "Unexpected mallctl failure while resetting profile data"); + assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), + "Expected profile sample rate change"); +} + +TEST_BEGIN(test_prof_reset_basic) +{ + size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; + size_t sz; + unsigned i; + + test_skip_if(!config_prof); + + sz = sizeof(size_t); + assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz, + NULL, 0), 0, + "Unexpected mallctl failure while reading profiling sample rate"); + assert_zu_eq(lg_prof_sample_orig, 0, + "Unexpected profiling sample rate"); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected disagreement between \"opt.lg_prof_sample\" and " + "\"prof.lg_sample\""); + + /* Test simple resets. */ + for (i = 0; i < 2; i++) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure while resetting profile data"); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected profile sample rate change"); + } + + /* Test resets with prof.lg_sample changes. */ + lg_prof_sample_next = 1; + for (i = 0; i < 2; i++) { + do_prof_reset(lg_prof_sample_next); + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample, lg_prof_sample_next, + "Expected profile sample rate change"); + lg_prof_sample_next = lg_prof_sample_orig; + } + + /* Make sure the test code restored prof.lg_sample. 
*/ + lg_prof_sample = get_lg_prof_sample(); + assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, + "Unexpected disagreement between \"opt.lg_prof_sample\" and " + "\"prof.lg_sample\""); +} +TEST_END + +bool prof_dump_header_intercepted = false; +prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; +static bool +prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all) +{ + + prof_dump_header_intercepted = true; + memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); + + return (false); +} + +TEST_BEGIN(test_prof_reset_cleanup) +{ + void *p; + prof_dump_header_t *prof_dump_header_orig; + + test_skip_if(!config_prof); + + set_prof_active(true); + + assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); + + prof_dump_header_orig = prof_dump_header; + prof_dump_header = prof_dump_header_intercept; + assert_false(prof_dump_header_intercepted, "Unexpected intercept"); + + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + assert_true(prof_dump_header_intercepted, "Expected intercept"); + assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation"); + + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected error while resetting heap profile data"); + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations"); + assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); + + prof_dump_header = prof_dump_header_orig; + + dallocx(p, 0); + assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); + + set_prof_active(false); +} +TEST_END + +#define NTHREADS 4 +#define NALLOCS_PER_THREAD (1U << 13) +#define OBJ_RING_BUF_COUNT 1531 +#define RESET_INTERVAL (1U << 10) +#define DUMP_INTERVAL 3677 +static void * +thd_start(void *varg) +{ + unsigned thd_ind = *(unsigned *)varg; + unsigned i; + void *objs[OBJ_RING_BUF_COUNT]; + + memset(objs, 0, sizeof(objs)); + + for (i = 0; i < NALLOCS_PER_THREAD; i++) { + if (i % RESET_INTERVAL == 0) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), + 0, "Unexpected error while resetting heap profile " + "data"); + } + + if (i % DUMP_INTERVAL == 0) { + assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), + 0, "Unexpected error while dumping heap profile"); + } + + { + void **pp = &objs[i % OBJ_RING_BUF_COUNT]; + if (*pp != NULL) { + dallocx(*pp, 0); + *pp = NULL; + } + *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); + assert_ptr_not_null(*pp, + "Unexpected btalloc() failure"); + } + } + + /* Clean up any remaining objects. 
*/
+ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
+ void **pp = &objs[i % OBJ_RING_BUF_COUNT];
+ if (*pp != NULL) {
+ dallocx(*pp, 0);
+ *pp = NULL;
+ }
+ }
+
+ return (NULL);
+}
+
+TEST_BEGIN(test_prof_reset)
+{
+ size_t lg_prof_sample_orig;
+ thd_t thds[NTHREADS];
+ unsigned thd_args[NTHREADS];
+ unsigned i;
+ size_t bt_count, tdata_count;
+
+ test_skip_if(!config_prof);
+
+ bt_count = prof_bt_count();
+ assert_zu_eq(bt_count, 0,
+ "Unexpected pre-existing backtrace structures");
+ tdata_count = prof_tdata_count();
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ do_prof_reset(5);
+
+ set_prof_active(true);
+
+ for (i = 0; i < NTHREADS; i++) {
+ thd_args[i] = i;
+ thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
+ }
+ for (i = 0; i < NTHREADS; i++)
+ thd_join(thds[i], NULL);
+
+ assert_zu_eq(prof_bt_count(), bt_count,
+ "Unexpected backtrace count change");
+ assert_zu_eq(prof_tdata_count(), tdata_count,
+ "Unexpected remaining tdata structures");
+
+ set_prof_active(false);
+
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NTHREADS
+#undef NALLOCS_PER_THREAD
+#undef OBJ_RING_BUF_COUNT
+#undef RESET_INTERVAL
+#undef DUMP_INTERVAL
+
+/* Test sampling at the same allocation site across resets. */
+#define NITER 10
+TEST_BEGIN(test_xallocx)
+{
+ size_t lg_prof_sample_orig;
+ unsigned i;
+ void *ptrs[NITER];
+
+ test_skip_if(!config_prof);
+
+ lg_prof_sample_orig = get_lg_prof_sample();
+ set_prof_active(true);
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ for (i = 0; i < NITER; i++) {
+ void *p;
+ size_t sz, nsz;
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Allocate small object (which will be promoted). */
+ p = ptrs[i] = mallocx(1, 0);
+ assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+ /* Reset profiling. */
+ do_prof_reset(0);
+
+ /* Perform successful xallocx(). */
+ sz = sallocx(p, 0);
+ assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+ "Unexpected xallocx() failure");
+
+ /* Perform unsuccessful xallocx(). */
+ nsz = nallocx(sz+1, 0);
+ assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+ "Unexpected xallocx() success");
+ }
+
+ for (i = 0; i < NITER; i++) {
+ /* dallocx. */
+ dallocx(ptrs[i], 0);
+ }
+
+ set_prof_active(false);
+ do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
+int
+main(void)
+{
+
+ /* Intercept dumping prior to running any tests.
*/ + prof_dump_open = prof_dump_open_intercept; + + return (test( + test_prof_reset_basic, + test_prof_reset_cleanup, + test_prof_reset, + test_xallocx)); +} diff --git a/redis.submodule/jemalloc/test/unit/prof_thread_name.c b/redis.submodule/jemalloc/test/unit/prof_thread_name.c new file mode 100644 index 0000000..f501158 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/prof_thread_name.c @@ -0,0 +1,129 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_PROF +const char *malloc_conf = "prof:true,prof_active:false"; +#endif + +static void +mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, + int line) +{ + const char *thread_name_old; + size_t sz; + + sz = sizeof(thread_name_old); + assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0), + 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", + func, line); + assert_str_eq(thread_name_old, thread_name_expected, + "%s():%d: Unexpected thread.prof.name value", func, line); +} +#define mallctl_thread_name_get(a) \ + mallctl_thread_name_get_impl(a, __func__, __LINE__) + +static void +mallctl_thread_name_set_impl(const char *thread_name, const char *func, + int line) +{ + + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, + sizeof(thread_name)), 0, + "%s():%d: Unexpected mallctl failure reading thread.prof.name", + func, line); + mallctl_thread_name_get_impl(thread_name, func, line); +} +#define mallctl_thread_name_set(a) \ + mallctl_thread_name_set_impl(a, __func__, __LINE__) + +TEST_BEGIN(test_prof_thread_name_validation) +{ + const char *thread_name; + + test_skip_if(!config_prof); + + mallctl_thread_name_get(""); + mallctl_thread_name_set("hi there"); + + /* NULL input shouldn't be allowed. */ + thread_name = NULL; + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, + sizeof(thread_name)), EFAULT, + "Unexpected mallctl result writing \"%s\" to thread.prof.name", + thread_name); + + /* '\n' shouldn't be allowed. */ + thread_name = "hi\nthere"; + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, + sizeof(thread_name)), EFAULT, + "Unexpected mallctl result writing \"%s\" to thread.prof.name", + thread_name); + + /* Simultaneous read/write shouldn't be allowed. 
*/ + { + const char *thread_name_old; + size_t sz; + + sz = sizeof(thread_name_old); + assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, + &thread_name, sizeof(thread_name)), EPERM, + "Unexpected mallctl result writing \"%s\" to " + "thread.prof.name", thread_name); + } + + mallctl_thread_name_set(""); +} +TEST_END + +#define NTHREADS 4 +#define NRESET 25 +static void * +thd_start(void *varg) +{ + unsigned thd_ind = *(unsigned *)varg; + char thread_name[16] = ""; + unsigned i; + + malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); + + mallctl_thread_name_get(""); + mallctl_thread_name_set(thread_name); + + for (i = 0; i < NRESET; i++) { + assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, + "Unexpected error while resetting heap profile data"); + mallctl_thread_name_get(thread_name); + } + + mallctl_thread_name_set(thread_name); + mallctl_thread_name_set(""); + + return (NULL); +} + +TEST_BEGIN(test_prof_thread_name_threaded) +{ + thd_t thds[NTHREADS]; + unsigned thd_args[NTHREADS]; + unsigned i; + + test_skip_if(!config_prof); + + for (i = 0; i < NTHREADS; i++) { + thd_args[i] = i; + thd_create(&thds[i], thd_start, (void *)&thd_args[i]); + } + for (i = 0; i < NTHREADS; i++) + thd_join(thds[i], NULL); +} +TEST_END +#undef NTHREADS +#undef NRESET + +int +main(void) +{ + + return (test( + test_prof_thread_name_validation, + test_prof_thread_name_threaded)); +} diff --git a/redis.submodule/jemalloc/test/unit/ql.c b/redis.submodule/jemalloc/test/unit/ql.c new file mode 100644 index 0000000..05fad45 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/ql.c @@ -0,0 +1,209 @@ +#include "test/jemalloc_test.h" + +/* Number of ring entries, in [2..26]. */ +#define NENTRIES 9 + +typedef struct list_s list_t; +typedef ql_head(list_t) list_head_t; + +struct list_s { + ql_elm(list_t) link; + char id; +}; + +static void +test_empty_list(list_head_t *head) +{ + list_t *t; + unsigned i; + + assert_ptr_null(ql_first(head), "Unexpected element for empty list"); + assert_ptr_null(ql_last(head, link), + "Unexpected element for empty list"); + + i = 0; + ql_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); + + i = 0; + ql_reverse_foreach(t, head, link) { + i++; + } + assert_u_eq(i, 0, "Unexpected element for empty list"); +} + +TEST_BEGIN(test_ql_empty) +{ + list_head_t head; + + ql_new(&head); + test_empty_list(&head); +} +TEST_END + +static void +init_entries(list_t *entries, unsigned nentries) +{ + unsigned i; + + for (i = 0; i < nentries; i++) { + entries[i].id = 'a' + i; + ql_elm_new(&entries[i], link); + } +} + +static void +test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) +{ + list_t *t; + unsigned i; + + assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); + assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, + "Element id mismatch"); + + i = 0; + ql_foreach(t, head, link) { + assert_c_eq(t->id, entries[i].id, "Element id mismatch"); + i++; + } + + i = 0; + ql_reverse_foreach(t, head, link) { + assert_c_eq(t->id, entries[nentries-i-1].id, + "Element id mismatch"); + i++; + } + + for (i = 0; i < nentries-1; i++) { + t = ql_next(head, &entries[i], link); + assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); + } + assert_ptr_null(ql_next(head, &entries[nentries-1], link), + "Unexpected element"); + + assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); + for (i = 1; i < nentries; i++) { + t = ql_prev(head, &entries[i], link); + 
assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); + } +} + +TEST_BEGIN(test_ql_tail_insert) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_tail_insert(&head, &entries[i], link); + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_tail_remove) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_tail_insert(&head, &entries[i], link); + + for (i = 0; i < NENTRIES; i++) { + test_entries_list(&head, entries, NENTRIES-i); + ql_tail_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_head_insert) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + + test_entries_list(&head, entries, NENTRIES); +} +TEST_END + +TEST_BEGIN(test_ql_head_remove) +{ + list_head_t head; + list_t entries[NENTRIES]; + unsigned i; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + for (i = 0; i < NENTRIES; i++) + ql_head_insert(&head, &entries[NENTRIES-i-1], link); + + for (i = 0; i < NENTRIES; i++) { + test_entries_list(&head, &entries[i], NENTRIES-i); + ql_head_remove(&head, list_t, link); + } + test_empty_list(&head); +} +TEST_END + +TEST_BEGIN(test_ql_insert) +{ + list_head_t head; + list_t entries[8]; + list_t *a, *b, *c, *d, *e, *f, *g, *h; + + ql_new(&head); + init_entries(entries, sizeof(entries)/sizeof(list_t)); + a = &entries[0]; + b = &entries[1]; + c = &entries[2]; + d = &entries[3]; + e = &entries[4]; + f = &entries[5]; + g = &entries[6]; + h = &entries[7]; + + /* + * ql_remove(), ql_before_insert(), and ql_after_insert() are used + * internally by other macros that are already tested, so there's no + * need to test them completely. However, insertion/deletion from the + * middle of lists is not otherwise tested; do so here. + */ + ql_tail_insert(&head, f, link); + ql_before_insert(&head, f, b, link); + ql_before_insert(&head, f, c, link); + ql_after_insert(f, h, link); + ql_after_insert(f, g, link); + ql_before_insert(&head, b, a, link); + ql_after_insert(c, d, link); + ql_before_insert(&head, f, e, link); + + test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); +} +TEST_END + +int +main(void) +{ + + return (test( + test_ql_empty, + test_ql_tail_insert, + test_ql_tail_remove, + test_ql_head_insert, + test_ql_head_remove, + test_ql_insert)); +} diff --git a/redis.submodule/jemalloc/test/unit/qr.c b/redis.submodule/jemalloc/test/unit/qr.c new file mode 100644 index 0000000..a2a2d90 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/qr.c @@ -0,0 +1,248 @@ +#include "test/jemalloc_test.h" + +/* Number of ring entries, in [2..26]. */ +#define NENTRIES 9 +/* Split index, in [1..NENTRIES). 
*/ +#define SPLIT_INDEX 5 + +typedef struct ring_s ring_t; + +struct ring_s { + qr(ring_t) link; + char id; +}; + +static void +init_entries(ring_t *entries) +{ + unsigned i; + + for (i = 0; i < NENTRIES; i++) { + qr_new(&entries[i], link); + entries[i].id = 'a' + i; + } +} + +static void +test_independent_entries(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + j++; + } + assert_u_eq(j, 1, + "Iteration over single-element ring should visit precisely " + "one element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Next element in single-element ring should be same as " + "current element"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_ptr_eq(t, &entries[i], + "Previous element in single-element ring should be same as " + "current element"); + } +} + +TEST_BEGIN(test_qr_one) +{ + ring_t entries[NENTRIES]; + + init_entries(entries); + test_independent_entries(entries); +} +TEST_END + +static void +test_entries_ring(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j-1) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } +} + +TEST_BEGIN(test_qr_after_insert) +{ + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + test_entries_ring(entries); +} +TEST_END + +TEST_BEGIN(test_qr_remove) +{ + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[i+j].id, + "Element id mismatch"); + j++; + } + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[NENTRIES - 1 - j].id, + "Element id mismatch"); + j++; + } + qr_remove(&entries[i], link); + } + test_independent_entries(entries); +} +TEST_END + +TEST_BEGIN(test_qr_before_insert) +{ + ring_t entries[NENTRIES]; + ring_t *t; + unsigned i, j; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_before_insert(&entries[i - 1], &entries[i], link); + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(NENTRIES+i-j) % + NENTRIES].id, "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_reverse_foreach(t, &entries[i], link) { + assert_c_eq(t->id, entries[(i+j+1) % NENTRIES].id, + "Element id mismatch"); + j++; + } + } + for (i = 0; i < NENTRIES; i++) { + t = qr_next(&entries[i], link); + assert_c_eq(t->id, 
entries[(NENTRIES+i-1) % NENTRIES].id, + "Element id mismatch"); + } + for (i = 0; i < NENTRIES; i++) { + t = qr_prev(&entries[i], link); + assert_c_eq(t->id, entries[(i+1) % NENTRIES].id, + "Element id mismatch"); + } +} +TEST_END + +static void +test_split_entries(ring_t *entries) +{ + ring_t *t; + unsigned i, j; + + for (i = 0; i < NENTRIES; i++) { + j = 0; + qr_foreach(t, &entries[i], link) { + if (i < SPLIT_INDEX) { + assert_c_eq(t->id, + entries[(i+j) % SPLIT_INDEX].id, + "Element id mismatch"); + } else { + assert_c_eq(t->id, entries[(i+j-SPLIT_INDEX) % + (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id, + "Element id mismatch"); + } + j++; + } + } +} + +TEST_BEGIN(test_qr_meld_split) +{ + ring_t entries[NENTRIES]; + unsigned i; + + init_entries(entries); + for (i = 1; i < NENTRIES; i++) + qr_after_insert(&entries[i - 1], &entries[i], link); + + qr_split(&entries[0], &entries[SPLIT_INDEX], link); + test_split_entries(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + test_split_entries(entries); + + qr_split(&entries[0], &entries[SPLIT_INDEX], link); + test_entries_ring(entries); + + qr_split(&entries[0], &entries[0], link); + test_entries_ring(entries); + + qr_meld(&entries[0], &entries[0], link); + test_entries_ring(entries); +} +TEST_END + +int +main(void) +{ + + return (test( + test_qr_one, + test_qr_after_insert, + test_qr_remove, + test_qr_before_insert, + test_qr_meld_split)); +} diff --git a/redis.submodule/jemalloc/test/unit/quarantine.c b/redis.submodule/jemalloc/test/unit/quarantine.c new file mode 100644 index 0000000..bbd48a5 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/quarantine.c @@ -0,0 +1,108 @@ +#include "test/jemalloc_test.h" + +#define QUARANTINE_SIZE 8192 +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) + +#ifdef JEMALLOC_FILL +const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:" + STRINGIFY(QUARANTINE_SIZE); +#endif + +void +quarantine_clear(void) +{ + void *p; + + p = mallocx(QUARANTINE_SIZE*2, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); +} + +TEST_BEGIN(test_quarantine) +{ +#define SZ ZU(256) +#define NQUARANTINED (QUARANTINE_SIZE/SZ) + void *quarantined[NQUARANTINED+1]; + size_t i, j; + + test_skip_if(!config_fill); + + assert_zu_eq(nallocx(SZ, 0), SZ, + "SZ=%zu does not precisely equal a size class", SZ); + + quarantine_clear(); + + /* + * Allocate enough regions to completely fill the quarantine, plus one + * more. The last iteration occurs with a completely full quarantine, + * but no regions should be drained from the quarantine until the last + * deallocation occurs. Therefore no region recycling should occur + * until after this loop completes. 
+ */ + for (i = 0; i < NQUARANTINED+1; i++) { + void *p = mallocx(SZ, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + quarantined[i] = p; + dallocx(p, 0); + for (j = 0; j < i; j++) { + assert_ptr_ne(p, quarantined[j], + "Quarantined region recycled too early; " + "i=%zu, j=%zu", i, j); + } + } +#undef NQUARANTINED +#undef SZ +} +TEST_END + +static bool detected_redzone_corruption; + +static void +arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, + size_t offset, uint8_t byte) +{ + + detected_redzone_corruption = true; +} + +TEST_BEGIN(test_quarantine_redzone) +{ + char *s; + arena_redzone_corruption_t *arena_redzone_corruption_orig; + + test_skip_if(!config_fill); + + arena_redzone_corruption_orig = arena_redzone_corruption; + arena_redzone_corruption = arena_redzone_corruption_replacement; + + /* Test underflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[-1] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + /* Test overflow. */ + detected_redzone_corruption = false; + s = (char *)mallocx(1, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + s[sallocx(s, 0)] = 0xbb; + dallocx(s, 0); + assert_true(detected_redzone_corruption, + "Did not detect redzone corruption"); + + arena_redzone_corruption = arena_redzone_corruption_orig; +} +TEST_END + +int +main(void) +{ + + return (test( + test_quarantine, + test_quarantine_redzone)); +} diff --git a/redis.submodule/jemalloc/test/unit/rb.c b/redis.submodule/jemalloc/test/unit/rb.c new file mode 100644 index 0000000..b38eb0e --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/rb.c @@ -0,0 +1,336 @@ +#include "test/jemalloc_test.h" + +#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ + a_type *rbp_bh_t; \ + for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ + rbp_bh_t != &(a_rbt)->rbt_nil; \ + rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ + if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ + (r_height)++; \ + } \ + } \ +} while (0) + +typedef struct node_s node_t; + +struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + rb_node(node_t) link; + uint64_t key; +}; + +static int +node_cmp(node_t *a, node_t *b) { + int ret; + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the tree, so force an + * arbitrary ordering for non-identical items with equal keys. 
+ */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return (ret); +} + +typedef rb_tree(node_t) tree_t; +rb_gen(static, tree_, tree_t, node_t, link, node_cmp); + +TEST_BEGIN(test_rb_empty) +{ + tree_t tree; + node_t key; + + tree_new(&tree); + + assert_true(tree_empty(&tree), "Tree should be empty"); + assert_ptr_null(tree_first(&tree), "Unexpected node"); + assert_ptr_null(tree_last(&tree), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); + + key.key = 0; + key.magic = NODE_MAGIC; + assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); +} +TEST_END + +static unsigned +tree_recurse(node_t *node, unsigned black_height, unsigned black_depth, + node_t *nil) +{ + unsigned ret = 0; + node_t *left_node = rbtn_left_get(node_t, link, node); + node_t *right_node = rbtn_right_get(node_t, link, node); + + if (!rbtn_red_get(node_t, link, node)) + black_depth++; + + /* Red nodes must be interleaved with black nodes. */ + if (rbtn_red_get(node_t, link, node)) { + assert_false(rbtn_red_get(node_t, link, left_node), + "Node should be black"); + assert_false(rbtn_red_get(node_t, link, right_node), + "Node should be black"); + } + + if (node == nil) + return (ret); + /* Self. */ + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Left subtree. */ + if (left_node != nil) + ret += tree_recurse(left_node, black_height, black_depth, nil); + else + ret += (black_depth != black_height); + + /* Right subtree. */ + if (right_node != nil) + ret += tree_recurse(right_node, black_height, black_depth, nil); + else + ret += (black_depth != black_height); + + return (ret); +} + +static node_t * +tree_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *i = (unsigned *)data; + node_t *search_node; + + assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); + + /* Test rb_search(). */ + search_node = tree_search(tree, node); + assert_ptr_eq(search_node, node, + "tree_search() returned unexpected node"); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_nsearch() returned unexpected node"); + + /* Test rb_psearch(). */ + search_node = tree_psearch(tree, node); + assert_ptr_eq(search_node, node, + "tree_psearch() returned unexpected node"); + + (*i)++; + + return (NULL); +} + +static unsigned +tree_iterate(tree_t *tree) +{ + unsigned i; + + i = 0; + tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return (i); +} + +static unsigned +tree_iterate_reverse(tree_t *tree) +{ + unsigned i; + + i = 0; + tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); + + return (i); +} + +static void +node_remove(tree_t *tree, node_t *node, unsigned nnodes) +{ + node_t *search_node; + unsigned black_height, imbalances; + + tree_remove(tree, node); + + /* Test rb_nsearch(). */ + search_node = tree_nsearch(tree, node); + if (search_node != NULL) { + assert_u64_ge(search_node->key, node->key, + "Key ordering error"); + } + + /* Test rb_psearch(). 
*/ + search_node = tree_psearch(tree, node); + if (search_node != NULL) { + assert_u64_le(search_node->key, node->key, + "Key ordering error"); + } + + node->magic = 0; + + rbtn_black_height(node_t, link, tree, black_height); + imbalances = tree_recurse(tree->rbt_root, black_height, 0, + &(tree->rbt_nil)); + assert_u_eq(imbalances, 0, "Tree is unbalanced"); + assert_u_eq(tree_iterate(tree), nnodes-1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(tree), nnodes-1, + "Unexpected node iteration count"); +} + +static node_t * +remove_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_next(tree, node); + + node_remove(tree, node, *nnodes); + + return (ret); +} + +static node_t * +remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) +{ + unsigned *nnodes = (unsigned *)data; + node_t *ret = tree_prev(tree, node); + + node_remove(tree, node, *nnodes); + + return (ret); +} + +TEST_BEGIN(test_rb_random) +{ +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t *sfmt; + uint64_t bag[NNODES]; + tree_t tree; + node_t nodes[NNODES]; + unsigned i, j, k, black_height, imbalances; + + sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) + bag[j] = j; + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) + bag[j] = NNODES - j - 1; + break; + default: + for (j = 0; j < NNODES; j++) + bag[j] = gen_rand64_range(sfmt, NNODES); + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize tree and nodes. */ + tree_new(&tree); + tree.rbt_nil.magic = 0; + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + tree_insert(&tree, &nodes[k]); + + rbtn_black_height(node_t, link, &tree, + black_height); + imbalances = tree_recurse(tree.rbt_root, + black_height, 0, &(tree.rbt_nil)); + assert_u_eq(imbalances, 0, + "Tree is unbalanced"); + + assert_u_eq(tree_iterate(&tree), k+1, + "Unexpected node iteration count"); + assert_u_eq(tree_iterate_reverse(&tree), k+1, + "Unexpected node iteration count"); + + assert_false(tree_empty(&tree), + "Tree should not be empty"); + assert_ptr_not_null(tree_first(&tree), + "Tree should not be empty"); + assert_ptr_not_null(tree_last(&tree), + "Tree should not be empty"); + + tree_next(&tree, &nodes[k]); + tree_prev(&tree, &nodes[k]); + } + + /* Remove nodes. 
*/ + switch (i % 4) { + case 0: + for (k = 0; k < j; k++) + node_remove(&tree, &nodes[k], j - k); + break; + case 1: + for (k = j; k > 0; k--) + node_remove(&tree, &nodes[k-1], k); + break; + case 2: { + node_t *start; + unsigned nnodes = j; + + start = NULL; + do { + start = tree_iter(&tree, start, + remove_iterate_cb, (void *)&nnodes); + nnodes--; + } while (start != NULL); + assert_u_eq(nnodes, 0, + "Removal terminated early"); + break; + } case 3: { + node_t *start; + unsigned nnodes = j; + + start = NULL; + do { + start = tree_reverse_iter(&tree, start, + remove_reverse_iterate_cb, + (void *)&nnodes); + nnodes--; + } while (start != NULL); + assert_u_eq(nnodes, 0, + "Removal terminated early"); + break; + } default: + not_reached(); + } + } + } + fini_gen_rand(sfmt); +#undef NNODES +#undef NBAGS +#undef SEED +} +TEST_END + +int +main(void) +{ + + return (test( + test_rb_empty, + test_rb_random)); +} diff --git a/redis.submodule/jemalloc/test/unit/rtree.c b/redis.submodule/jemalloc/test/unit/rtree.c new file mode 100644 index 0000000..b54b3e8 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/rtree.c @@ -0,0 +1,151 @@ +#include "test/jemalloc_test.h" + +static rtree_node_elm_t * +node_alloc(size_t nelms) +{ + + return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t))); +} + +static void +node_dalloc(rtree_node_elm_t *node) +{ + + free(node); +} + +TEST_BEGIN(test_rtree_get_empty) +{ + unsigned i; + + for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { + rtree_t rtree; + assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + "Unexpected rtree_new() failure"); + assert_ptr_null(rtree_get(&rtree, 0, false), + "rtree_get() should return NULL for empty tree"); + rtree_delete(&rtree); + } +} +TEST_END + +TEST_BEGIN(test_rtree_extrema) +{ + unsigned i; + extent_node_t node_a, node_b; + + for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { + rtree_t rtree; + assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + "Unexpected rtree_new() failure"); + + assert_false(rtree_set(&rtree, 0, &node_a), + "Unexpected rtree_set() failure"); + assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a, + "rtree_get() should return previously set value"); + + assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b), + "Unexpected rtree_set() failure"); + assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b, + "rtree_get() should return previously set value"); + + rtree_delete(&rtree); + } +} +TEST_END + +TEST_BEGIN(test_rtree_bits) +{ + unsigned i, j, k; + + for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { + uintptr_t keys[] = {0, 1, + (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; + extent_node_t node; + rtree_t rtree; + + assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + "Unexpected rtree_new() failure"); + + for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { + assert_false(rtree_set(&rtree, keys[j], &node), + "Unexpected rtree_set() failure"); + for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { + assert_ptr_eq(rtree_get(&rtree, keys[k], true), + &node, "rtree_get() should return " + "previously set value and ignore " + "insignificant key bits; i=%u, j=%u, k=%u, " + "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, + j, k, keys[j], keys[k]); + } + assert_ptr_null(rtree_get(&rtree, + (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), + "Only leftmost rtree leaf should be set; " + "i=%u, j=%u", i, j); + assert_false(rtree_set(&rtree, keys[j], NULL), + "Unexpected rtree_set() failure"); + } + + rtree_delete(&rtree); + } +} +TEST_END + 
+TEST_BEGIN(test_rtree_random) +{ + unsigned i; + sfmt_t *sfmt; +#define NSET 16 +#define SEED 42 + + sfmt = init_gen_rand(SEED); + for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { + uintptr_t keys[NSET]; + extent_node_t node; + unsigned j; + rtree_t rtree; + + assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), + "Unexpected rtree_new() failure"); + + for (j = 0; j < NSET; j++) { + keys[j] = (uintptr_t)gen_rand64(sfmt); + assert_false(rtree_set(&rtree, keys[j], &node), + "Unexpected rtree_set() failure"); + assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, + "rtree_get() should return previously set value"); + } + for (j = 0; j < NSET; j++) { + assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, + "rtree_get() should return previously set value"); + } + + for (j = 0; j < NSET; j++) { + assert_false(rtree_set(&rtree, keys[j], NULL), + "Unexpected rtree_set() failure"); + assert_ptr_null(rtree_get(&rtree, keys[j], true), + "rtree_get() should return previously set value"); + } + for (j = 0; j < NSET; j++) { + assert_ptr_null(rtree_get(&rtree, keys[j], true), + "rtree_get() should return previously set value"); + } + + rtree_delete(&rtree); + } + fini_gen_rand(sfmt); +#undef NSET +#undef SEED +} +TEST_END + +int +main(void) +{ + + return (test( + test_rtree_get_empty, + test_rtree_extrema, + test_rtree_bits, + test_rtree_random)); +} diff --git a/redis.submodule/jemalloc/test/unit/size_classes.c b/redis.submodule/jemalloc/test/unit/size_classes.c new file mode 100644 index 0000000..d3aaebd --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/size_classes.c @@ -0,0 +1,89 @@ +#include "test/jemalloc_test.h" + +static size_t +get_max_size_class(void) +{ + unsigned nhchunks; + size_t mib[4]; + size_t sz, miblen, max_size_class; + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, + "Unexpected mallctl() error"); + + miblen = sizeof(mib) / sizeof(size_t); + assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + mib[2] = nhchunks - 1; + + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() error"); + + return (max_size_class); +} + +TEST_BEGIN(test_size_classes) +{ + size_t size_class, max_size_class; + szind_t index, max_index; + + max_size_class = get_max_size_class(); + max_index = size2index(max_size_class); + + for (index = 0, size_class = index2size(index); index < max_index || + size_class < max_size_class; index++, size_class = + index2size(index)) { + assert_true(index < max_index, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + assert_true(size_class < max_size_class, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + + assert_u_eq(index, size2index(size_class), + "size2index() does not reverse index2size(): index=%u -->" + " size_class=%zu --> index=%u --> size_class=%zu", index, + size_class, size2index(size_class), + index2size(size2index(size_class))); + assert_zu_eq(size_class, index2size(size2index(size_class)), + "index2size() does not reverse size2index(): index=%u -->" + " size_class=%zu --> index=%u --> size_class=%zu", index, + size_class, size2index(size_class), + index2size(size2index(size_class))); + + assert_u_eq(index+1, size2index(size_class+1), + "Next size_class does not round up properly"); + + assert_zu_eq(size_class, (index > 0) ? 
+ s2u(index2size(index-1)+1) : s2u(1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class-1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class), + "s2u() does not compute same size class"); + assert_zu_eq(s2u(size_class+1), index2size(index+1), + "s2u() does not round up to next size class"); + } + + assert_u_eq(index, size2index(index2size(index)), + "size2index() does not reverse index2size()"); + assert_zu_eq(max_size_class, index2size(size2index(max_size_class)), + "index2size() does not reverse size2index()"); + + assert_zu_eq(size_class, s2u(index2size(index-1)+1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class-1), + "s2u() does not round up to size class"); + assert_zu_eq(size_class, s2u(size_class), + "s2u() does not compute same size class"); +} +TEST_END + +int +main(void) +{ + + return (test( + test_size_classes)); +} diff --git a/redis.submodule/jemalloc/test/unit/stats.c b/redis.submodule/jemalloc/test/unit/stats.c new file mode 100644 index 0000000..8e4bc63 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/stats.c @@ -0,0 +1,447 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_stats_summary) +{ + size_t *cactive; + size_t sz, allocated, active, resident, mapped; + int expected = config_stats ? 0 : ENOENT; + + sz = sizeof(cactive); + assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_le(active, *cactive, + "active should be no larger than cactive"); + assert_zu_le(allocated, active, + "allocated should be no larger than active"); + assert_zu_lt(active, resident, + "active should be less than resident"); + assert_zu_lt(active, mapped, + "active should be less than mapped"); + } +} +TEST_END + +TEST_BEGIN(test_stats_huge) +{ + void *p; + uint64_t epoch; + size_t allocated; + uint64_t nmalloc, ndalloc, nrequests; + size_t sz; + int expected = config_stats ? 
0 : ENOENT; + + p = mallocx(large_maxclass+1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, + 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, + 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_le(nmalloc, nrequests, + "nmalloc should no larger than nrequests"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_summary) +{ + unsigned arena; + void *little, *large, *huge; + uint64_t epoch; + size_t sz; + int expected = config_stats ? 0 : ENOENT; + size_t mapped; + uint64_t npurge, nmadvise, purged; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + little = mallocx(SMALL_MAXCLASS, 0); + assert_ptr_not_null(little, "Unexpected mallocx() failure"); + large = mallocx(large_maxclass, 0); + assert_ptr_not_null(large, "Unexpected mallocx() failure"); + huge = mallocx(chunksize, 0); + assert_ptr_not_null(huge, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0), + expected, "Unexepected mallctl() result"); + + if (config_stats) { + assert_u64_gt(npurge, 0, + "At least one purge should have occurred"); + assert_u64_le(nmadvise, purged, + "nmadvise should be no greater than purged"); + } + + dallocx(little, 0); + dallocx(large, 0); + dallocx(huge, 0); +} +TEST_END + +void * +thd_start(void *arg) +{ + + return (NULL); +} + +static void +no_lazy_lock(void) +{ + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} + +TEST_BEGIN(test_stats_arenas_small) +{ + unsigned arena; + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc, nrequests; + int expected = config_stats ? 0 : ENOENT; + + no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(SMALL_MAXCLASS, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + config_tcache ? 
0 : ENOENT, "Unexpected mallctl() result"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_gt(nmalloc, 0, + "nmalloc should be no greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_large) +{ + unsigned arena; + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc, nrequests; + int expected = config_stats ? 0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(large_maxclass, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_zu_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_zu_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_zu_gt(nrequests, 0, + "nrequests should be greater than zero"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_huge) +{ + unsigned arena; + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc; + int expected = config_stats ? 
0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(chunksize, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_zu_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_zu_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_bins) +{ + unsigned arena; + void *p; + size_t sz, curruns, curregs; + uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nruns, nreruns; + int expected = config_stats ? 0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(arena_bin_info[0].reg_size, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz, + NULL, 0), config_tcache ? expected : ENOENT, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz, + NULL, 0), config_tcache ? 
expected : ENOENT, + "Unexpected mallctl() result"); + + assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + assert_zu_gt(curregs, 0, + "allocated should be greater than zero"); + if (config_tcache) { + assert_u64_gt(nfills, 0, + "At least one fill should have occurred"); + assert_u64_gt(nflushes, 0, + "At least one flush should have occurred"); + } + assert_u64_gt(nruns, 0, + "At least one run should have been allocated"); + assert_zu_gt(curruns, 0, + "At least one run should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_lruns) +{ + unsigned arena; + void *p; + uint64_t epoch, nmalloc, ndalloc, nrequests; + size_t curruns, sz; + int expected = config_stats ? 0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(LARGE_MINCLASS, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + assert_u64_gt(curruns, 0, + "At least one run should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_hchunks) +{ + unsigned arena; + void *p; + uint64_t epoch, nmalloc, ndalloc; + size_t curhchunks, sz; + int expected = config_stats ? 
0 : ENOENT; + + arena = 0; + assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), + 0, "Unexpected mallctl() failure"); + + p = mallocx(chunksize, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz, + NULL, 0), expected, "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(curhchunks, 0, + "At least one chunk should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +int +main(void) +{ + + return (test( + test_stats_summary, + test_stats_huge, + test_stats_arenas_summary, + test_stats_arenas_small, + test_stats_arenas_large, + test_stats_arenas_huge, + test_stats_arenas_bins, + test_stats_arenas_lruns, + test_stats_arenas_hchunks)); +} diff --git a/redis.submodule/jemalloc/test/unit/tsd.c b/redis.submodule/jemalloc/test/unit/tsd.c new file mode 100644 index 0000000..8be787f --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/tsd.c @@ -0,0 +1,107 @@ +#include "test/jemalloc_test.h" + +#define THREAD_DATA 0x72b65c10 + +typedef unsigned int data_t; + +static bool data_cleanup_executed; + +malloc_tsd_types(data_, data_t) +malloc_tsd_protos(, data_, data_t) + +void +data_cleanup(void *arg) +{ + data_t *data = (data_t *)arg; + + if (!data_cleanup_executed) { + assert_x_eq(*data, THREAD_DATA, + "Argument passed into cleanup function should match tsd " + "value"); + } + data_cleanup_executed = true; + + /* + * Allocate during cleanup for two rounds, in order to assure that + * jemalloc's internal tsd reinitialization happens. 
+ */ + switch (*data) { + case THREAD_DATA: + *data = 1; + data_tsd_set(data); + break; + case 1: + *data = 2; + data_tsd_set(data); + break; + case 2: + return; + default: + not_reached(); + } + + { + void *p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpeced mallocx() failure"); + dallocx(p, 0); + } +} + +malloc_tsd_externs(data_, data_t) +#define DATA_INIT 0x12345678 +malloc_tsd_data(, data_, data_t, DATA_INIT) +malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup) + +static void * +thd_start(void *arg) +{ + data_t d = (data_t)(uintptr_t)arg; + void *p; + + assert_x_eq(*data_tsd_get(), DATA_INIT, + "Initial tsd get should return initialization value"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + data_tsd_set(&d); + assert_x_eq(*data_tsd_get(), d, + "After tsd set, tsd get should return value that was set"); + + d = 0; + assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg, + "Resetting local data should have no effect on tsd"); + + free(p); + return (NULL); +} + +TEST_BEGIN(test_tsd_main_thread) +{ + + thd_start((void *) 0xa5f3e329); +} +TEST_END + +TEST_BEGIN(test_tsd_sub_thread) +{ + thd_t thd; + + data_cleanup_executed = false; + thd_create(&thd, thd_start, (void *)THREAD_DATA); + thd_join(thd, NULL); + assert_true(data_cleanup_executed, + "Cleanup function should have executed"); +} +TEST_END + +int +main(void) +{ + + data_tsd_boot(); + + return (test( + test_tsd_main_thread, + test_tsd_sub_thread)); +} diff --git a/redis.submodule/jemalloc/test/unit/util.c b/redis.submodule/jemalloc/test/unit/util.c new file mode 100644 index 0000000..8ab39a4 --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/util.c @@ -0,0 +1,294 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_pow2_ceil) +{ + unsigned i, pow2; + size_t x; + + assert_zu_eq(pow2_ceil(0), 0, "Unexpected result"); + + for (i = 0; i < sizeof(size_t) * 8; i++) { + assert_zu_eq(pow2_ceil(ZU(1) << i), ZU(1) << i, + "Unexpected result"); + } + + for (i = 2; i < sizeof(size_t) * 8; i++) { + assert_zu_eq(pow2_ceil((ZU(1) << i) - 1), ZU(1) << i, + "Unexpected result"); + } + + for (i = 0; i < sizeof(size_t) * 8 - 1; i++) { + assert_zu_eq(pow2_ceil((ZU(1) << i) + 1), ZU(1) << (i+1), + "Unexpected result"); + } + + for (pow2 = 1; pow2 < 25; pow2++) { + for (x = (ZU(1) << (pow2-1)) + 1; x <= ZU(1) << pow2; x++) { + assert_zu_eq(pow2_ceil(x), ZU(1) << pow2, + "Unexpected result, x=%zu", x); + } + } +} +TEST_END + +TEST_BEGIN(test_malloc_strtoumax_no_endptr) +{ + int err; + + set_errno(0); + assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); + err = get_errno(); + assert_d_eq(err, 0, "Unexpected failure"); +} +TEST_END + +TEST_BEGIN(test_malloc_strtoumax) +{ + struct test_s { + const char *input; + const char *expected_remainder; + int base; + int expected_errno; + const char *expected_errno_name; + uintmax_t expected_x; + }; +#define ERR(e) e, #e +#define KUMAX(x) ((uintmax_t)x##ULL) + struct test_s tests[] = { + {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, + + {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, + {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, + {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, + {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {"+42", "", 0, ERR(0), KUMAX(42)}, + {"-42", "", 0, ERR(0), KUMAX(-42)}, + {"042", "", 0, ERR(0), KUMAX(042)}, + {"+042", "", 0, ERR(0), KUMAX(042)}, + {"-042", "", 0, ERR(0), KUMAX(-042)}, + {"0x42", "", 0, ERR(0), 
KUMAX(0x42)}, + {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"-0x42", "", 0, ERR(0), KUMAX(-0x42)}, + + {"0", "", 0, ERR(0), KUMAX(0)}, + {"1", "", 0, ERR(0), KUMAX(1)}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {" 42", "", 0, ERR(0), KUMAX(42)}, + {"42 ", " ", 0, ERR(0), KUMAX(42)}, + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"42x", "x", 0, ERR(0), KUMAX(42)}, + + {"07", "", 0, ERR(0), KUMAX(7)}, + {"010", "", 0, ERR(0), KUMAX(8)}, + {"08", "8", 0, ERR(0), KUMAX(0)}, + {"0_", "_", 0, ERR(0), KUMAX(0)}, + + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"0X", "X", 0, ERR(0), KUMAX(0)}, + {"0xg", "xg", 0, ERR(0), KUMAX(0)}, + {"0XA", "", 0, ERR(0), KUMAX(10)}, + + {"010", "", 10, ERR(0), KUMAX(10)}, + {"0x3", "x3", 10, ERR(0), KUMAX(0)}, + + {"12", "2", 2, ERR(0), KUMAX(1)}, + {"78", "8", 8, ERR(0), KUMAX(7)}, + {"9a", "a", 10, ERR(0), KUMAX(9)}, + {"9A", "A", 10, ERR(0), KUMAX(9)}, + {"fg", "g", 16, ERR(0), KUMAX(15)}, + {"FG", "G", 16, ERR(0), KUMAX(15)}, + {"0xfg", "g", 16, ERR(0), KUMAX(15)}, + {"0XFG", "G", 16, ERR(0), KUMAX(15)}, + {"z_", "_", 36, ERR(0), KUMAX(35)}, + {"Z_", "_", 36, ERR(0), KUMAX(35)} + }; +#undef ERR +#undef KUMAX + unsigned i; + + for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { + struct test_s *test = &tests[i]; + int err; + uintmax_t result; + char *remainder; + + set_errno(0); + result = malloc_strtoumax(test->input, &remainder, test->base); + err = get_errno(); + assert_d_eq(err, test->expected_errno, + "Expected errno %s for \"%s\", base %d", + test->expected_errno_name, test->input, test->base); + assert_str_eq(remainder, test->expected_remainder, + "Unexpected remainder for \"%s\", base %d", + test->input, test->base); + if (err == 0) { + assert_ju_eq(result, test->expected_x, + "Unexpected result for \"%s\", base %d", + test->input, test->base); + } + } +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf_truncated) +{ +#define BUFLEN 15 + char buf[BUFLEN]; + int result; + size_t len; +#define TEST(expected_str_untruncated, ...) do { \ + result = malloc_snprintf(buf, len, __VA_ARGS__); \ + assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ + "Unexpected string inequality (\"%s\" vs \"%s\")", \ + buf, expected_str_untruncated); \ + assert_d_eq(result, strlen(expected_str_untruncated), \ + "Unexpected result"); \ +} while (0) + + for (len = 1; len < BUFLEN; len++) { + TEST("012346789", "012346789"); + TEST("a0123b", "a%sb", "0123"); + TEST("a01234567", "a%s%s", "0123", "4567"); + TEST("a0123 ", "a%-6s", "0123"); + TEST("a 0123", "a%6s", "0123"); + TEST("a 012", "a%6.3s", "0123"); + TEST("a 012", "a%*.*s", 6, 3, "0123"); + TEST("a 123b", "a% db", 123); + TEST("a123b", "a%-db", 123); + TEST("a-123b", "a%-db", -123); + TEST("a+123b", "a%+db", 123); + } +#undef BUFLEN +#undef TEST +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf) +{ +#define BUFLEN 128 + char buf[BUFLEN]; + int result; +#define TEST(expected_str, ...) 
do { \ + result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ + assert_str_eq(buf, expected_str, "Unexpected output"); \ + assert_d_eq(result, strlen(expected_str), "Unexpected result"); \ +} while (0) + + TEST("hello", "hello"); + + TEST("50%, 100%", "50%%, %d%%", 100); + + TEST("a0123b", "a%sb", "0123"); + + TEST("a 0123b", "a%5sb", "0123"); + TEST("a 0123b", "a%*sb", 5, "0123"); + + TEST("a0123 b", "a%-5sb", "0123"); + TEST("a0123b", "a%*sb", -1, "0123"); + TEST("a0123 b", "a%*sb", -5, "0123"); + TEST("a0123 b", "a%-*sb", -5, "0123"); + + TEST("a012b", "a%.3sb", "0123"); + TEST("a012b", "a%.*sb", 3, "0123"); + TEST("a0123b", "a%.*sb", -3, "0123"); + + TEST("a 012b", "a%5.3sb", "0123"); + TEST("a 012b", "a%5.*sb", 3, "0123"); + TEST("a 012b", "a%*.3sb", 5, "0123"); + TEST("a 012b", "a%*.*sb", 5, 3, "0123"); + TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); + + TEST("_abcd_", "_%x_", 0xabcd); + TEST("_0xabcd_", "_%#x_", 0xabcd); + TEST("_1234_", "_%o_", 01234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + + TEST("_1234_", "_%d_", 1234); + TEST("_ 1234_", "_% d_", 1234); + TEST("_+1234_", "_%+d_", 1234); + TEST("_-1234_", "_%d_", -1234); + TEST("_-1234_", "_% d_", -1234); + TEST("_-1234_", "_%+d_", -1234); + + TEST("_-1234_", "_%d_", -1234); + TEST("_1234_", "_%d_", 1234); + TEST("_-1234_", "_%i_", -1234); + TEST("_1234_", "_%i_", 1234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + TEST("_0x1234abc_", "_%#x_", 0x1234abc); + TEST("_0X1234ABC_", "_%#X_", 0x1234abc); + TEST("_c_", "_%c_", 'c'); + TEST("_string_", "_%s_", "string"); + TEST("_0x42_", "_%p_", ((void *)0x42)); + + TEST("_-1234_", "_%ld_", ((long)-1234)); + TEST("_1234_", "_%ld_", ((long)1234)); + TEST("_-1234_", "_%li_", ((long)-1234)); + TEST("_1234_", "_%li_", ((long)1234)); + TEST("_01234_", "_%#lo_", ((long)01234)); + TEST("_1234_", "_%lu_", ((long)1234)); + TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); + TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); + + TEST("_-1234_", "_%lld_", ((long long)-1234)); + TEST("_1234_", "_%lld_", ((long long)1234)); + TEST("_-1234_", "_%lli_", ((long long)-1234)); + TEST("_1234_", "_%lli_", ((long long)1234)); + TEST("_01234_", "_%#llo_", ((long long)01234)); + TEST("_1234_", "_%llu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%qd_", ((long long)-1234)); + TEST("_1234_", "_%qd_", ((long long)1234)); + TEST("_-1234_", "_%qi_", ((long long)-1234)); + TEST("_1234_", "_%qi_", ((long long)1234)); + TEST("_01234_", "_%#qo_", ((long long)01234)); + TEST("_1234_", "_%qu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); + TEST("_1234_", "_%jd_", ((intmax_t)1234)); + TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); + TEST("_1234_", "_%ji_", ((intmax_t)1234)); + TEST("_01234_", "_%#jo_", ((intmax_t)01234)); + TEST("_1234_", "_%ju_", ((intmax_t)1234)); + TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); + + TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); + TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); + + TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); + TEST("_1234_", "_%zd_", ((ssize_t)1234)); + TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); + TEST("_1234_", 
"_%zi_", ((ssize_t)1234)); + TEST("_01234_", "_%#zo_", ((ssize_t)01234)); + TEST("_1234_", "_%zu_", ((ssize_t)1234)); + TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC)); +#undef BUFLEN +} +TEST_END + +int +main(void) +{ + + return (test( + test_pow2_ceil, + test_malloc_strtoumax_no_endptr, + test_malloc_strtoumax, + test_malloc_snprintf_truncated, + test_malloc_snprintf)); +} diff --git a/redis.submodule/jemalloc/test/unit/zero.c b/redis.submodule/jemalloc/test/unit/zero.c new file mode 100644 index 0000000..93afc2b --- /dev/null +++ b/redis.submodule/jemalloc/test/unit/zero.c @@ -0,0 +1,78 @@ +#include "test/jemalloc_test.h" + +#ifdef JEMALLOC_FILL +const char *malloc_conf = + "abort:false,junk:false,zero:true,redzone:false,quarantine:0"; +#endif + +static void +test_zero(size_t sz_min, size_t sz_max) +{ + char *s; + size_t sz_prev, sz, i; + + sz_prev = 0; + s = (char *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_c_eq(s[0], 'a', + "Previously allocated byte %zu/%zu is corrupted", + ZU(0), sz_prev); + assert_c_eq(s[sz_prev-1], 'a', + "Previously allocated byte %zu/%zu is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + assert_c_eq(s[i], 0x0, + "Newly allocated byte %zu/%zu isn't zero-filled", + i, sz); + s[i] = 'a'; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + s = (char *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)s, + "Unexpected rallocx() failure"); + } + } + + dallocx(s, 0); +} + +TEST_BEGIN(test_zero_small) +{ + + test_skip_if(!config_fill); + test_zero(1, SMALL_MAXCLASS-1); +} +TEST_END + +TEST_BEGIN(test_zero_large) +{ + + test_skip_if(!config_fill); + test_zero(SMALL_MAXCLASS+1, large_maxclass); +} +TEST_END + +TEST_BEGIN(test_zero_huge) +{ + + test_skip_if(!config_fill); + test_zero(large_maxclass+1, chunksize*2); +} +TEST_END + +int +main(void) +{ + + return (test( + test_zero_small, + test_zero_large, + test_zero_huge)); +} diff --git a/redis.submodule/runtest-cluster b/redis.submodule/runtest-cluster new file mode 100755 index 0000000..27829a5 --- /dev/null +++ b/redis.submodule/runtest-cluster @@ -0,0 +1,14 @@ +#!/bin/sh +TCL_VERSIONS="8.5 8.6" +TCLSH="" + +for VERSION in $TCL_VERSIONS; do + TCL=`which tclsh$VERSION 2>/dev/null` && TCLSH=$TCL +done + +if [ -z $TCLSH ] +then + echo "You need tcl 8.5 or newer in order to run the Redis Sentinel test" + exit 1 +fi +$TCLSH tests/cluster/run.tcl $* diff --git a/redis.submodule/src/blocked.c b/redis.submodule/src/blocked.c new file mode 100644 index 0000000..8bab5cc --- /dev/null +++ b/redis.submodule/src/blocked.c @@ -0,0 +1,183 @@ +/* blocked.c - generic support for blocking operations like BLPOP & WAIT. + * + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * --------------------------------------------------------------------------- + * + * API: + * + * getTimeoutFromObjectOrReply() is just a utility function to parse a + * timeout argument, since blocking operations usually require a timeout. + * + * blockClient() sets the REDIS_BLOCKED flag in the client, and sets the + * specified block type 'btype' field to one of the REDIS_BLOCKED_* macros. + * + * unblockClient() unblocks the client doing the following: + * 1) It calls the btype-specific function to clean up the state. + * 2) It unblocks the client by unsetting the REDIS_BLOCKED flag. + * 3) It puts the client into a list of just unblocked clients that are + * processed ASAP in the beforeSleep() event loop callback, so that + * if there is some query buffer to process, we do it. This is also + * required because otherwise no 'readable' event is fired: we have + * already read the pending commands. We also set the REDIS_UNBLOCKED + * flag to remember the client is in the unblocked_clients list. + * + * processUnblockedClients() is called inside the beforeSleep() function + * to process the query buffer from unblocked clients and remove the clients + * from the blocked_clients queue. + * + * replyToBlockedClientTimedOut() is called by the cron function when + * a blocked client reaches the specified timeout (if the timeout is set + * to 0, no timeout is processed). + * It usually just needs to send a reply to the client. + * + * When implementing a new type of blocking operation, the implementation + * should modify unblockClient() and replyToBlockedClientTimedOut() in order + * to handle the btype-specific behavior of these two functions. + * If the blocking operation waits for certain keys to change state, the + * clusterRedirectBlockedClientIfNeeded() function should also be updated. + */ + +#include "redis.h" + +/* Get a timeout value from an object and store it into 'timeout'. + * The final timeout is always stored as milliseconds, as the time at which + * the timeout will expire; however, the parsing is performed according to + * the 'unit', which can be seconds or milliseconds. + * + * Note that if the timeout is zero (which, from the point of view of the + * commands API, usually means no timeout) the value stored into 'timeout' + * is zero. 
*/ +int getTimeoutFromObjectOrReply(redisClient *c, robj *object, mstime_t *timeout, int unit) { + long long tval; + + if (getLongLongFromObjectOrReply(c,object,&tval, + "timeout is not an integer or out of range") != REDIS_OK) + return REDIS_ERR; + + if (tval < 0) { + addReplyError(c,"timeout is negative"); + return REDIS_ERR; + } + + if (tval > 0) { + if (unit == UNIT_SECONDS) tval *= 1000; + tval += mstime(); + } + *timeout = tval; + + return REDIS_OK; +} + +/* Block a client for the specific operation type. Once the REDIS_BLOCKED + * flag is set client query buffer is not longer processed, but accumulated, + * and will be processed when the client is unblocked. */ +void blockClient(redisClient *c, int btype) { + c->flags |= REDIS_BLOCKED; + c->btype = btype; + server.bpop_blocked_clients++; +} + +/* This function is called in the beforeSleep() function of the event loop + * in order to process the pending input buffer of clients that were + * unblocked after a blocking operation. */ +void processUnblockedClients(void) { + listNode *ln; + redisClient *c; + + while (listLength(server.unblocked_clients)) { + ln = listFirst(server.unblocked_clients); + redisAssert(ln != NULL); + c = ln->value; + listDelNode(server.unblocked_clients,ln); + c->flags &= ~REDIS_UNBLOCKED; + + /* Process remaining data in the input buffer. */ + if (c->querybuf && sdslen(c->querybuf) > 0) { + server.current_client = c; + processInputBuffer(c); + server.current_client = NULL; + } + } +} + +/* Unblock a client calling the right function depending on the kind + * of operation the client is blocking for. */ +void unblockClient(redisClient *c) { + if (c->btype == REDIS_BLOCKED_LIST) { + unblockClientWaitingData(c); + } else if (c->btype == REDIS_BLOCKED_WAIT) { + unblockClientWaitingReplicas(c); + } else { + redisPanic("Unknown btype in unblockClient()."); + } + /* Clear the flags, and put the client in the unblocked list so that + * we'll process new commands in its query buffer ASAP. */ + c->flags &= ~REDIS_BLOCKED; + c->flags |= REDIS_UNBLOCKED; + c->btype = REDIS_BLOCKED_NONE; + server.bpop_blocked_clients--; + listAddNodeTail(server.unblocked_clients,c); +} + +/* This function gets called when a blocked client timed out in order to + * send it a reply of some kind. */ +void replyToBlockedClientTimedOut(redisClient *c) { + if (c->btype == REDIS_BLOCKED_LIST) { + addReply(c,shared.nullmultibulk); + } else if (c->btype == REDIS_BLOCKED_WAIT) { + addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset)); + } else { + redisPanic("Unknown btype in replyToBlockedClientTimedOut()."); + } +} + +/* Mass-unblock clients because something changed in the instance that makes + * blocking no longer safe. For example clients blocked in list operations + * in an instance which turns from master to slave is unsafe, so this function + * is called when a master turns into a slave. + * + * The semantics is to send an -UNBLOCKED error to the client, disconnecting + * it at the same time. 
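To tie the helpers above together, here is a hedged sketch (not taken from the patch) of how a hypothetical blocking command could drive this API; the command name, the use of argv[2] for the timeout, and serveImmediatelyIfPossible() are invented placeholders, whereas getTimeoutFromObjectOrReply(), blockClient(), REDIS_BLOCKED_LIST, and c->bpop.timeout come from the surrounding sources. A later wakeup would go through unblockClient() and processUnblockedClients(), and an expired wait through replyToBlockedClientTimedOut().

/* Illustrative sketch only (depends on redis.h): a command that blocks
 * the client when it cannot be served right away. */
void myBlockingCommand(redisClient *c) {
    mstime_t timeout;

    /* argv[2] is assumed to carry the timeout, expressed in seconds. */
    if (getTimeoutFromObjectOrReply(c,c->argv[2],&timeout,UNIT_SECONDS)
        != REDIS_OK) return;

    /* Placeholder: try to satisfy the request immediately. */
    if (serveImmediatelyIfPossible(c)) return;

    /* Nothing available: park the client. Its query buffer is only
     * accumulated until unblockClient() puts it back in rotation. */
    c->bpop.timeout = timeout;      /* 0 means no timeout at all. */
    blockClient(c,REDIS_BLOCKED_LIST);
}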
*/ +void disconnectAllBlockedClients(void) { + listNode *ln; + listIter li; + + listRewind(server.clients,&li); + while((ln = listNext(&li))) { + redisClient *c = listNodeValue(ln); + + if (c->flags & REDIS_BLOCKED) { + addReplySds(c,sdsnew( + "-UNBLOCKED force unblock from blocking operation, " + "instance state changed (master -> slave?)\r\n")); + unblockClient(c); + c->flags |= REDIS_CLOSE_AFTER_REPLY; + } + } +} diff --git a/redis.submodule/src/cluster.c b/redis.submodule/src/cluster.c new file mode 100644 index 0000000..6280677 --- /dev/null +++ b/redis.submodule/src/cluster.c @@ -0,0 +1,5014 @@ +/* Redis Cluster implementation. + * + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "redis.h" +#include "cluster.h" +#include "endianconv.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* A global reference to myself is handy to make code more clear. + * Myself always points to server.cluster->myself, that is, the clusterNode + * that represents this node. 
*/ +clusterNode *myself = NULL; + +clusterNode *createClusterNode(char *nodename, int flags); +int clusterAddNode(clusterNode *node); +void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask); +void clusterReadHandler(aeEventLoop *el, int fd, void *privdata, int mask); +void clusterSendPing(clusterLink *link, int type); +void clusterSendFail(char *nodename); +void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request); +void clusterUpdateState(void); +int clusterNodeGetSlotBit(clusterNode *n, int slot); +sds clusterGenNodesDescription(int filter); +clusterNode *clusterLookupNode(char *name); +int clusterNodeAddSlave(clusterNode *master, clusterNode *slave); +int clusterAddSlot(clusterNode *n, int slot); +int clusterDelSlot(int slot); +int clusterDelNodeSlots(clusterNode *node); +int clusterNodeSetSlotBit(clusterNode *n, int slot); +void clusterSetMaster(clusterNode *n); +void clusterHandleSlaveFailover(void); +void clusterHandleSlaveMigration(int max_slaves); +int bitmapTestBit(unsigned char *bitmap, int pos); +void clusterDoBeforeSleep(int flags); +void clusterSendUpdate(clusterLink *link, clusterNode *node); +void resetManualFailover(void); +void clusterCloseAllSlots(void); +void clusterSetNodeAsMaster(clusterNode *n); +void clusterDelNode(clusterNode *delnode); +sds representRedisNodeFlags(sds ci, uint16_t flags); +uint64_t clusterGetMaxEpoch(void); +int clusterBumpConfigEpochWithoutConsensus(void); + +/* ----------------------------------------------------------------------------- + * Initialization + * -------------------------------------------------------------------------- */ + +/* Load the cluster config from 'filename'. + * + * If the file does not exist or is zero-length (this may happen because + * when we lock the nodes.conf file, we create a zero-length one for the + * sake of locking if it does not already exist), REDIS_ERR is returned. + * If the configuration was loaded from the file, REDIS_OK is returned. */ +int clusterLoadConfig(char *filename) { + FILE *fp = fopen(filename,"r"); + struct stat sb; + char *line; + int maxline, j; + + if (fp == NULL) { + if (errno == ENOENT) { + return REDIS_ERR; + } else { + redisLog(REDIS_WARNING, + "Loading the cluster node config from %s: %s", + filename, strerror(errno)); + exit(1); + } + } + + /* Check if the file is zero-length: if so return REDIS_ERR to signal + * we have to write the config. */ + if (fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) { + fclose(fp); + return REDIS_ERR; + } + + /* Parse the file. Note that single lines of the cluster config file can + * be really long as they include all the hash slots of the node. + * This means in the worst possible case, half of the Redis slots will be + * present in a single line, possibly in importing or migrating state, so + * together with the node ID of the sender/receiver. + * + * To simplify we allocate 1024+REDIS_CLUSTER_SLOTS*128 bytes per line. */ + maxline = 1024+REDIS_CLUSTER_SLOTS*128; + line = zmalloc(maxline); + while(fgets(line,maxline,fp) != NULL) { + int argc; + sds *argv; + clusterNode *n, *master; + char *p, *s; + + /* Skip blank lines, they can be created either by users manually + * editing nodes.conf or by the config writing process if stopped + * before the truncate() call. */ + if (line[0] == '\n') continue; + + /* Split the line into arguments for processing. */ + argv = sdssplitargs(line,&argc); + if (argv == NULL) goto fmterr; + + /* Handle the special "vars" line. 
Don't pretend it is the last + * line even if it actually is when generated by Redis. */ + if (strcasecmp(argv[0],"vars") == 0) { + for (j = 1; j < argc; j += 2) { + if (strcasecmp(argv[j],"currentEpoch") == 0) { + server.cluster->currentEpoch = + strtoull(argv[j+1],NULL,10); + } else if (strcasecmp(argv[j],"lastVoteEpoch") == 0) { + server.cluster->lastVoteEpoch = + strtoull(argv[j+1],NULL,10); + } else { + redisLog(REDIS_WARNING, + "Skipping unknown cluster config variable '%s'", + argv[j]); + } + } + sdsfreesplitres(argv,argc); + continue; + } + + /* Regular config lines have at least eight fields */ + if (argc < 8) goto fmterr; + + /* Create this node if it does not exist */ + n = clusterLookupNode(argv[0]); + if (!n) { + n = createClusterNode(argv[0],0); + clusterAddNode(n); + } + /* Address and port */ + if ((p = strrchr(argv[1],':')) == NULL) goto fmterr; + *p = '\0'; + memcpy(n->ip,argv[1],strlen(argv[1])+1); + n->port = atoi(p+1); + + /* Parse flags */ + p = s = argv[2]; + while(p) { + p = strchr(s,','); + if (p) *p = '\0'; + if (!strcasecmp(s,"myself")) { + redisAssert(server.cluster->myself == NULL); + myself = server.cluster->myself = n; + n->flags |= REDIS_NODE_MYSELF; + } else if (!strcasecmp(s,"master")) { + n->flags |= REDIS_NODE_MASTER; + } else if (!strcasecmp(s,"slave")) { + n->flags |= REDIS_NODE_SLAVE; + } else if (!strcasecmp(s,"fail?")) { + n->flags |= REDIS_NODE_PFAIL; + } else if (!strcasecmp(s,"fail")) { + n->flags |= REDIS_NODE_FAIL; + n->fail_time = mstime(); + } else if (!strcasecmp(s,"handshake")) { + n->flags |= REDIS_NODE_HANDSHAKE; + } else if (!strcasecmp(s,"noaddr")) { + n->flags |= REDIS_NODE_NOADDR; + } else if (!strcasecmp(s,"noflags")) { + /* nothing to do */ + } else { + redisPanic("Unknown flag in redis cluster config file"); + } + if (p) s = p+1; + } + + /* Get master if any. Set the master and populate master's + * slave list. */ + if (argv[3][0] != '-') { + master = clusterLookupNode(argv[3]); + if (!master) { + master = createClusterNode(argv[3],0); + clusterAddNode(master); + } + n->slaveof = master; + clusterNodeAddSlave(master,n); + } + + /* Set ping sent / pong received timestamps */ + if (atoi(argv[4])) n->ping_sent = mstime(); + if (atoi(argv[5])) n->pong_received = mstime(); + + /* Set configEpoch for this node. */ + n->configEpoch = strtoull(argv[6],NULL,10); + + /* Populate hash slots served by this instance. */ + for (j = 8; j < argc; j++) { + int start, stop; + + if (argv[j][0] == '[') { + /* Here we handle migrating / importing slots */ + int slot; + char direction; + clusterNode *cn; + + p = strchr(argv[j],'-'); + redisAssert(p != NULL); + *p = '\0'; + direction = p[1]; /* Either '>' or '<' */ + slot = atoi(argv[j]+1); + p += 3; + cn = clusterLookupNode(p); + if (!cn) { + cn = createClusterNode(p,0); + clusterAddNode(cn); + } + if (direction == '>') { + server.cluster->migrating_slots_to[slot] = cn; + } else { + server.cluster->importing_slots_from[slot] = cn; + } + continue; + } else if ((p = strchr(argv[j],'-')) != NULL) { + *p = '\0'; + start = atoi(argv[j]); + stop = atoi(p+1); + } else { + start = stop = atoi(argv[j]); + } + while(start <= stop) clusterAddSlot(n, start++); + } + + sdsfreesplitres(argv,argc); + } + /* Config sanity check */ + if (server.cluster->myself == NULL) goto fmterr; + + zfree(line); + fclose(fp); + + redisLog(REDIS_NOTICE,"Node configuration loaded, I'm %.40s", myself->name); + + /* Something that should never happen: currentEpoch smaller than + * the max epoch found in the nodes configuration. 
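For readers tracing the parser above, here are two invented nodes.conf lines laid out with the fields in the order the code reads them (node ID, address:port, flags, master ID or '-', ping-sent, pong-received, configEpoch, link state, then any served slots); all IDs, ports, timestamps and epochs below are made up for illustration:

e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:7000 myself,master - 0 0 5 connected 0-5460
07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1:7001 slave e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 0 1426238317239 4 connected

A slot being moved would instead appear in the bracketed form the loop handles, e.g. [5461->-<node-id>] while migrating out or [5461-<-<node-id>] while importing.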
However we handle this + * as some form of protection against manual editing of critical files. */ + if (clusterGetMaxEpoch() > server.cluster->currentEpoch) { + server.cluster->currentEpoch = clusterGetMaxEpoch(); + } + return REDIS_OK; + +fmterr: + redisLog(REDIS_WARNING, + "Unrecoverable error: corrupted cluster config file."); + zfree(line); + if (fp) fclose(fp); + exit(1); +} + +/* Cluster node configuration is exactly the same as CLUSTER NODES output. + * + * This function writes the node config and returns 0, on error -1 + * is returned. + * + * Note: we need to write the file in an atomic way from the point of view + * of the POSIX filesystem semantics, so that if the server is stopped + * or crashes during the write, we'll end with either the old file or the + * new one. Since we have the full payload to write available we can use + * a single write to write the whole file. If the pre-existing file was + * bigger we pad our payload with newlines that are anyway ignored and truncate + * the file afterward. */ +int clusterSaveConfig(int do_fsync) { + sds ci; + size_t content_size; + struct stat sb; + int fd; + + server.cluster->todo_before_sleep &= ~CLUSTER_TODO_SAVE_CONFIG; + + /* Get the nodes description and concatenate our "vars" directive to + * save currentEpoch and lastVoteEpoch. */ + ci = clusterGenNodesDescription(REDIS_NODE_HANDSHAKE); + ci = sdscatprintf(ci,"vars currentEpoch %llu lastVoteEpoch %llu\n", + (unsigned long long) server.cluster->currentEpoch, + (unsigned long long) server.cluster->lastVoteEpoch); + content_size = sdslen(ci); + + if ((fd = open(server.cluster_configfile,O_WRONLY|O_CREAT,0644)) + == -1) goto err; + + /* Pad the new payload if the existing file length is greater. */ + if (fstat(fd,&sb) != -1) { + if (sb.st_size > (off_t)content_size) { + ci = sdsgrowzero(ci,sb.st_size); + memset(ci+content_size,'\n',sb.st_size-content_size); + } + } + if (write(fd,ci,sdslen(ci)) != (ssize_t)sdslen(ci)) goto err; + if (do_fsync) { + server.cluster->todo_before_sleep &= ~CLUSTER_TODO_FSYNC_CONFIG; + fsync(fd); + } + + /* Truncate the file if needed to remove the final \n padding that + * is just garbage. */ + if (content_size != sdslen(ci) && ftruncate(fd,content_size) == -1) { + /* ftruncate() failing is not a critical error. */ + } + close(fd); + sdsfree(ci); + return 0; + +err: + if (fd != -1) close(fd); + sdsfree(ci); + return -1; +} + +void clusterSaveConfigOrDie(int do_fsync) { + if (clusterSaveConfig(do_fsync) == -1) { + redisLog(REDIS_WARNING,"Fatal: can't update cluster config file."); + exit(1); + } +} + +/* Lock the cluster config using flock(), and leaks the file descritor used to + * acquire the lock so that the file will be locked forever. + * + * This works because we always update nodes.conf with a new version + * in-place, reopening the file, and writing to it in place (later adjusting + * the length with ftruncate()). + * + * On success REDIS_OK is returned, otherwise an error is logged and + * the function returns REDIS_ERR to signal a lock was not acquired. */ +int clusterLockConfig(char *filename) { +/* flock() does not exist on Solaris + * and a fcntl-based solution won't help, as we constantly re-open that file, + * which will release _all_ locks anyway + */ +#if !defined(__sun) + /* To lock it, we need to open the file in a way it is created if + * it does not exist, otherwise there is a race condition with other + * processes. 
*/ + int fd = open(filename,O_WRONLY|O_CREAT,0644); + if (fd == -1) { + redisLog(REDIS_WARNING, + "Can't open %s in order to acquire a lock: %s", + filename, strerror(errno)); + return REDIS_ERR; + } + + if (flock(fd,LOCK_EX|LOCK_NB) == -1) { + if (errno == EWOULDBLOCK) { + redisLog(REDIS_WARNING, + "Sorry, the cluster configuration file %s is already used " + "by a different Redis Cluster node. Please make sure that " + "different nodes use different cluster configuration " + "files.", filename); + } else { + redisLog(REDIS_WARNING, + "Impossible to lock %s: %s", filename, strerror(errno)); + } + close(fd); + return REDIS_ERR; + } + /* Lock acquired: leak the 'fd' by not closing it, so that we'll retain the + * lock to the file as long as the process exists. */ +#endif /* __sun */ + + return REDIS_OK; +} + +void clusterInit(void) { + int saveconf = 0; + + server.cluster = zmalloc(sizeof(clusterState)); + server.cluster->myself = NULL; + server.cluster->currentEpoch = 0; + server.cluster->state = REDIS_CLUSTER_FAIL; + server.cluster->size = 1; + server.cluster->todo_before_sleep = 0; + server.cluster->nodes = dictCreate(&clusterNodesDictType,NULL); + server.cluster->nodes_black_list = + dictCreate(&clusterNodesBlackListDictType,NULL); + server.cluster->failover_auth_time = 0; + server.cluster->failover_auth_count = 0; + server.cluster->failover_auth_rank = 0; + server.cluster->failover_auth_epoch = 0; + server.cluster->cant_failover_reason = REDIS_CLUSTER_CANT_FAILOVER_NONE; + server.cluster->lastVoteEpoch = 0; + server.cluster->stats_bus_messages_sent = 0; + server.cluster->stats_bus_messages_received = 0; + memset(server.cluster->slots,0, sizeof(server.cluster->slots)); + clusterCloseAllSlots(); + + /* Lock the cluster config file to make sure every node uses + * its own nodes.conf. */ + if (clusterLockConfig(server.cluster_configfile) == REDIS_ERR) + exit(1); + + /* Load or create a new nodes configuration. */ + if (clusterLoadConfig(server.cluster_configfile) == REDIS_ERR) { + /* No configuration found. We will just use the random name provided + * by the createClusterNode() function. */ + myself = server.cluster->myself = + createClusterNode(NULL,REDIS_NODE_MYSELF|REDIS_NODE_MASTER); + redisLog(REDIS_NOTICE,"No cluster configuration found, I'm %.40s", + myself->name); + clusterAddNode(myself); + saveconf = 1; + } + if (saveconf) clusterSaveConfigOrDie(1); + + /* We need a listening TCP port for our cluster messaging needs. */ + server.cfd_count = 0; + + /* Port sanity check II + * The other handshake port check is triggered too late to stop + * us from trying to use a too-high cluster port number. */ + if (server.port > (65535-REDIS_CLUSTER_PORT_INCR)) { + redisLog(REDIS_WARNING, "Redis port number too high. " + "Cluster communication port is 10,000 port " + "numbers higher than your Redis port. " + "Your Redis port number must be " + "lower than 55535."); + exit(1); + } + + if (listenToPort(server.port+REDIS_CLUSTER_PORT_INCR, + server.cfd,&server.cfd_count) == REDIS_ERR) + { + exit(1); + } else { + int j; + + for (j = 0; j < server.cfd_count; j++) { + if (aeCreateFileEvent(server.el, server.cfd[j], AE_READABLE, + clusterAcceptHandler, NULL) == AE_ERR) + redisPanic("Unrecoverable error creating Redis Cluster " + "file event."); + } + } + + /* The slots -> keys map is a sorted set. Init it. */ + server.cluster->slots_to_keys = zslCreate(); + + /* Set myself->port to my listening port, we'll just need to discover + * the IP address via MEET messages. 
*/ + myself->port = server.port; + + server.cluster->mf_end = 0; + resetManualFailover(); +} + +/* Reset a node performing a soft or hard reset: + * + * 1) All other nodes are forget. + * 2) All the assigned / open slots are released. + * 3) If the node is a slave, it turns into a master. + * 5) Only for hard reset: a new Node ID is generated. + * 6) Only for hard reset: currentEpoch and configEpoch are set to 0. + * 7) The new configuration is saved and the cluster state updated. + * 8) If the node was a slave, the whole data set is flushed away. */ +void clusterReset(int hard) { + dictIterator *di; + dictEntry *de; + int j; + + /* Turn into master. */ + if (nodeIsSlave(myself)) { + clusterSetNodeAsMaster(myself); + replicationUnsetMaster(); + emptyDb(NULL); + } + + /* Close slots, reset manual failover state. */ + clusterCloseAllSlots(); + resetManualFailover(); + + /* Unassign all the slots. */ + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) clusterDelSlot(j); + + /* Forget all the nodes, but myself. */ + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (node == myself) continue; + clusterDelNode(node); + } + dictReleaseIterator(di); + + /* Hard reset only: set epochs to 0, change node ID. */ + if (hard) { + sds oldname; + + server.cluster->currentEpoch = 0; + server.cluster->lastVoteEpoch = 0; + myself->configEpoch = 0; + redisLog(REDIS_WARNING, "configEpoch set to 0 via CLUSTER RESET HARD"); + + /* To change the Node ID we need to remove the old name from the + * nodes table, change the ID, and re-add back with new name. */ + oldname = sdsnewlen(myself->name, REDIS_CLUSTER_NAMELEN); + dictDelete(server.cluster->nodes,oldname); + sdsfree(oldname); + getRandomHexChars(myself->name, REDIS_CLUSTER_NAMELEN); + clusterAddNode(myself); + } + + /* Make sure to persist the new config and update the state. */ + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE| + CLUSTER_TODO_FSYNC_CONFIG); +} + +/* ----------------------------------------------------------------------------- + * CLUSTER communication link + * -------------------------------------------------------------------------- */ + +clusterLink *createClusterLink(clusterNode *node) { + clusterLink *link = zmalloc(sizeof(*link)); + link->ctime = mstime(); + link->sndbuf = sdsempty(); + link->rcvbuf = sdsempty(); + link->node = node; + link->fd = -1; + return link; +} + +/* Free a cluster link, but does not free the associated node of course. + * This function will just make sure that the original node associated + * with this link will have the 'link' field set to NULL. */ +void freeClusterLink(clusterLink *link) { + if (link->fd != -1) { + aeDeleteFileEvent(server.el, link->fd, AE_WRITABLE); + aeDeleteFileEvent(server.el, link->fd, AE_READABLE); + } + sdsfree(link->sndbuf); + sdsfree(link->rcvbuf); + if (link->node) + link->node->link = NULL; + close(link->fd); + zfree(link); +} + +#define MAX_CLUSTER_ACCEPTS_PER_CALL 1000 +void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { + int cport, cfd; + int max = MAX_CLUSTER_ACCEPTS_PER_CALL; + char cip[REDIS_IP_STR_LEN]; + clusterLink *link; + REDIS_NOTUSED(el); + REDIS_NOTUSED(mask); + REDIS_NOTUSED(privdata); + + /* If the server is starting up, don't accept cluster connections: + * UPDATE messages may interact with the database content. 
*/ + if (server.masterhost == NULL && server.loading) return; + + while(max--) { + cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); + if (cfd == ANET_ERR) { + if (errno != EWOULDBLOCK) + redisLog(REDIS_VERBOSE, + "Error accepting cluster node: %s", server.neterr); + return; + } + anetNonBlock(NULL,cfd); + anetEnableTcpNoDelay(NULL,cfd); + + /* Use non-blocking I/O for cluster messages. */ + redisLog(REDIS_VERBOSE,"Accepted cluster node %s:%d", cip, cport); + /* Create a link object we use to handle the connection. + * It gets passed to the readable handler when data is available. + * Initiallly the link->node pointer is set to NULL as we don't know + * which node is, but the right node is references once we know the + * node identity. */ + link = createClusterLink(NULL); + link->fd = cfd; + aeCreateFileEvent(server.el,cfd,AE_READABLE,clusterReadHandler,link); + } +} + +/* ----------------------------------------------------------------------------- + * Key space handling + * -------------------------------------------------------------------------- */ + +/* We have 16384 hash slots. The hash slot of a given key is obtained + * as the least significant 14 bits of the crc16 of the key. + * + * However if the key contains the {...} pattern, only the part between + * { and } is hashed. This may be useful in the future to force certain + * keys to be in the same node (assuming no resharding is in progress). */ +unsigned int keyHashSlot(char *key, int keylen) { + int s, e; /* start-end indexes of { and } */ + + for (s = 0; s < keylen; s++) + if (key[s] == '{') break; + + /* No '{' ? Hash the whole key. This is the base case. */ + if (s == keylen) return crc16(key,keylen) & 0x3FFF; + + /* '{' found? Check if we have the corresponding '}'. */ + for (e = s+1; e < keylen; e++) + if (key[e] == '}') break; + + /* No '}' or nothing betweeen {} ? Hash the whole key. */ + if (e == keylen || e == s+1) return crc16(key,keylen) & 0x3FFF; + + /* If we are here there is both a { and a } on its right. Hash + * what is in the middle between { and }. */ + return crc16(key+s+1,e-s-1) & 0x3FFF; +} + +/* ----------------------------------------------------------------------------- + * CLUSTER node API + * -------------------------------------------------------------------------- */ + +/* Create a new cluster node, with the specified flags. + * If "nodename" is NULL this is considered a first handshake and a random + * node name is assigned to this node (it will be fixed later when we'll + * receive the first pong). + * + * The node is created and returned to the user, but it is not automatically + * added to the nodes hash table. */ +clusterNode *createClusterNode(char *nodename, int flags) { + clusterNode *node = zmalloc(sizeof(*node)); + + if (nodename) + memcpy(node->name, nodename, REDIS_CLUSTER_NAMELEN); + else + getRandomHexChars(node->name, REDIS_CLUSTER_NAMELEN); + node->ctime = mstime(); + node->configEpoch = 0; + node->flags = flags; + memset(node->slots,0,sizeof(node->slots)); + node->numslots = 0; + node->numslaves = 0; + node->slaves = NULL; + node->slaveof = NULL; + node->ping_sent = node->pong_received = 0; + node->fail_time = 0; + node->link = NULL; + memset(node->ip,0,sizeof(node->ip)); + node->port = 0; + node->fail_reports = listCreate(); + node->voted_time = 0; + node->repl_offset_time = 0; + node->repl_offset = 0; + listSetFreeMethod(node->fail_reports,zfree); + return node; +} + +/* This function is called every time we get a failure report from a node. 
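Because keyHashSlot() above is the heart of key routing, here is a small self-contained sketch of just the hash-tag rule; crc16() is swapped for a toy hash so the snippet compiles on its own, which means the printed numbers are not real slot values and only the "same tag, same slot" behaviour is meaningful:

#include <stdio.h>
#include <string.h>

/* Stand-in for the real crc16() used by cluster.c. */
static unsigned int toy_hash(const char *p, int len) {
    unsigned int h = 5381;
    while (len--) h = h*33 + (unsigned char)*p++;
    return h;
}

/* Same extraction rule as keyHashSlot(): hash only what sits between
 * the first '{' and the following '}' when both exist and the tag is
 * not empty, otherwise hash the whole key. */
static unsigned int toy_slot(const char *key) {
    int keylen = (int)strlen(key), s, e;

    for (s = 0; s < keylen; s++) if (key[s] == '{') break;
    if (s == keylen) return toy_hash(key,keylen) & 0x3FFF;
    for (e = s+1; e < keylen; e++) if (key[e] == '}') break;
    if (e == keylen || e == s+1) return toy_hash(key,keylen) & 0x3FFF;
    return toy_hash(key+s+1,e-s-1) & 0x3FFF;
}

int main(void) {
    /* Both keys carry the tag "user1000", so they map to the same slot. */
    printf("%u %u\n", toy_slot("{user1000}.following"),
                      toy_slot("{user1000}.followers"));
    return 0;
}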
+ * The side effect is to populate the fail_reports list (or to update + * the timestamp of an existing report). + * + * 'failing' is the node that is in failure state according to the + * 'sender' node. + * + * The function returns 0 if it just updates a timestamp of an existing + * failure report from the same sender. 1 is returned if a new failure + * report is created. */ +int clusterNodeAddFailureReport(clusterNode *failing, clusterNode *sender) { + list *l = failing->fail_reports; + listNode *ln; + listIter li; + clusterNodeFailReport *fr; + + /* If a failure report from the same sender already exists, just update + * the timestamp. */ + listRewind(l,&li); + while ((ln = listNext(&li)) != NULL) { + fr = ln->value; + if (fr->node == sender) { + fr->time = mstime(); + return 0; + } + } + + /* Otherwise create a new report. */ + fr = zmalloc(sizeof(*fr)); + fr->node = sender; + fr->time = mstime(); + listAddNodeTail(l,fr); + return 1; +} + +/* Remove failure reports that are too old, where too old means reasonably + * older than the global node timeout. Note that anyway for a node to be + * flagged as FAIL we need to have a local PFAIL state that is at least + * older than the global node timeout, so we don't just trust the number + * of failure reports from other nodes. */ +void clusterNodeCleanupFailureReports(clusterNode *node) { + list *l = node->fail_reports; + listNode *ln; + listIter li; + clusterNodeFailReport *fr; + mstime_t maxtime = server.cluster_node_timeout * + REDIS_CLUSTER_FAIL_REPORT_VALIDITY_MULT; + mstime_t now = mstime(); + + listRewind(l,&li); + while ((ln = listNext(&li)) != NULL) { + fr = ln->value; + if (now - fr->time > maxtime) listDelNode(l,ln); + } +} + +/* Remove the failing report for 'node' if it was previously considered + * failing by 'sender'. This function is called when a node informs us via + * gossip that a node is OK from its point of view (no FAIL or PFAIL flags). + * + * Note that this function is called relatively often as it gets called even + * when there are no nodes failing, and is O(N), however when the cluster is + * fine the failure reports list is empty so the function runs in constant + * time. + * + * The function returns 1 if the failure report was found and removed. + * Otherwise 0 is returned. */ +int clusterNodeDelFailureReport(clusterNode *node, clusterNode *sender) { + list *l = node->fail_reports; + listNode *ln; + listIter li; + clusterNodeFailReport *fr; + + /* Search for a failure report from this sender. */ + listRewind(l,&li); + while ((ln = listNext(&li)) != NULL) { + fr = ln->value; + if (fr->node == sender) break; + } + if (!ln) return 0; /* No failure report from this sender. */ + + /* Remove the failure report. */ + listDelNode(l,ln); + clusterNodeCleanupFailureReports(node); + return 1; +} + +/* Return the number of external nodes that believe 'node' is failing, + * not including this node, that may have a PFAIL or FAIL state for this + * node as well. 
*/ +int clusterNodeFailureReportsCount(clusterNode *node) { + clusterNodeCleanupFailureReports(node); + return listLength(node->fail_reports); +} + +int clusterNodeRemoveSlave(clusterNode *master, clusterNode *slave) { + int j; + + for (j = 0; j < master->numslaves; j++) { + if (master->slaves[j] == slave) { + if ((j+1) < master->numslaves) { + int remaining_slaves = (master->numslaves - j) - 1; + memmove(master->slaves+j,master->slaves+(j+1), + (sizeof(*master->slaves) * remaining_slaves)); + } + master->numslaves--; + return REDIS_OK; + } + } + return REDIS_ERR; +} + +int clusterNodeAddSlave(clusterNode *master, clusterNode *slave) { + int j; + + /* If it's already a slave, don't add it again. */ + for (j = 0; j < master->numslaves; j++) + if (master->slaves[j] == slave) return REDIS_ERR; + master->slaves = zrealloc(master->slaves, + sizeof(clusterNode*)*(master->numslaves+1)); + master->slaves[master->numslaves] = slave; + master->numslaves++; + return REDIS_OK; +} + +void clusterNodeResetSlaves(clusterNode *n) { + zfree(n->slaves); + n->numslaves = 0; + n->slaves = NULL; +} + +int clusterCountNonFailingSlaves(clusterNode *n) { + int j, okslaves = 0; + + for (j = 0; j < n->numslaves; j++) + if (!nodeFailed(n->slaves[j])) okslaves++; + return okslaves; +} + +/* Low level cleanup of the node structure. Only called by clusterDelNode(). */ +void freeClusterNode(clusterNode *n) { + sds nodename; + int j; + + /* If the node is a master with associated slaves, we have to set + * all the slaves->slaveof fields to NULL (unknown). */ + if (nodeIsMaster(n)) { + for (j = 0; j < n->numslaves; j++) + n->slaves[j]->slaveof = NULL; + } + + /* Remove this node from the list of slaves of its master. */ + if (nodeIsSlave(n) && n->slaveof) clusterNodeRemoveSlave(n->slaveof,n); + + /* Unlink from the set of nodes. */ + nodename = sdsnewlen(n->name, REDIS_CLUSTER_NAMELEN); + redisAssert(dictDelete(server.cluster->nodes,nodename) == DICT_OK); + sdsfree(nodename); + + /* Release link and associated data structures. */ + if (n->link) freeClusterLink(n->link); + listRelease(n->fail_reports); + zfree(n->slaves); + zfree(n); +} + +/* Add a node to the nodes hash table */ +int clusterAddNode(clusterNode *node) { + int retval; + + retval = dictAdd(server.cluster->nodes, + sdsnewlen(node->name,REDIS_CLUSTER_NAMELEN), node); + return (retval == DICT_OK) ? REDIS_OK : REDIS_ERR; +} + +/* Remove a node from the cluster. The functio performs the high level + * cleanup, calling freeClusterNode() for the low level cleanup. + * Here we do the following: + * + * 1) Mark all the slots handled by it as unassigned. + * 2) Remove all the failure reports sent by this node and referenced by + * other nodes. + * 3) Free the node with freeClusterNode() that will in turn remove it + * from the hash table and from the list of slaves of its master, if + * it is a slave node. + */ +void clusterDelNode(clusterNode *delnode) { + int j; + dictIterator *di; + dictEntry *de; + + /* 1) Mark slots as unassigned. */ + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (server.cluster->importing_slots_from[j] == delnode) + server.cluster->importing_slots_from[j] = NULL; + if (server.cluster->migrating_slots_to[j] == delnode) + server.cluster->migrating_slots_to[j] = NULL; + if (server.cluster->slots[j] == delnode) + clusterDelSlot(j); + } + + /* 2) Remove failure reports. 
*/ + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (node == delnode) continue; + clusterNodeDelFailureReport(node,delnode); + } + dictReleaseIterator(di); + + /* 3) Free the node, unlinking it from the cluster. */ + freeClusterNode(delnode); +} + +/* Node lookup by name */ +clusterNode *clusterLookupNode(char *name) { + sds s = sdsnewlen(name, REDIS_CLUSTER_NAMELEN); + dictEntry *de; + + de = dictFind(server.cluster->nodes,s); + sdsfree(s); + if (de == NULL) return NULL; + return dictGetVal(de); +} + +/* This is only used after the handshake. When we connect a given IP/PORT + * as a result of CLUSTER MEET we don't have the node name yet, so we + * pick a random one, and will fix it when we receive the PONG request using + * this function. */ +void clusterRenameNode(clusterNode *node, char *newname) { + int retval; + sds s = sdsnewlen(node->name, REDIS_CLUSTER_NAMELEN); + + redisLog(REDIS_DEBUG,"Renaming node %.40s into %.40s", + node->name, newname); + retval = dictDelete(server.cluster->nodes, s); + sdsfree(s); + redisAssert(retval == DICT_OK); + memcpy(node->name, newname, REDIS_CLUSTER_NAMELEN); + clusterAddNode(node); +} + +/* ----------------------------------------------------------------------------- + * CLUSTER config epoch handling + * -------------------------------------------------------------------------- */ + +/* Return the greatest configEpoch found in the cluster, or the current + * epoch if greater than any node configEpoch. */ +uint64_t clusterGetMaxEpoch(void) { + uint64_t max = 0; + dictIterator *di; + dictEntry *de; + + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + if (node->configEpoch > max) max = node->configEpoch; + } + dictReleaseIterator(di); + if (max < server.cluster->currentEpoch) max = server.cluster->currentEpoch; + return max; +} + +/* If this node epoch is zero or is not already the greatest across the + * cluster (from the POV of the local configuration), this function will: + * + * 1) Generate a new config epoch increment the current epoch. + * 2) Assign the new epoch to this node, WITHOUT any consensus. + * 3) Persist the configuration on disk before sending packets with the + * new configuration. + * + * If the new config epoch is generated and assigend, REDIS_OK is returned, + * otherwise REDIS_ERR is returned (since the node has already the greatest + * configuration around) and no operation is performed. + * + * Important note: this function violates the principle that config epochs + * should be generated with consensus and should be unique across the cluster. + * However Redis Cluster uses this auto-generated new config epochs in two + * cases: + * + * 1) When slots are closed after importing. Otherwise resharding would be + * too exansive. + * 2) When CLUSTER FAILOVER is called with options that force a slave to + * failover its master even if there is not master majority able to + * create a new configuration epoch. + * + * Redis Cluster does not explode using this function, even in the case of + * a collision between this node and another node, generating the same + * configuration epoch unilaterally, because the config epoch conflict + * resolution algorithm will eventually move colliding nodes to different + * config epochs. However usign this function may violate the "last failover + * wins" rule, so should only be used with care. 
*/ +int clusterBumpConfigEpochWithoutConsensus(void) { + uint64_t maxEpoch = clusterGetMaxEpoch(); + + if (myself->configEpoch == 0 || + myself->configEpoch != maxEpoch) + { + server.cluster->currentEpoch++; + myself->configEpoch = server.cluster->currentEpoch; + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_FSYNC_CONFIG); + redisLog(REDIS_WARNING, + "New configEpoch set to %llu", + (unsigned long long) myself->configEpoch); + return REDIS_OK; + } else { + return REDIS_ERR; + } +} + +/* This function is called when this node is a master, and we receive from + * another master a configuration epoch that is equal to our configuration + * epoch. + * + * BACKGROUND + * + * It is not possible that different slaves get the same config + * epoch during a failover election, because the slaves need to get voted + * by a majority. However when we perform a manual resharding of the cluster + * the node will assign a configuration epoch to itself without to ask + * for agreement. Usually resharding happens when the cluster is working well + * and is supervised by the sysadmin, however it is possible for a failover + * to happen exactly while the node we are resharding a slot to assigns itself + * a new configuration epoch, but before it is able to propagate it. + * + * So technically it is possible in this condition that two nodes end with + * the same configuration epoch. + * + * Another possibility is that there are bugs in the implementation causing + * this to happen. + * + * Moreover when a new cluster is created, all the nodes start with the same + * configEpoch. This collision resolution code allows nodes to automatically + * end with a different configEpoch at startup automatically. + * + * In all the cases, we want a mechanism that resolves this issue automatically + * as a safeguard. The same configuration epoch for masters serving different + * set of slots is not harmful, but it is if the nodes end serving the same + * slots for some reason (manual errors or software bugs) without a proper + * failover procedure. + * + * In general we want a system that eventually always ends with different + * masters having different configuration epochs whatever happened, since + * nothign is worse than a split-brain condition in a distributed system. + * + * BEHAVIOR + * + * When this function gets called, what happens is that if this node + * has the lexicographically smaller Node ID compared to the other node + * with the conflicting epoch (the 'sender' node), it will assign itself + * the greatest configuration epoch currently detected among nodes plus 1. + * + * This means that even if there are multiple nodes colliding, the node + * with the greatest Node ID never moves forward, so eventually all the nodes + * end with a different configuration epoch. + */ +void clusterHandleConfigEpochCollision(clusterNode *sender) { + /* Prerequisites: nodes have the same configEpoch and are both masters. */ + if (sender->configEpoch != myself->configEpoch || + !nodeIsMaster(sender) || !nodeIsMaster(myself)) return; + /* Don't act if the colliding node has a smaller Node ID. */ + if (memcmp(sender->name,myself->name,REDIS_CLUSTER_NAMELEN) <= 0) return; + /* Get the next ID available at the best of this node knowledge. */ + server.cluster->currentEpoch++; + myself->configEpoch = server.cluster->currentEpoch; + clusterSaveConfigOrDie(1); + redisLog(REDIS_VERBOSE, + "WARNING: configEpoch collision with node %.40s." 
+ " configEpoch set to %llu", + sender->name, + (unsigned long long) myself->configEpoch); +} + +/* ----------------------------------------------------------------------------- + * CLUSTER nodes blacklist + * + * The nodes blacklist is just a way to ensure that a given node with a given + * Node ID is not readded before some time elapsed (this time is specified + * in seconds in REDIS_CLUSTER_BLACKLIST_TTL). + * + * This is useful when we want to remove a node from the cluster completely: + * when CLUSTER FORGET is called, it also puts the node into the blacklist so + * that even if we receive gossip messages from other nodes that still remember + * about the node we want to remove, we don't re-add it before some time. + * + * Currently the REDIS_CLUSTER_BLACKLIST_TTL is set to 1 minute, this means + * that redis-trib has 60 seconds to send CLUSTER FORGET messages to nodes + * in the cluster without dealing with the problem of other nodes re-adding + * back the node to nodes we already sent the FORGET command to. + * + * The data structure used is a hash table with an sds string representing + * the node ID as key, and the time when it is ok to re-add the node as + * value. + * -------------------------------------------------------------------------- */ + +#define REDIS_CLUSTER_BLACKLIST_TTL 60 /* 1 minute. */ + + +/* Before of the addNode() or Exists() operations we always remove expired + * entries from the black list. This is an O(N) operation but it is not a + * problem since add / exists operations are called very infrequently and + * the hash table is supposed to contain very little elements at max. + * However without the cleanup during long uptimes and with some automated + * node add/removal procedures, entries could accumulate. */ +void clusterBlacklistCleanup(void) { + dictIterator *di; + dictEntry *de; + + di = dictGetSafeIterator(server.cluster->nodes_black_list); + while((de = dictNext(di)) != NULL) { + int64_t expire = dictGetUnsignedIntegerVal(de); + + if (expire < server.unixtime) + dictDelete(server.cluster->nodes_black_list,dictGetKey(de)); + } + dictReleaseIterator(di); +} + +/* Cleanup the blacklist and add a new node ID to the black list. */ +void clusterBlacklistAddNode(clusterNode *node) { + dictEntry *de; + sds id = sdsnewlen(node->name,REDIS_CLUSTER_NAMELEN); + + clusterBlacklistCleanup(); + if (dictAdd(server.cluster->nodes_black_list,id,NULL) == DICT_OK) { + /* If the key was added, duplicate the sds string representation of + * the key for the next lookup. We'll free it at the end. */ + id = sdsdup(id); + } + de = dictFind(server.cluster->nodes_black_list,id); + dictSetUnsignedIntegerVal(de,time(NULL)+REDIS_CLUSTER_BLACKLIST_TTL); + sdsfree(id); +} + +/* Return non-zero if the specified node ID exists in the blacklist. + * You don't need to pass an sds string here, any pointer to 40 bytes + * will work. */ +int clusterBlacklistExists(char *nodeid) { + sds id = sdsnewlen(nodeid,REDIS_CLUSTER_NAMELEN); + int retval; + + clusterBlacklistCleanup(); + retval = dictFind(server.cluster->nodes_black_list,id) != NULL; + sdsfree(id); + return retval; +} + +/* ----------------------------------------------------------------------------- + * CLUSTER messages exchange - PING/PONG and gossip + * -------------------------------------------------------------------------- */ + +/* This function checks if a given node should be marked as FAIL. + * It happens if the following conditions are met: + * + * 1) We received enough failure reports from other master nodes via gossip. 
+ * Enough means that the majority of the masters signaled the node is + * down recently. + * 2) We believe this node is in PFAIL state. + * + * If a failure is detected we also inform the whole cluster about this + * event trying to force every other node to set the FAIL flag for the node. + * + * Note that the form of agreement used here is weak, as we collect the majority + * of masters state during some time, and even if we force agreement by + * propagating the FAIL message, because of partitions we may not reach every + * node. However: + * + * 1) Either we reach the majority and eventually the FAIL state will propagate + * to all the cluster. + * 2) Or there is no majority so no slave promotion will be authorized and the + * FAIL flag will be cleared after some time. + */ +void markNodeAsFailingIfNeeded(clusterNode *node) { + int failures; + int needed_quorum = (server.cluster->size / 2) + 1; + + if (!nodeTimedOut(node)) return; /* We can reach it. */ + if (nodeFailed(node)) return; /* Already FAILing. */ + + failures = clusterNodeFailureReportsCount(node); + /* Also count myself as a voter if I'm a master. */ + if (nodeIsMaster(myself)) failures++; + if (failures < needed_quorum) return; /* No weak agreement from masters. */ + + redisLog(REDIS_NOTICE, + "Marking node %.40s as failing (quorum reached).", node->name); + + /* Mark the node as failing. */ + node->flags &= ~REDIS_NODE_PFAIL; + node->flags |= REDIS_NODE_FAIL; + node->fail_time = mstime(); + + /* Broadcast the failing node name to everybody, forcing all the other + * reachable nodes to flag the node as FAIL. */ + if (nodeIsMaster(myself)) clusterSendFail(node->name); + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); +} + +/* This function is called only if a node is marked as FAIL, but we are able + * to reach it again. It checks if there are the conditions to undo the FAIL + * state. */ +void clearNodeFailureIfNeeded(clusterNode *node) { + mstime_t now = mstime(); + + redisAssert(nodeFailed(node)); + + /* For slaves we always clear the FAIL flag if we can contact the + * node again. */ + if (nodeIsSlave(node) || node->numslots == 0) { + redisLog(REDIS_NOTICE, + "Clear FAIL state for node %.40s: %s is reachable again.", + node->name, + nodeIsSlave(node) ? "slave" : "master without slots"); + node->flags &= ~REDIS_NODE_FAIL; + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); + } + + /* If it is a master and... + * 1) The FAIL state is old enough. + * 2) It is yet serving slots from our point of view (not failed over). + * Apparently no one is going to fix these slots, clear the FAIL flag. */ + if (nodeIsMaster(node) && node->numslots > 0 && + (now - node->fail_time) > + (server.cluster_node_timeout * REDIS_CLUSTER_FAIL_UNDO_TIME_MULT)) + { + redisLog(REDIS_NOTICE, + "Clear FAIL state for node %.40s: is reachable again and nobody is serving its slots after some time.", + node->name); + node->flags &= ~REDIS_NODE_FAIL; + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); + } +} + +/* Return true if we already have a node in HANDSHAKE state matching the + * specified ip address and port number. This function is used in order to + * avoid adding a new handshake node for the same address multiple times. 
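As a worked example of the quorum rule spelled out above (the numbers are illustrative): with server.cluster->size equal to 5, needed_quorum is (5 / 2) + 1 = 3, so a PFAIL node is promoted to FAIL only once reports from other masters, plus this node's own opinion when it is itself a master, add up to at least 3; with only two recent reports the flag stays PFAIL and the stale entries simply age out of fail_reports.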
*/ +int clusterHandshakeInProgress(char *ip, int port) { + dictIterator *di; + dictEntry *de; + + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (!nodeInHandshake(node)) continue; + if (!strcasecmp(node->ip,ip) && node->port == port) break; + } + dictReleaseIterator(di); + return de != NULL; +} + +/* Start an handshake with the specified address if there is not one + * already in progress. Returns non-zero if the handshake was actually + * started. On error zero is returned and errno is set to one of the + * following values: + * + * EAGAIN - There is already an handshake in progress for this address. + * EINVAL - IP or port are not valid. */ +int clusterStartHandshake(char *ip, int port) { + clusterNode *n; + char norm_ip[REDIS_IP_STR_LEN]; + struct sockaddr_storage sa; + + /* IP sanity check */ + if (inet_pton(AF_INET,ip, + &(((struct sockaddr_in *)&sa)->sin_addr))) + { + sa.ss_family = AF_INET; + } else if (inet_pton(AF_INET6,ip, + &(((struct sockaddr_in6 *)&sa)->sin6_addr))) + { + sa.ss_family = AF_INET6; + } else { + errno = EINVAL; + return 0; + } + + /* Port sanity check */ + if (port <= 0 || port > (65535-REDIS_CLUSTER_PORT_INCR)) { + errno = EINVAL; + return 0; + } + + /* Set norm_ip as the normalized string representation of the node + * IP address. */ + memset(norm_ip,0,REDIS_IP_STR_LEN); + if (sa.ss_family == AF_INET) + inet_ntop(AF_INET, + (void*)&(((struct sockaddr_in *)&sa)->sin_addr), + norm_ip,REDIS_IP_STR_LEN); + else + inet_ntop(AF_INET6, + (void*)&(((struct sockaddr_in6 *)&sa)->sin6_addr), + norm_ip,REDIS_IP_STR_LEN); + + if (clusterHandshakeInProgress(norm_ip,port)) { + errno = EAGAIN; + return 0; + } + + /* Add the node with a random address (NULL as first argument to + * createClusterNode()). Everything will be fixed during the + * handshake. */ + n = createClusterNode(NULL,REDIS_NODE_HANDSHAKE|REDIS_NODE_MEET); + memcpy(n->ip,norm_ip,sizeof(n->ip)); + n->port = port; + clusterAddNode(n); + return 1; +} + +/* Process the gossip section of PING or PONG packets. + * Note that this function assumes that the packet is already sanity-checked + * by the caller, not in the content of the gossip section, but in the + * length. */ +void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { + uint16_t count = ntohs(hdr->count); + clusterMsgDataGossip *g = (clusterMsgDataGossip*) hdr->data.ping.gossip; + clusterNode *sender = link->node ? link->node : clusterLookupNode(hdr->sender); + + while(count--) { + uint16_t flags = ntohs(g->flags); + clusterNode *node; + sds ci; + + ci = representRedisNodeFlags(sdsempty(), flags); + redisLog(REDIS_DEBUG,"GOSSIP %.40s %s:%d %s", + g->nodename, + g->ip, + ntohs(g->port), + ci); + sdsfree(ci); + + /* Update our state accordingly to the gossip sections */ + node = clusterLookupNode(g->nodename); + if (node) { + /* We already know this node. + Handle failure reports, only when the sender is a master. 
*/ + if (sender && nodeIsMaster(sender) && node != myself) { + if (flags & (REDIS_NODE_FAIL|REDIS_NODE_PFAIL)) { + if (clusterNodeAddFailureReport(node,sender)) { + redisLog(REDIS_VERBOSE, + "Node %.40s reported node %.40s as not reachable.", + sender->name, node->name); + } + markNodeAsFailingIfNeeded(node); + } else { + if (clusterNodeDelFailureReport(node,sender)) { + redisLog(REDIS_VERBOSE, + "Node %.40s reported node %.40s is back online.", + sender->name, node->name); + } + } + } + + /* If we already know this node, but it is not reachable, and + * we see a different address in the gossip section, start an + * handshake with the (possibly) new address: this will result + * into a node address update if the handshake will be + * successful. */ + if (node->flags & (REDIS_NODE_FAIL|REDIS_NODE_PFAIL) && + (strcasecmp(node->ip,g->ip) || node->port != ntohs(g->port))) + { + clusterStartHandshake(g->ip,ntohs(g->port)); + } + } else { + /* If it's not in NOADDR state and we don't have it, we + * start a handshake process against this IP/PORT pairs. + * + * Note that we require that the sender of this gossip message + * is a well known node in our cluster, otherwise we risk + * joining another cluster. */ + if (sender && + !(flags & REDIS_NODE_NOADDR) && + !clusterBlacklistExists(g->nodename)) + { + clusterStartHandshake(g->ip,ntohs(g->port)); + } + } + + /* Next node */ + g++; + } +} + +/* IP -> string conversion. 'buf' is supposed to at least be 46 bytes. */ +void nodeIp2String(char *buf, clusterLink *link) { + anetPeerToString(link->fd, buf, REDIS_IP_STR_LEN, NULL); +} + +/* Update the node address to the IP address that can be extracted + * from link->fd, and at the specified port. + * Also disconnect the node link so that we'll connect again to the new + * address. + * + * If the ip/port pair are already correct no operation is performed at + * all. + * + * The function returns 0 if the node address is still the same, + * otherwise 1 is returned. */ +int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, int port) { + char ip[REDIS_IP_STR_LEN] = {0}; + + /* We don't proceed if the link is the same as the sender link, as this + * function is designed to see if the node link is consistent with the + * symmetric link that is used to receive PINGs from the node. + * + * As a side effect this function never frees the passed 'link', so + * it is safe to call during packet processing. */ + if (link == node->link) return 0; + + nodeIp2String(ip,link); + if (node->port == port && strcmp(ip,node->ip) == 0) return 0; + + /* IP / port is different, update it. */ + memcpy(node->ip,ip,sizeof(ip)); + node->port = port; + if (node->link) freeClusterLink(node->link); + node->flags &= ~REDIS_NODE_NOADDR; + redisLog(REDIS_WARNING,"Address updated for node %.40s, now %s:%d", + node->name, node->ip, node->port); + + /* Check if this is our master and we have to change the + * replication target as well. */ + if (nodeIsSlave(myself) && myself->slaveof == node) + replicationSetMaster(node->ip, node->port); + return 1; +} + +/* Reconfigure the specified node 'n' as a master. This function is called when + * a node that we believed to be a slave is now acting as master in order to + * update the state of the node. */ +void clusterSetNodeAsMaster(clusterNode *n) { + if (nodeIsMaster(n)) return; + + if (n->slaveof) clusterNodeRemoveSlave(n->slaveof,n); + n->flags &= ~REDIS_NODE_SLAVE; + n->flags |= REDIS_NODE_MASTER; + n->slaveof = NULL; + + /* Update config and state. 
*/ + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE); +} + +/* This function is called when we receive a master configuration via a + * PING, PONG or UPDATE packet. What we receive is a node, a configEpoch of the + * node, and the set of slots claimed under this configEpoch. + * + * What we do is to rebind the slots with newer configuration compared to our + * local configuration, and if needed, we turn ourself into a replica of the + * node (see the function comments for more info). + * + * The 'sender' is the node for which we received a configuration update. + * Sometimes it is not actually the "Sender" of the information, like in the case + * we receive the info via an UPDATE packet. */ +void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoch, unsigned char *slots) { + int j; + clusterNode *curmaster, *newmaster = NULL; + /* The dirty slots list is a list of slots for which we lose the ownership + * while having still keys inside. This usually happens after a failover + * or after a manual cluster reconfiguration operated by the admin. + * + * If the update message is not able to demote a master to slave (in this + * case we'll resync with the master updating the whole key space), we + * need to delete all the keys in the slots we lost ownership. */ + uint16_t dirty_slots[REDIS_CLUSTER_SLOTS]; + int dirty_slots_count = 0; + + /* Here we set curmaster to this node or the node this node + * replicates to if it's a slave. In the for loop we are + * interested to check if slots are taken away from curmaster. */ + curmaster = nodeIsMaster(myself) ? myself : myself->slaveof; + + if (sender == myself) { + redisLog(REDIS_WARNING,"Discarding UPDATE message about myself."); + return; + } + + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (bitmapTestBit(slots,j)) { + /* The slot is already bound to the sender of this message. */ + if (server.cluster->slots[j] == sender) continue; + + /* The slot is in importing state, it should be modified only + * manually via redis-trib (example: a resharding is in progress + * and the migrating side slot was already closed and is advertising + * a new config. We still want the slot to be closed manually). */ + if (server.cluster->importing_slots_from[j]) continue; + + /* We rebind the slot to the new node claiming it if: + * 1) The slot was unassigned or the new node claims it with a + * greater configEpoch. + * 2) We are not currently importing the slot. */ + if (server.cluster->slots[j] == NULL || + server.cluster->slots[j]->configEpoch < senderConfigEpoch) + { + /* Was this slot mine, and still contains keys? Mark it as + * a dirty slot. */ + if (server.cluster->slots[j] == myself && + countKeysInSlot(j) && + sender != myself) + { + dirty_slots[dirty_slots_count] = j; + dirty_slots_count++; + } + + if (server.cluster->slots[j] == curmaster) + newmaster = sender; + clusterDelSlot(j); + clusterAddSlot(sender,j); + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE| + CLUSTER_TODO_FSYNC_CONFIG); + } + } + } + + /* If at least one slot was reassigned from a node to another node + * with a greater configEpoch, it is possible that: + * 1) We are a master left without slots. This means that we were + * failed over and we should turn into a replica of the new + * master. + * 2) We are a slave and our master is left without slots. We need + * to replicate to the new slots owner. */ + if (newmaster && curmaster->numslots == 0) { + redisLog(REDIS_WARNING, + "Configuration change detected. 
Reconfiguring myself " + "as a replica of %.40s", sender->name); + clusterSetMaster(sender); + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE| + CLUSTER_TODO_FSYNC_CONFIG); + } else if (dirty_slots_count) { + /* If we are here, we received an update message which removed + * ownership for certain slots we still have keys about, but still + * we are serving some slots, so this master node was not demoted to + * a slave. + * + * In order to maintain a consistent state between keys and slots + * we need to remove all the keys from the slots we lost. */ + for (j = 0; j < dirty_slots_count; j++) + delKeysInSlot(dirty_slots[j]); + } +} + +/* When this function is called, there is a packet to process starting + * at node->rcvbuf. Releasing the buffer is up to the caller, so this + * function should just handle the higher level stuff of processing the + * packet, modifying the cluster state if needed. + * + * The function returns 1 if the link is still valid after the packet + * was processed, otherwise 0 if the link was freed since the packet + * processing lead to some inconsistency error (for instance a PONG + * received from the wrong sender ID). */ +int clusterProcessPacket(clusterLink *link) { + clusterMsg *hdr = (clusterMsg*) link->rcvbuf; + uint32_t totlen = ntohl(hdr->totlen); + uint16_t type = ntohs(hdr->type); + uint16_t flags = ntohs(hdr->flags); + uint64_t senderCurrentEpoch = 0, senderConfigEpoch = 0; + clusterNode *sender; + + server.cluster->stats_bus_messages_received++; + redisLog(REDIS_DEBUG,"--- Processing packet of type %d, %lu bytes", + type, (unsigned long) totlen); + + /* Perform sanity checks */ + if (totlen < 16) return 1; /* At least signature, version, totlen, count. */ + if (ntohs(hdr->ver) != CLUSTER_PROTO_VER) + return 1; /* Can't handle versions other than the current one.*/ + if (totlen > sdslen(link->rcvbuf)) return 1; + if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG || + type == CLUSTERMSG_TYPE_MEET) + { + uint16_t count = ntohs(hdr->count); + uint32_t explen; /* expected length of this packet */ + + explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + explen += (sizeof(clusterMsgDataGossip)*count); + if (totlen != explen) return 1; + } else if (type == CLUSTERMSG_TYPE_FAIL) { + uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + + explen += sizeof(clusterMsgDataFail); + if (totlen != explen) return 1; + } else if (type == CLUSTERMSG_TYPE_PUBLISH) { + uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + + explen += sizeof(clusterMsgDataPublish) - + 8 + + ntohl(hdr->data.publish.msg.channel_len) + + ntohl(hdr->data.publish.msg.message_len); + if (totlen != explen) return 1; + } else if (type == CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST || + type == CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK || + type == CLUSTERMSG_TYPE_MFSTART) + { + uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + + if (totlen != explen) return 1; + } else if (type == CLUSTERMSG_TYPE_UPDATE) { + uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + + explen += sizeof(clusterMsgDataUpdate); + if (totlen != explen) return 1; + } + + /* Check if the sender is a known node. */ + sender = clusterLookupNode(hdr->sender); + if (sender && !nodeInHandshake(sender)) { + /* Update our curretEpoch if we see a newer epoch in the cluster. 
*/ + senderCurrentEpoch = ntohu64(hdr->currentEpoch); + senderConfigEpoch = ntohu64(hdr->configEpoch); + if (senderCurrentEpoch > server.cluster->currentEpoch) + server.cluster->currentEpoch = senderCurrentEpoch; + /* Update the sender configEpoch if it is publishing a newer one. */ + if (senderConfigEpoch > sender->configEpoch) { + sender->configEpoch = senderConfigEpoch; + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_FSYNC_CONFIG); + } + /* Update the replication offset info for this node. */ + sender->repl_offset = ntohu64(hdr->offset); + sender->repl_offset_time = mstime(); + /* If we are a slave performing a manual failover and our master + * sent its offset while already paused, populate the MF state. */ + if (server.cluster->mf_end && + nodeIsSlave(myself) && + myself->slaveof == sender && + hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && + server.cluster->mf_master_offset == 0) + { + server.cluster->mf_master_offset = sender->repl_offset; + redisLog(REDIS_WARNING, + "Received replication offset for paused " + "master manual failover: %lld", + server.cluster->mf_master_offset); + } + } + + /* Initial processing of PING and MEET requests replying with a PONG. */ + if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_MEET) { + redisLog(REDIS_DEBUG,"Ping packet received: %p", (void*)link->node); + + /* We use incoming MEET messages in order to set the address + * for 'myself', since only other cluster nodes will send us + * MEET messagses on handshakes, when the cluster joins, or + * later if we changed address, and those nodes will use our + * official address to connect to us. So by obtaining this address + * from the socket is a simple way to discover / update our own + * address in the cluster without it being hardcoded in the config. + * + * However if we don't have an address at all, we update the address + * even with a normal PING packet. If it's wrong it will be fixed + * by MEET later. */ + if (type == CLUSTERMSG_TYPE_MEET || myself->ip[0] == '\0') { + char ip[REDIS_IP_STR_LEN]; + + if (anetSockName(link->fd,ip,sizeof(ip),NULL) != -1 && + strcmp(ip,myself->ip)) + { + memcpy(myself->ip,ip,REDIS_IP_STR_LEN); + redisLog(REDIS_WARNING,"IP address for this node updated to %s", + myself->ip); + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); + } + } + + /* Add this node if it is new for us and the msg type is MEET. + * In this stage we don't try to add the node with the right + * flags, slaveof pointer, and so forth, as this details will be + * resolved when we'll receive PONGs from the node. */ + if (!sender && type == CLUSTERMSG_TYPE_MEET) { + clusterNode *node; + + node = createClusterNode(NULL,REDIS_NODE_HANDSHAKE); + nodeIp2String(node->ip,link); + node->port = ntohs(hdr->port); + clusterAddNode(node); + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); + } + + /* If this is a MEET packet from an unknown node, we still process + * the gossip section here since we have to trust the sender because + * of the message type. */ + if (!sender && type == CLUSTERMSG_TYPE_MEET) + clusterProcessGossipSection(hdr,link); + + /* Anyway reply with a PONG */ + clusterSendPing(link,CLUSTERMSG_TYPE_PONG); + } + + /* PING, PONG, MEET: process config information. */ + if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG || + type == CLUSTERMSG_TYPE_MEET) + { + redisLog(REDIS_DEBUG,"%s packet received: %p", + type == CLUSTERMSG_TYPE_PING ? 
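The currentEpoch and configEpoch fields handled above travel as 64-bit big-endian integers (the ntohu64/htonu64 helpers). A portable sketch of that conversion, written here only for illustration and not taken from the patch, is:

    /* Big-endian (network order) 64-bit pack/unpack, illustrative only. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t u64_from_be(const unsigned char *p) {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) v = (v << 8) | p[i];
        return v;
    }

    static void u64_to_be(uint64_t v, unsigned char *p) {
        for (int i = 7; i >= 0; i--) { p[i] = v & 0xff; v >>= 8; }
    }

    int main(void) {
        unsigned char buf[8];
        u64_to_be(0x1122334455667788ULL, buf);
        printf("%llx\n", (unsigned long long) u64_from_be(buf));
        return 0; /* prints 1122334455667788 */
    }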
"ping" : "pong", + (void*)link->node); + if (link->node) { + if (nodeInHandshake(link->node)) { + /* If we already have this node, try to change the + * IP/port of the node with the new one. */ + if (sender) { + redisLog(REDIS_VERBOSE, + "Handshake: we already know node %.40s, " + "updating the address if needed.", sender->name); + if (nodeUpdateAddressIfNeeded(sender,link,ntohs(hdr->port))) + { + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE); + } + /* Free this node as we already have it. This will + * cause the link to be freed as well. */ + clusterDelNode(link->node); + return 0; + } + + /* First thing to do is replacing the random name with the + * right node name if this was a handshake stage. */ + clusterRenameNode(link->node, hdr->sender); + redisLog(REDIS_DEBUG,"Handshake with node %.40s completed.", + link->node->name); + link->node->flags &= ~REDIS_NODE_HANDSHAKE; + link->node->flags |= flags&(REDIS_NODE_MASTER|REDIS_NODE_SLAVE); + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); + } else if (memcmp(link->node->name,hdr->sender, + REDIS_CLUSTER_NAMELEN) != 0) + { + /* If the reply has a non matching node ID we + * disconnect this node and set it as not having an associated + * address. */ + redisLog(REDIS_DEBUG,"PONG contains mismatching sender ID"); + link->node->flags |= REDIS_NODE_NOADDR; + link->node->ip[0] = '\0'; + link->node->port = 0; + freeClusterLink(link); + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); + return 0; + } + } + + /* Update the node address if it changed. */ + if (sender && type == CLUSTERMSG_TYPE_PING && + !nodeInHandshake(sender) && + nodeUpdateAddressIfNeeded(sender,link,ntohs(hdr->port))) + { + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE); + } + + /* Update our info about the node */ + if (link->node && type == CLUSTERMSG_TYPE_PONG) { + link->node->pong_received = mstime(); + link->node->ping_sent = 0; + + /* The PFAIL condition can be reversed without external + * help if it is momentary (that is, if it does not + * turn into a FAIL state). + * + * The FAIL condition is also reversible under specific + * conditions detected by clearNodeFailureIfNeeded(). */ + if (nodeTimedOut(link->node)) { + link->node->flags &= ~REDIS_NODE_PFAIL; + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE); + } else if (nodeFailed(link->node)) { + clearNodeFailureIfNeeded(link->node); + } + } + + /* Check for role switch: slave -> master or master -> slave. */ + if (sender) { + if (!memcmp(hdr->slaveof,REDIS_NODE_NULL_NAME, + sizeof(hdr->slaveof))) + { + /* Node is a master. */ + clusterSetNodeAsMaster(sender); + } else { + /* Node is a slave. */ + clusterNode *master = clusterLookupNode(hdr->slaveof); + + if (nodeIsMaster(sender)) { + /* Master turned into a slave! Reconfigure the node. */ + clusterDelNodeSlots(sender); + sender->flags &= ~REDIS_NODE_MASTER; + sender->flags |= REDIS_NODE_SLAVE; + + /* Remove the list of slaves from the node. */ + if (sender->numslaves) clusterNodeResetSlaves(sender); + + /* Update config and state. */ + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE); + } + + /* Master node changed for this slave? */ + if (master && sender->slaveof != master) { + if (sender->slaveof) + clusterNodeRemoveSlave(sender->slaveof,sender); + clusterNodeAddSlave(master,sender); + sender->slaveof = master; + + /* Update config. */ + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG); + } + } + } + + /* Update our info about served slots. 
+ * + * Note: this MUST happen after we update the master/slave state + * so that REDIS_NODE_MASTER flag will be set. */ + + /* Many checks are only needed if the set of served slots this + * instance claims is different compared to the set of slots we have + * for it. Check this ASAP to avoid other computational expansive + * checks later. */ + clusterNode *sender_master = NULL; /* Sender or its master if slave. */ + int dirty_slots = 0; /* Sender claimed slots don't match my view? */ + + if (sender) { + sender_master = nodeIsMaster(sender) ? sender : sender->slaveof; + if (sender_master) { + dirty_slots = memcmp(sender_master->slots, + hdr->myslots,sizeof(hdr->myslots)) != 0; + } + } + + /* 1) If the sender of the message is a master, and we detected that + * the set of slots it claims changed, scan the slots to see if we + * need to update our configuration. */ + if (sender && nodeIsMaster(sender) && dirty_slots) + clusterUpdateSlotsConfigWith(sender,senderConfigEpoch,hdr->myslots); + + /* 2) We also check for the reverse condition, that is, the sender + * claims to serve slots we know are served by a master with a + * greater configEpoch. If this happens we inform the sender. + * + * This is useful because sometimes after a partition heals, a + * reappearing master may be the last one to claim a given set of + * hash slots, but with a configuration that other instances know to + * be deprecated. Example: + * + * A and B are master and slave for slots 1,2,3. + * A is partitioned away, B gets promoted. + * B is partitioned away, and A returns available. + * + * Usually B would PING A publishing its set of served slots and its + * configEpoch, but because of the partition B can't inform A of the + * new configuration, so other nodes that have an updated table must + * do it. In this way A will stop to act as a master (or can try to + * failover if there are the conditions to win the election). */ + if (sender && dirty_slots) { + int j; + + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (bitmapTestBit(hdr->myslots,j)) { + if (server.cluster->slots[j] == sender || + server.cluster->slots[j] == NULL) continue; + if (server.cluster->slots[j]->configEpoch > + senderConfigEpoch) + { + redisLog(REDIS_VERBOSE, + "Node %.40s has old slots configuration, sending " + "an UPDATE message about %.40s", + sender->name, server.cluster->slots[j]->name); + clusterSendUpdate(sender->link, + server.cluster->slots[j]); + + /* TODO: instead of exiting the loop send every other + * UPDATE packet for other nodes that are the new owner + * of sender's slots. */ + break; + } + } + } + } + + /* If our config epoch collides with the sender's try to fix + * the problem. 
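The memcmp-then-scan approach used for the dirty-slots check above can be reproduced in a few lines. The following stand-alone sketch uses plain bitmaps of 16384 slots; bitmap_test and bitmap_set are illustrative helpers, not the patch's bitmapTestBit/bitmapSetBit.

    /* Illustrative comparison of a claimed slot bitmap against a local view. */
    #include <stdio.h>
    #include <string.h>

    #define SLOTS 16384

    static int bitmap_test(const unsigned char *bm, int pos) {
        return (bm[pos/8] & (1 << (pos&7))) != 0;
    }
    static void bitmap_set(unsigned char *bm, int pos) {
        bm[pos/8] |= 1 << (pos&7);
    }

    int main(void) {
        unsigned char mine[SLOTS/8] = {0}, claimed[SLOTS/8] = {0};
        bitmap_set(mine, 100);
        bitmap_set(claimed, 100);
        bitmap_set(claimed, 200);   /* the sender claims one extra slot */

        if (memcmp(mine, claimed, sizeof(mine)) != 0) {
            for (int j = 0; j < SLOTS; j++)
                if (bitmap_test(claimed, j) && !bitmap_test(mine, j))
                    printf("slot %d claimed but not in my view\n", j);
        }
        return 0; /* prints slot 200 */
    }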
*/ + if (sender && + nodeIsMaster(myself) && nodeIsMaster(sender) && + senderConfigEpoch == myself->configEpoch) + { + clusterHandleConfigEpochCollision(sender); + } + + /* Get info from the gossip section */ + if (sender) clusterProcessGossipSection(hdr,link); + } else if (type == CLUSTERMSG_TYPE_FAIL) { + clusterNode *failing; + + if (sender) { + failing = clusterLookupNode(hdr->data.fail.about.nodename); + if (failing && + !(failing->flags & (REDIS_NODE_FAIL|REDIS_NODE_MYSELF))) + { + redisLog(REDIS_NOTICE, + "FAIL message received from %.40s about %.40s", + hdr->sender, hdr->data.fail.about.nodename); + failing->flags |= REDIS_NODE_FAIL; + failing->fail_time = mstime(); + failing->flags &= ~REDIS_NODE_PFAIL; + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE); + } + } else { + redisLog(REDIS_NOTICE, + "Ignoring FAIL message from unknown node %.40s about %.40s", + hdr->sender, hdr->data.fail.about.nodename); + } + } else if (type == CLUSTERMSG_TYPE_PUBLISH) { + robj *channel, *message; + uint32_t channel_len, message_len; + + /* Don't bother creating useless objects if there are no + * Pub/Sub subscribers. */ + if (dictSize(server.pubsub_channels) || + listLength(server.pubsub_patterns)) + { + channel_len = ntohl(hdr->data.publish.msg.channel_len); + message_len = ntohl(hdr->data.publish.msg.message_len); + channel = createStringObject( + (char*)hdr->data.publish.msg.bulk_data,channel_len); + message = createStringObject( + (char*)hdr->data.publish.msg.bulk_data+channel_len, + message_len); + pubsubPublishMessage(channel,message); + decrRefCount(channel); + decrRefCount(message); + } + } else if (type == CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST) { + if (!sender) return 1; /* We don't know that node. */ + clusterSendFailoverAuthIfNeeded(sender,hdr); + } else if (type == CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK) { + if (!sender) return 1; /* We don't know that node. */ + /* We consider this vote only if the sender is a master serving + * a non zero number of slots, and its currentEpoch is greater or + * equal to epoch where this node started the election. */ + if (nodeIsMaster(sender) && sender->numslots > 0 && + senderCurrentEpoch >= server.cluster->failover_auth_epoch) + { + server.cluster->failover_auth_count++; + /* Maybe we reached a quorum here, set a flag to make sure + * we check ASAP. */ + clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); + } + } else if (type == CLUSTERMSG_TYPE_MFSTART) { + /* This message is acceptable only if I'm a master and the sender + * is one of my slaves. */ + if (!sender || sender->slaveof != myself) return 1; + /* Manual failover requested from slaves. Initialize the state + * accordingly. */ + resetManualFailover(); + server.cluster->mf_end = mstime() + REDIS_CLUSTER_MF_TIMEOUT; + server.cluster->mf_slave = sender; + pauseClients(mstime()+(REDIS_CLUSTER_MF_TIMEOUT*2)); + redisLog(REDIS_WARNING,"Manual failover requested by slave %.40s.", + sender->name); + } else if (type == CLUSTERMSG_TYPE_UPDATE) { + clusterNode *n; /* The node the update is about. */ + uint64_t reportedConfigEpoch = + ntohu64(hdr->data.update.nodecfg.configEpoch); + + if (!sender) return 1; /* We don't know the sender. */ + n = clusterLookupNode(hdr->data.update.nodecfg.nodename); + if (!n) return 1; /* We don't know the reported node. */ + if (n->configEpoch >= reportedConfigEpoch) return 1; /* Nothing new. */ + + /* If in our current config the node is a slave, set it as a master. 
*/ + if (nodeIsSlave(n)) clusterSetNodeAsMaster(n); + + /* Update the node's configEpoch. */ + n->configEpoch = reportedConfigEpoch; + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_FSYNC_CONFIG); + + /* Check the bitmap of served slots and update our + * config accordingly. */ + clusterUpdateSlotsConfigWith(n,reportedConfigEpoch, + hdr->data.update.nodecfg.slots); + } else { + redisLog(REDIS_WARNING,"Received unknown packet type: %d", type); + } + return 1; +} + +/* This function is called when we detect the link with this node is lost. + We set the node as no longer connected. The Cluster Cron will detect + this connection and will try to get it connected again. + + Instead if the node is a temporary node used to accept a query, we + completely free the node on error. */ +void handleLinkIOError(clusterLink *link) { + freeClusterLink(link); +} + +/* Send data. This is handled using a trivial send buffer that gets + * consumed by write(). We don't try to optimize this for speed too much + * as this is a very low traffic channel. */ +void clusterWriteHandler(aeEventLoop *el, int fd, void *privdata, int mask) { + clusterLink *link = (clusterLink*) privdata; + ssize_t nwritten; + REDIS_NOTUSED(el); + REDIS_NOTUSED(mask); + + nwritten = write(fd, link->sndbuf, sdslen(link->sndbuf)); + if (nwritten <= 0) { + redisLog(REDIS_DEBUG,"I/O error writing to node link: %s", + strerror(errno)); + handleLinkIOError(link); + return; + } + sdsrange(link->sndbuf,nwritten,-1); + if (sdslen(link->sndbuf) == 0) + aeDeleteFileEvent(server.el, link->fd, AE_WRITABLE); +} + +/* Read data. Try to read the first field of the header first to check the + * full length of the packet. When a whole packet is in memory this function + * will call the function to process the packet. And so forth. */ +void clusterReadHandler(aeEventLoop *el, int fd, void *privdata, int mask) { + char buf[sizeof(clusterMsg)]; + ssize_t nread; + clusterMsg *hdr; + clusterLink *link = (clusterLink*) privdata; + unsigned int readlen, rcvbuflen; + REDIS_NOTUSED(el); + REDIS_NOTUSED(mask); + + while(1) { /* Read as long as there is data to read. */ + rcvbuflen = sdslen(link->rcvbuf); + if (rcvbuflen < 8) { + /* First, obtain the first 8 bytes to get the full message + * length. */ + readlen = 8 - rcvbuflen; + } else { + /* Finally read the full message. */ + hdr = (clusterMsg*) link->rcvbuf; + if (rcvbuflen == 8) { + /* Perform some sanity check on the message signature + * and length. */ + if (memcmp(hdr->sig,"RCmb",4) != 0 || + ntohl(hdr->totlen) < CLUSTERMSG_MIN_LEN) + { + redisLog(REDIS_WARNING, + "Bad message length or signature received " + "from Cluster bus."); + handleLinkIOError(link); + return; + } + } + readlen = ntohl(hdr->totlen) - rcvbuflen; + if (readlen > sizeof(buf)) readlen = sizeof(buf); + } + + nread = read(fd,buf,readlen); + if (nread == -1 && errno == EAGAIN) return; /* No more data ready. */ + + if (nread <= 0) { + /* I/O error... */ + redisLog(REDIS_DEBUG,"I/O error reading from node link: %s", + (nread == 0) ? "connection closed" : strerror(errno)); + handleLinkIOError(link); + return; + } else { + /* Read data and recast the pointer to the new buffer. */ + link->rcvbuf = sdscatlen(link->rcvbuf,buf,nread); + hdr = (clusterMsg*) link->rcvbuf; + rcvbuflen += nread; + } + + /* Total length obtained? Process this packet. 
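The framing rule used by the read handler (accumulate 8 bytes for the signature and total length, then keep reading until totlen bytes are buffered) can be shown with a tiny helper. This is a hypothetical sketch under the layout described above (4-byte "RCmb" signature followed by a 4-byte length); FAKE_MIN_LEN stands in for the real minimum message length.

    /* Illustrative framing decision: how many more bytes to read, or -1. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    #define FAKE_MIN_LEN 16

    static long bytes_still_needed(const unsigned char *buf, size_t buflen) {
        if (buflen < 8) return 8 - (long)buflen;     /* finish the prefix */
        uint32_t totlen;
        memcpy(&totlen, buf + 4, 4);
        totlen = ntohl(totlen);
        if (memcmp(buf, "RCmb", 4) != 0 || totlen < FAKE_MIN_LEN) return -1;
        return (long)totlen - (long)buflen;          /* rest of the packet */
    }

    int main(void) {
        unsigned char hdr[8] = { 'R','C','m','b', 0,0,0,32 }; /* totlen = 32 */
        printf("%ld\n", bytes_still_needed(hdr, 3));  /* 5  */
        printf("%ld\n", bytes_still_needed(hdr, 8));  /* 24 */
        return 0;
    }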
*/ + if (rcvbuflen >= 8 && rcvbuflen == ntohl(hdr->totlen)) { + if (clusterProcessPacket(link)) { + sdsfree(link->rcvbuf); + link->rcvbuf = sdsempty(); + } else { + return; /* Link no longer valid. */ + } + } + } +} + +/* Put stuff into the send buffer. + * + * It is guaranteed that this function will never have as a side effect + * the link to be invalidated, so it is safe to call this function + * from event handlers that will do stuff with the same link later. */ +void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) { + if (sdslen(link->sndbuf) == 0 && msglen != 0) + aeCreateFileEvent(server.el,link->fd,AE_WRITABLE, + clusterWriteHandler,link); + + link->sndbuf = sdscatlen(link->sndbuf, msg, msglen); + server.cluster->stats_bus_messages_sent++; +} + +/* Send a message to all the nodes that are part of the cluster having + * a connected link. + * + * It is guaranteed that this function will never have as a side effect + * some node->link to be invalidated, so it is safe to call this function + * from event handlers that will do stuff with node links later. */ +void clusterBroadcastMessage(void *buf, size_t len) { + dictIterator *di; + dictEntry *de; + + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (!node->link) continue; + if (node->flags & (REDIS_NODE_MYSELF|REDIS_NODE_HANDSHAKE)) + continue; + clusterSendMessage(node->link,buf,len); + } + dictReleaseIterator(di); +} + +/* Build the message header. hdr must point to a buffer at least + * sizeof(clusterMsg) in bytes. */ +void clusterBuildMessageHdr(clusterMsg *hdr, int type) { + int totlen = 0; + uint64_t offset; + clusterNode *master; + + /* If this node is a master, we send its slots bitmap and configEpoch. + * If this node is a slave we send the master's information instead (the + * node is flagged as slave so the receiver knows that it is NOT really + * in charge for this slots. */ + master = (nodeIsSlave(myself) && myself->slaveof) ? + myself->slaveof : myself; + + memset(hdr,0,sizeof(*hdr)); + hdr->ver = htons(CLUSTER_PROTO_VER); + hdr->sig[0] = 'R'; + hdr->sig[1] = 'C'; + hdr->sig[2] = 'm'; + hdr->sig[3] = 'b'; + hdr->type = htons(type); + memcpy(hdr->sender,myself->name,REDIS_CLUSTER_NAMELEN); + + memcpy(hdr->myslots,master->slots,sizeof(hdr->myslots)); + memset(hdr->slaveof,0,REDIS_CLUSTER_NAMELEN); + if (myself->slaveof != NULL) + memcpy(hdr->slaveof,myself->slaveof->name, REDIS_CLUSTER_NAMELEN); + hdr->port = htons(server.port); + hdr->flags = htons(myself->flags); + hdr->state = server.cluster->state; + + /* Set the currentEpoch and configEpochs. */ + hdr->currentEpoch = htonu64(server.cluster->currentEpoch); + hdr->configEpoch = htonu64(master->configEpoch); + + /* Set the replication offset. */ + if (nodeIsSlave(myself)) + offset = replicationGetSlaveOffset(); + else + offset = server.master_repl_offset; + hdr->offset = htonu64(offset); + + /* Set the message flags. */ + if (nodeIsMaster(myself) && server.cluster->mf_end) + hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; + + /* Compute the message length for certain messages. For other messages + * this is up to the caller. 
*/ + if (type == CLUSTERMSG_TYPE_FAIL) { + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + totlen += sizeof(clusterMsgDataFail); + } else if (type == CLUSTERMSG_TYPE_UPDATE) { + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + totlen += sizeof(clusterMsgDataUpdate); + } + hdr->totlen = htonl(totlen); + /* For PING, PONG, and MEET, fixing the totlen field is up to the caller. */ +} + +/* Send a PING or PONG packet to the specified node, making sure to add enough + * gossip informations. */ +void clusterSendPing(clusterLink *link, int type) { + unsigned char *buf; + clusterMsg *hdr; + int gossipcount = 0; /* Number of gossip sections added so far. */ + int wanted; /* Number of gossip sections we want to append if possible. */ + int totlen; /* Total packet length. */ + /* freshnodes is the max number of nodes we can hope to append at all: + * nodes available minus two (ourself and the node we are sending the + * message to). However practically there may be less valid nodes since + * nodes in handshake state, disconnected, are not considered. */ + int freshnodes = dictSize(server.cluster->nodes)-2; + + /* How many gossip sections we want to add? 1/10 of the number of nodes + * and anyway at least 3. Why 1/10? + * + * If we have N masters, with N/10 entries, and we consider that in + * node_timeout we exchange with each other node at least 4 packets + * (we ping in the worst case in node_timeout/2 time, and we also + * receive two pings from the host), we have a total of 8 packets + * in the node_timeout*2 falure reports validity time. So we have + * that, for a single PFAIL node, we can expect to receive the following + * number of failure reports (in the specified window of time): + * + * PROB * GOSSIP_ENTRIES_PER_PACKET * TOTAL_PACKETS: + * + * PROB = probability of being featured in a single gossip entry, + * which is 1 / NUM_OF_NODES. + * ENTRIES = 10. + * TOTAL_PACKETS = 2 * 4 * NUM_OF_MASTERS. + * + * If we assume we have just masters (so num of nodes and num of masters + * is the same), with 1/10 we always get over the majority, and specifically + * 80% of the number of nodes, to account for many masters failing at the + * same time. + * + * Since we have non-voting slaves that lower the probability of an entry + * to feature our node, we set the number of entires per packet as + * 10% of the total nodes we have. */ + wanted = floor(dictSize(server.cluster->nodes)/10); + if (wanted < 3) wanted = 3; + if (wanted > freshnodes) wanted = freshnodes; + + /* Compute the maxium totlen to allocate our buffer. We'll fix the totlen + * later according to the number of gossip sections we really were able + * to put inside the packet. */ + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + totlen += (sizeof(clusterMsgDataGossip)*wanted); + /* Note: clusterBuildMessageHdr() expects the buffer to be always at least + * sizeof(clusterMsg) or more. */ + if (totlen < (int)sizeof(clusterMsg)) totlen = sizeof(clusterMsg); + buf = zcalloc(totlen); + hdr = (clusterMsg*) buf; + + /* Populate the header. 
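The "one tenth of the nodes, at least 3, at most the eligible nodes" rule explained above, as a stand-alone computation (wanted_gossip_entries is a made-up name, not the patch's code):

    /* Illustrative gossip-entry budget per PING/PONG packet. */
    #include <stdio.h>

    static int wanted_gossip_entries(int num_nodes) {
        int freshnodes = num_nodes - 2;   /* exclude myself and the receiver */
        int wanted = num_nodes / 10;      /* integer division == floor here  */
        if (wanted < 3) wanted = 3;
        if (wanted > freshnodes) wanted = freshnodes;
        return wanted;
    }

    int main(void) {
        printf("%d\n", wanted_gossip_entries(6));    /* 3: minimum applies   */
        printf("%d\n", wanted_gossip_entries(4));    /* 2: capped by nodes   */
        printf("%d\n", wanted_gossip_entries(100));  /* 10                   */
        return 0;
    }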
*/ + if (link->node && type == CLUSTERMSG_TYPE_PING) + link->node->ping_sent = mstime(); + clusterBuildMessageHdr(hdr,type); + + /* Populate the gossip fields */ + int maxiterations = wanted*3; + while(freshnodes > 0 && gossipcount < wanted && maxiterations--) { + dictEntry *de = dictGetRandomKey(server.cluster->nodes); + clusterNode *this = dictGetVal(de); + clusterMsgDataGossip *gossip; + int j; + + /* Don't include this node: the whole packet header is about us + * already, so we just gossip about other nodes. */ + if (this == myself) continue; + + /* Give a bias to FAIL/PFAIL nodes. */ + if (maxiterations > wanted*2 && + !(this->flags & (REDIS_NODE_PFAIL|REDIS_NODE_FAIL))) + continue; + + /* In the gossip section don't include: + * 1) Nodes in HANDSHAKE state. + * 3) Nodes with the NOADDR flag set. + * 4) Disconnected nodes if they don't have configured slots. + */ + if (this->flags & (REDIS_NODE_HANDSHAKE|REDIS_NODE_NOADDR) || + (this->link == NULL && this->numslots == 0)) + { + freshnodes--; /* Tecnically not correct, but saves CPU. */ + continue; + } + + /* Check if we already added this node */ + for (j = 0; j < gossipcount; j++) { + if (memcmp(hdr->data.ping.gossip[j].nodename,this->name, + REDIS_CLUSTER_NAMELEN) == 0) break; + } + if (j != gossipcount) continue; + + /* Add it */ + freshnodes--; + gossip = &(hdr->data.ping.gossip[gossipcount]); + memcpy(gossip->nodename,this->name,REDIS_CLUSTER_NAMELEN); + gossip->ping_sent = htonl(this->ping_sent); + gossip->pong_received = htonl(this->pong_received); + memcpy(gossip->ip,this->ip,sizeof(this->ip)); + gossip->port = htons(this->port); + gossip->flags = htons(this->flags); + gossip->notused1 = 0; + gossip->notused2 = 0; + gossipcount++; + } + + /* Ready to send... fix the totlen fiend and queue the message in the + * output buffer. */ + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + totlen += (sizeof(clusterMsgDataGossip)*gossipcount); + hdr->count = htons(gossipcount); + hdr->totlen = htonl(totlen); + clusterSendMessage(link,buf,totlen); + zfree(buf); +} + +/* Send a PONG packet to every connected node that's not in handshake state + * and for which we have a valid link. + * + * In Redis Cluster pongs are not used just for failure detection, but also + * to carry important configuration information. So broadcasting a pong is + * useful when something changes in the configuration and we want to make + * the cluster aware ASAP (for instance after a slave promotion). + * + * The 'target' argument specifies the receiving instances using the + * defines below: + * + * CLUSTER_BROADCAST_ALL -> All known instances. + * CLUSTER_BROADCAST_LOCAL_SLAVES -> All slaves in my master-slaves ring. + */ +#define CLUSTER_BROADCAST_ALL 0 +#define CLUSTER_BROADCAST_LOCAL_SLAVES 1 +void clusterBroadcastPong(int target) { + dictIterator *di; + dictEntry *de; + + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (!node->link) continue; + if (node == myself || nodeInHandshake(node)) continue; + if (target == CLUSTER_BROADCAST_LOCAL_SLAVES) { + int local_slave = + nodeIsSlave(node) && node->slaveof && + (node->slaveof == myself || node->slaveof == myself->slaveof); + if (!local_slave) continue; + } + clusterSendPing(node->link,CLUSTERMSG_TYPE_PONG); + } + dictReleaseIterator(di); +} + +/* Send a PUBLISH message. + * + * If link is NULL, then the message is broadcasted to the whole cluster. 
*/ +void clusterSendPublish(clusterLink *link, robj *channel, robj *message) { + unsigned char buf[sizeof(clusterMsg)], *payload; + clusterMsg *hdr = (clusterMsg*) buf; + uint32_t totlen; + uint32_t channel_len, message_len; + + channel = getDecodedObject(channel); + message = getDecodedObject(message); + channel_len = sdslen(channel->ptr); + message_len = sdslen(message->ptr); + + clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_PUBLISH); + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + totlen += sizeof(clusterMsgDataPublish) - 8 + channel_len + message_len; + + hdr->data.publish.msg.channel_len = htonl(channel_len); + hdr->data.publish.msg.message_len = htonl(message_len); + hdr->totlen = htonl(totlen); + + /* Try to use the local buffer if possible */ + if (totlen < sizeof(buf)) { + payload = buf; + } else { + payload = zmalloc(totlen); + memcpy(payload,hdr,sizeof(*hdr)); + hdr = (clusterMsg*) payload; + } + memcpy(hdr->data.publish.msg.bulk_data,channel->ptr,sdslen(channel->ptr)); + memcpy(hdr->data.publish.msg.bulk_data+sdslen(channel->ptr), + message->ptr,sdslen(message->ptr)); + + if (link) + clusterSendMessage(link,payload,totlen); + else + clusterBroadcastMessage(payload,totlen); + + decrRefCount(channel); + decrRefCount(message); + if (payload != buf) zfree(payload); +} + +/* Send a FAIL message to all the nodes we are able to contact. + * The FAIL message is sent when we detect that a node is failing + * (REDIS_NODE_PFAIL) and we also receive a gossip confirmation of this: + * we switch the node state to REDIS_NODE_FAIL and ask all the other + * nodes to do the same ASAP. */ +void clusterSendFail(char *nodename) { + unsigned char buf[sizeof(clusterMsg)]; + clusterMsg *hdr = (clusterMsg*) buf; + + clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_FAIL); + memcpy(hdr->data.fail.about.nodename,nodename,REDIS_CLUSTER_NAMELEN); + clusterBroadcastMessage(buf,ntohl(hdr->totlen)); +} + +/* Send an UPDATE message to the specified link carrying the specified 'node' + * slots configuration. The node name, slots bitmap, and configEpoch info + * are included. */ +void clusterSendUpdate(clusterLink *link, clusterNode *node) { + unsigned char buf[sizeof(clusterMsg)]; + clusterMsg *hdr = (clusterMsg*) buf; + + if (link == NULL) return; + clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_UPDATE); + memcpy(hdr->data.update.nodecfg.nodename,node->name,REDIS_CLUSTER_NAMELEN); + hdr->data.update.nodecfg.configEpoch = htonu64(node->configEpoch); + memcpy(hdr->data.update.nodecfg.slots,node->slots,sizeof(node->slots)); + clusterSendMessage(link,buf,ntohl(hdr->totlen)); +} + +/* ----------------------------------------------------------------------------- + * CLUSTER Pub/Sub support + * + * For now we do very little, just propagating PUBLISH messages across the whole + * cluster. In the future we'll try to get smarter and avoiding propagating those + * messages to hosts without receives for a given channel. + * -------------------------------------------------------------------------- */ +void clusterPropagatePublish(robj *channel, robj *message) { + clusterSendPublish(NULL, channel, message); +} + +/* ----------------------------------------------------------------------------- + * SLAVE node specific functions + * -------------------------------------------------------------------------- */ + +/* This function sends a FAILOVE_AUTH_REQUEST message to every node in order to + * see if there is the quorum for this slave instance to failover its failing + * master. 
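The sizing and buffer-selection logic of the PUBLISH path can be sketched as follows. FAKE_FIXED_LEN and FAKE_PUBLISH_LEN are placeholders for the real structure sizes, and the 2048-byte stack buffer only mirrors the "use the local buffer if possible" idea, not the exact threshold used in the patch.

    /* Illustrative payload sizing for a cluster PUBLISH message. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define FAKE_FIXED_LEN   2256  /* stand-in for the fixed message part */
    #define FAKE_PUBLISH_LEN   16  /* stand-in for the publish sub-header */

    int main(void) {
        const char *channel = "news", *message = "hello cluster";
        size_t channel_len = strlen(channel), message_len = strlen(message);
        size_t totlen = FAKE_FIXED_LEN + FAKE_PUBLISH_LEN - 8
                        + channel_len + message_len;

        unsigned char stackbuf[2048], *payload;
        payload = (totlen < sizeof(stackbuf)) ? stackbuf : malloc(totlen);
        printf("totlen=%zu, %s buffer\n", totlen,
               payload == stackbuf ? "stack" : "heap");
        if (payload != stackbuf) free(payload);
        return 0;
    }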
+ * + * Note that we send the failover request to everybody, master and slave nodes, + * but only the masters are supposed to reply to our query. */ +void clusterRequestFailoverAuth(void) { + unsigned char buf[sizeof(clusterMsg)]; + clusterMsg *hdr = (clusterMsg*) buf; + uint32_t totlen; + + clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST); + /* If this is a manual failover, set the CLUSTERMSG_FLAG0_FORCEACK bit + * in the header to communicate the nodes receiving the message that + * they should authorized the failover even if the master is working. */ + if (server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_FORCEACK; + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + hdr->totlen = htonl(totlen); + clusterBroadcastMessage(buf,totlen); +} + +/* Send a FAILOVER_AUTH_ACK message to the specified node. */ +void clusterSendFailoverAuth(clusterNode *node) { + unsigned char buf[sizeof(clusterMsg)]; + clusterMsg *hdr = (clusterMsg*) buf; + uint32_t totlen; + + if (!node->link) return; + clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK); + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + hdr->totlen = htonl(totlen); + clusterSendMessage(node->link,buf,totlen); +} + +/* Send a MFSTART message to the specified node. */ +void clusterSendMFStart(clusterNode *node) { + unsigned char buf[sizeof(clusterMsg)]; + clusterMsg *hdr = (clusterMsg*) buf; + uint32_t totlen; + + if (!node->link) return; + clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_MFSTART); + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + hdr->totlen = htonl(totlen); + clusterSendMessage(node->link,buf,totlen); +} + +/* Vote for the node asking for our vote if there are the conditions. */ +void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { + clusterNode *master = node->slaveof; + uint64_t requestCurrentEpoch = ntohu64(request->currentEpoch); + uint64_t requestConfigEpoch = ntohu64(request->configEpoch); + unsigned char *claimed_slots = request->myslots; + int force_ack = request->mflags[0] & CLUSTERMSG_FLAG0_FORCEACK; + int j; + + /* IF we are not a master serving at least 1 slot, we don't have the + * right to vote, as the cluster size in Redis Cluster is the number + * of masters serving at least one slot, and quorum is the cluster + * size + 1 */ + if (nodeIsSlave(myself) || myself->numslots == 0) return; + + /* Request epoch must be >= our currentEpoch. + * Note that it is impossible for it to actually be greater since + * our currentEpoch was updated as a side effect of receiving this + * request, if the request epoch was greater. */ + if (requestCurrentEpoch < server.cluster->currentEpoch) { + redisLog(REDIS_WARNING, + "Failover auth denied to %.40s: reqEpoch (%llu) < curEpoch(%llu)", + node->name, + (unsigned long long) requestCurrentEpoch, + (unsigned long long) server.cluster->currentEpoch); + return; + } + + /* I already voted for this epoch? Return ASAP. */ + if (server.cluster->lastVoteEpoch == server.cluster->currentEpoch) { + redisLog(REDIS_WARNING, + "Failover auth denied to %.40s: already voted for epoch %llu", + node->name, + (unsigned long long) server.cluster->currentEpoch); + return; + } + + /* Node must be a slave and its master down. + * The master can be non failing if the request is flagged + * with CLUSTERMSG_FLAG0_FORCEACK (manual failover). 
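The first vote gates above (the voter must be a master serving slots, stale request epochs are rejected, and only one vote is granted per epoch) fit a small predicate. may_grant_vote and its parameters are illustrative names, not the function in the patch:

    /* Illustrative pre-checks before granting a failover vote. */
    #include <stdio.h>
    #include <stdint.h>

    static int may_grant_vote(int i_am_master, int my_numslots,
                              uint64_t my_current_epoch,
                              uint64_t last_vote_epoch,
                              uint64_t req_current_epoch) {
        if (!i_am_master || my_numslots == 0) return 0;     /* no voting rights */
        if (req_current_epoch < my_current_epoch) return 0; /* stale request    */
        if (last_vote_epoch == my_current_epoch) return 0;  /* already voted    */
        return 1;
    }

    int main(void) {
        printf("%d\n", may_grant_vote(1, 100, 7, 6, 7)); /* 1                    */
        printf("%d\n", may_grant_vote(1, 100, 7, 7, 7)); /* 0: one vote/epoch    */
        printf("%d\n", may_grant_vote(1,   0, 7, 6, 7)); /* 0: serves no slots   */
        return 0;
    }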
*/ + if (nodeIsMaster(node) || master == NULL || + (!nodeFailed(master) && !force_ack)) + { + if (nodeIsMaster(node)) { + redisLog(REDIS_WARNING, + "Failover auth denied to %.40s: it is a master node", + node->name); + } else if (master == NULL) { + redisLog(REDIS_WARNING, + "Failover auth denied to %.40s: I don't know its master", + node->name); + } else if (!nodeFailed(master)) { + redisLog(REDIS_WARNING, + "Failover auth denied to %.40s: its master is up", + node->name); + } + return; + } + + /* We did not voted for a slave about this master for two + * times the node timeout. This is not strictly needed for correctness + * of the algorithm but makes the base case more linear. */ + if (mstime() - node->slaveof->voted_time < server.cluster_node_timeout * 2) + { + redisLog(REDIS_WARNING, + "Failover auth denied to %.40s: " + "can't vote about this master before %lld milliseconds", + node->name, + (long long) ((server.cluster_node_timeout*2)- + (mstime() - node->slaveof->voted_time))); + return; + } + + /* The slave requesting the vote must have a configEpoch for the claimed + * slots that is >= the one of the masters currently serving the same + * slots in the current configuration. */ + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (bitmapTestBit(claimed_slots, j) == 0) continue; + if (server.cluster->slots[j] == NULL || + server.cluster->slots[j]->configEpoch <= requestConfigEpoch) + { + continue; + } + /* If we reached this point we found a slot that in our current slots + * is served by a master with a greater configEpoch than the one claimed + * by the slave requesting our vote. Refuse to vote for this slave. */ + redisLog(REDIS_WARNING, + "Failover auth denied to %.40s: " + "slot %d epoch (%llu) > reqEpoch (%llu)", + node->name, j, + (unsigned long long) server.cluster->slots[j]->configEpoch, + (unsigned long long) requestConfigEpoch); + return; + } + + /* We can vote for this slave. */ + clusterSendFailoverAuth(node); + server.cluster->lastVoteEpoch = server.cluster->currentEpoch; + node->slaveof->voted_time = mstime(); + redisLog(REDIS_WARNING, "Failover auth granted to %.40s for epoch %llu", + node->name, (unsigned long long) server.cluster->currentEpoch); +} + +/* This function returns the "rank" of this instance, a slave, in the context + * of its master-slaves ring. The rank of the slave is given by the number of + * other slaves for the same master that have a better replication offset + * compared to the local one (better means, greater, so they claim more data). + * + * A slave with rank 0 is the one with the greatest (most up to date) + * replication offset, and so forth. Note that because how the rank is computed + * multiple slaves may have the same rank, in case they have the same offset. + * + * The slave rank is used to add a delay to start an election in order to + * get voted and replace a failing master. Slaves with better replication + * offsets are more likely to win. */ +int clusterGetSlaveRank(void) { + long long myoffset; + int j, rank = 0; + clusterNode *master; + + redisAssert(nodeIsSlave(myself)); + master = myself->slaveof; + if (master == NULL) return 0; /* Never called by slaves without master. */ + + myoffset = replicationGetSlaveOffset(); + for (j = 0; j < master->numslaves; j++) + if (master->slaves[j] != myself && + master->slaves[j]->repl_offset > myoffset) rank++; + return rank; +} + +/* This function is called by clusterHandleSlaveFailover() in order to + * let the slave log why it is not able to failover. 
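The rank computation reduces to counting sibling replicas with a larger replication offset. A tiny stand-alone version, using plain arrays instead of the node structures, under the same definition as above:

    /* Illustrative slave rank: number of siblings with a better offset. */
    #include <stdio.h>

    static int slave_rank(long long my_offset, const long long *sibling_offsets,
                          int num_siblings) {
        int rank = 0;
        for (int j = 0; j < num_siblings; j++)
            if (sibling_offsets[j] > my_offset) rank++;
        return rank;
    }

    int main(void) {
        long long others[] = { 1500, 900, 2000 };
        printf("%d\n", slave_rank(1000, others, 3)); /* 2: two siblings ahead */
        printf("%d\n", slave_rank(2500, others, 3)); /* 0: most up to date    */
        return 0;
    }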
Sometimes there are + * not the conditions, but since the failover function is called again and + * again, we can't log the same things continuously. + * + * This function works by logging only if a given set of conditions are + * true: + * + * 1) The reason for which the failover can't be initiated changed. + * The reasons also include a NONE reason we reset the state to + * when the slave finds that its master is fine (no FAIL flag). + * 2) Also, the log is emitted again if the master is still down and + * the reason for not failing over is still the same, but more than + * REDIS_CLUSTER_CANT_FAILOVER_RELOG_PERIOD seconds elapsed. + * 3) Finally, the function only logs if the slave is down for more than + * five seconds + NODE_TIMEOUT. This way nothing is logged when a + * failover starts in a reasonable time. + * + * The function is called with the reason why the slave can't failover + * which is one of the integer macros REDIS_CLUSTER_CANT_FAILOVER_*. + * + * The function is guaranteed to be called only if 'myself' is a slave. */ +void clusterLogCantFailover(int reason) { + char *msg; + static time_t lastlog_time = 0; + mstime_t nolog_fail_time = server.cluster_node_timeout + 5000; + + /* Don't log if we have the same reason for some time. */ + if (reason == server.cluster->cant_failover_reason && + time(NULL)-lastlog_time < REDIS_CLUSTER_CANT_FAILOVER_RELOG_PERIOD) + return; + + server.cluster->cant_failover_reason = reason; + + /* We also don't emit any log if the master failed no long ago, the + * goal of this function is to log slaves in a stalled condition for + * a long time. */ + if (myself->slaveof && + nodeFailed(myself->slaveof) && + (mstime() - myself->slaveof->fail_time) < nolog_fail_time) return; + + switch(reason) { + case REDIS_CLUSTER_CANT_FAILOVER_DATA_AGE: + msg = "Disconnected from master for longer than allowed."; + break; + case REDIS_CLUSTER_CANT_FAILOVER_WAITING_DELAY: + msg = "Waiting the delay before I can start a new failover."; + break; + case REDIS_CLUSTER_CANT_FAILOVER_EXPIRED: + msg = "Failover attempt expired."; + break; + case REDIS_CLUSTER_CANT_FAILOVER_WAITING_VOTES: + msg = "Waiting for votes, but majority still not reached."; + break; + default: + msg = "Unknown reason code."; + break; + } + lastlog_time = time(NULL); + redisLog(REDIS_WARNING,"Currently unable to failover: %s", msg); +} + +/* This function implements the final part of automatic and manual failovers, + * where the slave grabs its master's hash slots, and propagates the new + * configuration. + * + * Note that it's up to the caller to be sure that the node got a new + * configuration epoch already. */ +void clusterFailoverReplaceYourMaster(void) { + int j; + clusterNode *oldmaster = myself->slaveof; + + if (nodeIsMaster(myself) || oldmaster == NULL) return; + + /* 1) Turn this node into a master. */ + clusterSetNodeAsMaster(myself); + replicationUnsetMaster(); + + /* 2) Claim all the slots assigned to our master. */ + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (clusterNodeGetSlotBit(oldmaster,j)) { + clusterDelSlot(j); + clusterAddSlot(myself,j); + } + } + + /* 3) Update state and save config. */ + clusterUpdateState(); + clusterSaveConfigOrDie(1); + + /* 4) Pong all the other nodes so that they can update the state + * accordingly and detect that we switched to master role. */ + clusterBroadcastPong(CLUSTER_BROADCAST_ALL); + + /* 5) If there was a manual failover in progress, clear the state. 
*/ + resetManualFailover(); +} + +/* This function is called if we are a slave node and our master serving + * a non-zero amount of hash slots is in FAIL state. + * + * The gaol of this function is: + * 1) To check if we are able to perform a failover, is our data updated? + * 2) Try to get elected by masters. + * 3) Perform the failover informing all the other nodes. + */ +void clusterHandleSlaveFailover(void) { + mstime_t data_age; + mstime_t auth_age = mstime() - server.cluster->failover_auth_time; + int needed_quorum = (server.cluster->size / 2) + 1; + int manual_failover = server.cluster->mf_end != 0 && + server.cluster->mf_can_start; + mstime_t auth_timeout, auth_retry_time; + + server.cluster->todo_before_sleep &= ~CLUSTER_TODO_HANDLE_FAILOVER; + + /* Compute the failover timeout (the max time we have to send votes + * and wait for replies), and the failover retry time (the time to wait + * before trying to get voted again). + * + * Timeout is MIN(NODE_TIMEOUT*2,2000) milliseconds. + * Retry is two times the Timeout. + */ + auth_timeout = server.cluster_node_timeout*2; + if (auth_timeout < 2000) auth_timeout = 2000; + auth_retry_time = auth_timeout*2; + + /* Pre conditions to run the function, that must be met both in case + * of an automatic or manual failover: + * 1) We are a slave. + * 2) Our master is flagged as FAIL, or this is a manual failover. + * 3) It is serving slots. */ + if (nodeIsMaster(myself) || + myself->slaveof == NULL || + (!nodeFailed(myself->slaveof) && !manual_failover) || + myself->slaveof->numslots == 0) + { + /* There are no reasons to failover, so we set the reason why we + * are returning without failing over to NONE. */ + server.cluster->cant_failover_reason = REDIS_CLUSTER_CANT_FAILOVER_NONE; + return; + } + + /* Set data_age to the number of seconds we are disconnected from + * the master. */ + if (server.repl_state == REDIS_REPL_CONNECTED) { + data_age = (mstime_t)(server.unixtime - server.master->lastinteraction) + * 1000; + } else { + data_age = (mstime_t)(server.unixtime - server.repl_down_since) * 1000; + } + + /* Remove the node timeout from the data age as it is fine that we are + * disconnected from our master at least for the time it was down to be + * flagged as FAIL, that's the baseline. */ + if (data_age > server.cluster_node_timeout) + data_age -= server.cluster_node_timeout; + + /* Check if our data is recent enough according to the slave validity + * factor configured by the user. + * + * Check bypassed for manual failovers. */ + if (server.cluster_slave_validity_factor && + data_age > + (((mstime_t)server.repl_ping_slave_period * 1000) + + (server.cluster_node_timeout * server.cluster_slave_validity_factor))) + { + if (!manual_failover) { + clusterLogCantFailover(REDIS_CLUSTER_CANT_FAILOVER_DATA_AGE); + return; + } + } + + /* If the previous failover attempt timedout and the retry time has + * elapsed, we can setup a new one. */ + if (auth_age > auth_retry_time) { + server.cluster->failover_auth_time = mstime() + + 500 + /* Fixed delay of 500 milliseconds, let FAIL msg propagate. */ + random() % 500; /* Random delay between 0 and 500 milliseconds. */ + server.cluster->failover_auth_count = 0; + server.cluster->failover_auth_sent = 0; + server.cluster->failover_auth_rank = clusterGetSlaveRank(); + /* We add another delay that is proportional to the slave rank. + * Specifically 1 second * rank. This way slaves that have a probably + * less updated replication offset, are penalized. 
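The timing and quorum arithmetic introduced above, evaluated once with illustrative settings (the variable names mirror the prose, not the actual server fields):

    /* Illustrative failover timing, quorum and data-age bounds. */
    #include <stdio.h>

    int main(void) {
        long long node_timeout = 15000;        /* cluster-node-timeout, ms  */
        int cluster_size = 5;                  /* masters serving slots     */
        int validity_factor = 10;              /* slave validity factor     */
        long long repl_ping_slave_period = 10; /* seconds                   */

        long long auth_timeout = node_timeout * 2;
        if (auth_timeout < 2000) auth_timeout = 2000;
        long long auth_retry_time = auth_timeout * 2;
        int needed_quorum = cluster_size / 2 + 1;

        long long max_data_age = repl_ping_slave_period * 1000
                                 + node_timeout * validity_factor;

        printf("auth_timeout=%lld retry=%lld quorum=%d max_data_age=%lld\n",
               auth_timeout, auth_retry_time, needed_quorum, max_data_age);
        /* -> auth_timeout=30000 retry=60000 quorum=3 max_data_age=160000 */
        return 0;
    }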
*/ + server.cluster->failover_auth_time += + server.cluster->failover_auth_rank * 1000; + /* However if this is a manual failover, no delay is needed. */ + if (server.cluster->mf_end) { + server.cluster->failover_auth_time = mstime(); + server.cluster->failover_auth_rank = 0; + } + redisLog(REDIS_WARNING, + "Start of election delayed for %lld milliseconds " + "(rank #%d, offset %lld).", + server.cluster->failover_auth_time - mstime(), + server.cluster->failover_auth_rank, + replicationGetSlaveOffset()); + /* Now that we have a scheduled election, broadcast our offset + * to all the other slaves so that they'll updated their offsets + * if our offset is better. */ + clusterBroadcastPong(CLUSTER_BROADCAST_LOCAL_SLAVES); + return; + } + + /* It is possible that we received more updated offsets from other + * slaves for the same master since we computed our election delay. + * Update the delay if our rank changed. + * + * Not performed if this is a manual failover. */ + if (server.cluster->failover_auth_sent == 0 && + server.cluster->mf_end == 0) + { + int newrank = clusterGetSlaveRank(); + if (newrank > server.cluster->failover_auth_rank) { + long long added_delay = + (newrank - server.cluster->failover_auth_rank) * 1000; + server.cluster->failover_auth_time += added_delay; + server.cluster->failover_auth_rank = newrank; + redisLog(REDIS_WARNING, + "Slave rank updated to #%d, added %lld milliseconds of delay.", + newrank, added_delay); + } + } + + /* Return ASAP if we can't still start the election. */ + if (mstime() < server.cluster->failover_auth_time) { + clusterLogCantFailover(REDIS_CLUSTER_CANT_FAILOVER_WAITING_DELAY); + return; + } + + /* Return ASAP if the election is too old to be valid. */ + if (auth_age > auth_timeout) { + clusterLogCantFailover(REDIS_CLUSTER_CANT_FAILOVER_EXPIRED); + return; + } + + /* Ask for votes if needed. */ + if (server.cluster->failover_auth_sent == 0) { + server.cluster->currentEpoch++; + server.cluster->failover_auth_epoch = server.cluster->currentEpoch; + redisLog(REDIS_WARNING,"Starting a failover election for epoch %llu.", + (unsigned long long) server.cluster->currentEpoch); + clusterRequestFailoverAuth(); + server.cluster->failover_auth_sent = 1; + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| + CLUSTER_TODO_UPDATE_STATE| + CLUSTER_TODO_FSYNC_CONFIG); + return; /* Wait for replies. */ + } + + /* Check if we reached the quorum. */ + if (server.cluster->failover_auth_count >= needed_quorum) { + /* We have the quorum, we can finally failover the master. */ + + redisLog(REDIS_WARNING, + "Failover election won: I'm the new master."); + + /* Update my configEpoch to the epoch of the election. */ + if (myself->configEpoch < server.cluster->failover_auth_epoch) { + myself->configEpoch = server.cluster->failover_auth_epoch; + redisLog(REDIS_WARNING, + "configEpoch set to %llu after successful failover", + (unsigned long long) myself->configEpoch); + } + + /* Take responsability for the cluster slots. */ + clusterFailoverReplaceYourMaster(); + } else { + clusterLogCantFailover(REDIS_CLUSTER_CANT_FAILOVER_WAITING_VOTES); + } +} + +/* ----------------------------------------------------------------------------- + * CLUSTER slave migration + * + * Slave migration is the process that allows a slave of a master that is + * already covered by at least another slave, to "migrate" to a master that + * is orpaned, that is, left with no working slaves. 
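The election delay spelled out above (a fixed 500 ms, up to 500 ms of random jitter, plus one second per rank position, and no delay at all for a manual failover) as a stand-alone sketch with an illustrative function name:

    /* Illustrative election start delay for a failing-over replica. */
    #include <stdio.h>
    #include <stdlib.h>

    static long long election_delay_ms(int rank, int manual_failover) {
        if (manual_failover) return 0;     /* manual failovers start at once */
        return 500 + rand() % 500 + (long long)rank * 1000;
    }

    int main(void) {
        srand(1234);
        printf("rank 0: %lld ms\n", election_delay_ms(0, 0));
        printf("rank 2: %lld ms\n", election_delay_ms(2, 0));
        printf("manual: %lld ms\n", election_delay_ms(0, 1));
        return 0;
    }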
+ * -------------------------------------------------------------------------- */ + +/* This function is responsible to decide if this replica should be migrated + * to a different (orphaned) master. It is called by the clusterCron() function + * only if: + * + * 1) We are a slave node. + * 2) It was detected that there is at least one orphaned master in + * the cluster. + * 3) We are a slave of one of the masters with the greatest number of + * slaves. + * + * This checks are performed by the caller since it requires to iterate + * the nodes anyway, so we spend time into clusterHandleSlaveMigration() + * if definitely needed. + * + * The fuction is called with a pre-computed max_slaves, that is the max + * number of working (not in FAIL state) slaves for a single master. + * + * Additional conditions for migration are examined inside the function. + */ +void clusterHandleSlaveMigration(int max_slaves) { + int j, okslaves = 0; + clusterNode *mymaster = myself->slaveof, *target = NULL, *candidate = NULL; + dictIterator *di; + dictEntry *de; + + /* Step 1: Don't migrate if the cluster state is not ok. */ + if (server.cluster->state != REDIS_CLUSTER_OK) return; + + /* Step 2: Don't migrate if my master will not be left with at least + * 'migration-barrier' slaves after my migration. */ + if (mymaster == NULL) return; + for (j = 0; j < mymaster->numslaves; j++) + if (!nodeFailed(mymaster->slaves[j]) && + !nodeTimedOut(mymaster->slaves[j])) okslaves++; + if (okslaves <= server.cluster_migration_barrier) return; + + /* Step 3: Idenitfy a candidate for migration, and check if among the + * masters with the greatest number of ok slaves, I'm the one with the + * smaller node ID. + * + * Note that this means that eventually a replica migration will occurr + * since slaves that are reachable again always have their FAIL flag + * cleared. At the same time this does not mean that there are no + * race conditions possible (two slaves migrating at the same time), but + * this is extremely unlikely to happen, and harmless. */ + candidate = myself; + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + int okslaves; + + /* Only iterate over working masters. */ + if (nodeIsSlave(node) || nodeFailed(node)) continue; + /* If this master never had slaves so far, don't migrate. We want + * to migrate to a master that remained orphaned, not masters that + * were never configured to have slaves. */ + if (node->numslaves == 0) continue; + okslaves = clusterCountNonFailingSlaves(node); + + if (okslaves == 0 && target == NULL && node->numslots > 0) + target = node; + + if (okslaves == max_slaves) { + for (j = 0; j < node->numslaves; j++) { + if (memcmp(node->slaves[j]->name, + candidate->name, + REDIS_CLUSTER_NAMELEN) < 0) + { + candidate = node->slaves[j]; + } + } + } + } + dictReleaseIterator(di); + + /* Step 4: perform the migration if there is a target, and if I'm the + * candidate. */ + if (target && candidate == myself) { + redisLog(REDIS_WARNING,"Migrating to orphaned master %.40s", + target->name); + clusterSetMaster(target); + } +} + +/* ----------------------------------------------------------------------------- + * CLUSTER manual failover + * + * This are the important steps performed by slaves during a manual failover: + * 1) User send CLUSTER FAILOVER command. The failover state is initialized + * setting mf_end to the millisecond unix time at which we'll abort the + * attempt. 
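The tie-break used for replica migration, choosing the replica whose node ID compares lowest, can be shown with plain strings (the IDs below are fake and truncated; the real comparison is a memcmp over 40-character names):

    /* Illustrative candidate selection for replica migration. */
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        const char *candidates[] = {
            "b3f0c7f6bb4e1a2d",
            "a1d2e3f405060708",
            "c9e8d7c6b5a49382",
        };
        int best = 0;
        for (int j = 1; j < 3; j++)
            if (strcmp(candidates[j], candidates[best]) < 0) best = j;
        printf("migrating replica: %s\n", candidates[best]); /* the 'a...' one */
        return 0;
    }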
+ * 2) Slave sends a MFSTART message to the master requesting to pause clients + * for two times the manual failover timeout REDIS_CLUSTER_MF_TIMEOUT. + * When master is paused for manual failover, it also starts to flag + * packets with CLUSTERMSG_FLAG0_PAUSED. + * 3) Slave waits for master to send its replication offset flagged as PAUSED. + * 4) If slave received the offset from the master, and its offset matches, + * mf_can_start is set to 1, and clusterHandleSlaveFailover() will perform + * the failover as usually, with the difference that the vote request + * will be modified to force masters to vote for a slave that has a + * working master. + * + * From the point of view of the master things are simpler: when a + * PAUSE_CLIENTS packet is received the master sets mf_end as well and + * the sender in mf_slave. During the time limit for the manual failover + * the master will just send PINGs more often to this slave, flagged with + * the PAUSED flag, so that the slave will set mf_master_offset when receiving + * a packet from the master with this flag set. + * + * The gaol of the manual failover is to perform a fast failover without + * data loss due to the asynchronous master-slave replication. + * -------------------------------------------------------------------------- */ + +/* Reset the manual failover state. This works for both masters and slavesa + * as all the state about manual failover is cleared. + * + * The function can be used both to initialize the manual failover state at + * startup or to abort a manual failover in progress. */ +void resetManualFailover(void) { + if (server.cluster->mf_end && clientsArePaused()) { + server.clients_pause_end_time = 0; + clientsArePaused(); /* Just use the side effect of the function. */ + } + server.cluster->mf_end = 0; /* No manual failover in progress. */ + server.cluster->mf_can_start = 0; + server.cluster->mf_slave = NULL; + server.cluster->mf_master_offset = 0; +} + +/* If a manual failover timed out, abort it. */ +void manualFailoverCheckTimeout(void) { + if (server.cluster->mf_end && server.cluster->mf_end < mstime()) { + redisLog(REDIS_WARNING,"Manual failover timed out."); + resetManualFailover(); + } +} + +/* This function is called from the cluster cron function in order to go + * forward with a manual failover state machine. */ +void clusterHandleManualFailover(void) { + /* Return ASAP if no manual failover is in progress. */ + if (server.cluster->mf_end == 0) return; + + /* If mf_can_start is non-zero, the failover was already triggered so the + * next steps are performed by clusterHandleSlaveFailover(). */ + if (server.cluster->mf_can_start) return; + + if (server.cluster->mf_master_offset == 0) return; /* Wait for offset... */ + + if (server.cluster->mf_master_offset == replicationGetSlaveOffset()) { + /* Our replication offset matches the master replication offset + * announced after clients were paused. We can start the failover. */ + server.cluster->mf_can_start = 1; + redisLog(REDIS_WARNING, + "All master replication stream processed, " + "manual failover can start."); + } +} + +/* ----------------------------------------------------------------------------- + * CLUSTER cron job + * -------------------------------------------------------------------------- */ + +/* This is executed 10 times every second */ +void clusterCron(void) { + dictIterator *di; + dictEntry *de; + int update_state = 0; + int orphaned_masters; /* How many masters there are without ok slaves. 
*/ + int max_slaves; /* Max number of ok slaves for a single master. */ + int this_slaves; /* Number of ok slaves for our master (if we are slave). */ + mstime_t min_pong = 0, now = mstime(); + clusterNode *min_pong_node = NULL; + static unsigned long long iteration = 0; + mstime_t handshake_timeout; + + iteration++; /* Number of times this function was called so far. */ + + /* The handshake timeout is the time after which a handshake node that was + * not turned into a normal node is removed from the nodes. Usually it is + * just the NODE_TIMEOUT value, but when NODE_TIMEOUT is too small we use + * the value of 1 second. */ + handshake_timeout = server.cluster_node_timeout; + if (handshake_timeout < 1000) handshake_timeout = 1000; + + /* Check if we have disconnected nodes and re-establish the connection. */ + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (node->flags & (REDIS_NODE_MYSELF|REDIS_NODE_NOADDR)) continue; + + /* A Node in HANDSHAKE state has a limited lifespan equal to the + * configured node timeout. */ + if (nodeInHandshake(node) && now - node->ctime > handshake_timeout) { + clusterDelNode(node); + continue; + } + + if (node->link == NULL) { + int fd; + mstime_t old_ping_sent; + clusterLink *link; + + fd = anetTcpNonBlockBindConnect(server.neterr, node->ip, + node->port+REDIS_CLUSTER_PORT_INCR, REDIS_BIND_ADDR); + if (fd == -1) { + /* We got a synchronous error from connect before + * clusterSendPing() had a chance to be called. + * If node->ping_sent is zero, failure detection can't work, + * so we claim we actually sent a ping now (that will + * be really sent as soon as the link is obtained). */ + if (node->ping_sent == 0) node->ping_sent = mstime(); + redisLog(REDIS_DEBUG, "Unable to connect to " + "Cluster Node [%s]:%d -> %s", node->ip, + node->port+REDIS_CLUSTER_PORT_INCR, + server.neterr); + continue; + } + link = createClusterLink(node); + link->fd = fd; + node->link = link; + aeCreateFileEvent(server.el,link->fd,AE_READABLE, + clusterReadHandler,link); + /* Queue a PING in the new connection ASAP: this is crucial + * to avoid false positives in failure detection. + * + * If the node is flagged as MEET, we send a MEET message instead + * of a PING one, to force the receiver to add us in its node + * table. */ + old_ping_sent = node->ping_sent; + clusterSendPing(link, node->flags & REDIS_NODE_MEET ? + CLUSTERMSG_TYPE_MEET : CLUSTERMSG_TYPE_PING); + if (old_ping_sent) { + /* If there was an active ping before the link was + * disconnected, we want to restore the ping time, otherwise + * replaced by the clusterSendPing() call. */ + node->ping_sent = old_ping_sent; + } + /* We can clear the flag after the first packet is sent. + * If we'll never receive a PONG, we'll never send new packets + * to this node. Instead after the PONG is received and we + * are no longer in meet/handshake status, we want to send + * normal PING packets. */ + node->flags &= ~REDIS_NODE_MEET; + + redisLog(REDIS_DEBUG,"Connecting with Node %.40s at %s:%d", + node->name, node->ip, node->port+REDIS_CLUSTER_PORT_INCR); + } + } + dictReleaseIterator(di); + + /* Ping some random node 1 time every 10 iterations, so that we usually ping + * one random node every second. */ + if (!(iteration % 10)) { + int j; + + /* Check a few random nodes and ping the one with the oldest + * pong_received time. 
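The sampling policy described above (once every ten cron iterations, look at a few random nodes and ping the one whose last PONG is oldest) in a minimal form; the real loop also skips nodes with an outstanding ping or in handshake state, which this sketch omits.

    /* Illustrative "sample a few, ping the stalest" selection. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        long long pong_received[] = { 5000, 1200, 8700, 3300, 9100, 640, 7000 };
        int num_nodes = 7, samples = 5, best = -1;

        srand(42);                           /* deterministic for the demo */
        for (int j = 0; j < samples; j++) {
            int idx = rand() % num_nodes;
            if (best == -1 || pong_received[idx] < pong_received[best])
                best = idx;
        }
        printf("would ping node #%d (pong_received=%lld)\n",
               best, pong_received[best]);
        return 0;
    }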
*/ + for (j = 0; j < 5; j++) { + de = dictGetRandomKey(server.cluster->nodes); + clusterNode *this = dictGetVal(de); + + /* Don't ping nodes disconnected or with a ping currently active. */ + if (this->link == NULL || this->ping_sent != 0) continue; + if (this->flags & (REDIS_NODE_MYSELF|REDIS_NODE_HANDSHAKE)) + continue; + if (min_pong_node == NULL || min_pong > this->pong_received) { + min_pong_node = this; + min_pong = this->pong_received; + } + } + if (min_pong_node) { + redisLog(REDIS_DEBUG,"Pinging node %.40s", min_pong_node->name); + clusterSendPing(min_pong_node->link, CLUSTERMSG_TYPE_PING); + } + } + + /* Iterate nodes to check if we need to flag something as failing. + * This loop is also responsible to: + * 1) Check if there are orphaned masters (masters without non failing + * slaves). + * 2) Count the max number of non failing slaves for a single master. + * 3) Count the number of slaves for our master, if we are a slave. */ + orphaned_masters = 0; + max_slaves = 0; + this_slaves = 0; + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + now = mstime(); /* Use an updated time at every iteration. */ + mstime_t delay; + + if (node->flags & + (REDIS_NODE_MYSELF|REDIS_NODE_NOADDR|REDIS_NODE_HANDSHAKE)) + continue; + + /* Orphaned master check, useful only if the current instance + * is a slave that may migrate to another master. */ + if (nodeIsSlave(myself) && nodeIsMaster(node) && !nodeFailed(node)) { + int okslaves = clusterCountNonFailingSlaves(node); + + /* A master is orphaned if it is serving a non-zero number of + * slots, have no working slaves, but used to have at least one + * slave. */ + if (okslaves == 0 && node->numslots > 0 && node->numslaves) + orphaned_masters++; + if (okslaves > max_slaves) max_slaves = okslaves; + if (nodeIsSlave(myself) && myself->slaveof == node) + this_slaves = okslaves; + } + + /* If we are waiting for the PONG more than half the cluster + * timeout, reconnect the link: maybe there is a connection + * issue even if the node is alive. */ + if (node->link && /* is connected */ + now - node->link->ctime > + server.cluster_node_timeout && /* was not already reconnected */ + node->ping_sent && /* we already sent a ping */ + node->pong_received < node->ping_sent && /* still waiting pong */ + /* and we are waiting for the pong more than timeout/2 */ + now - node->ping_sent > server.cluster_node_timeout/2) + { + /* Disconnect the link, it will be reconnected automatically. */ + freeClusterLink(node->link); + } + + /* If we have currently no active ping in this instance, and the + * received PONG is older than half the cluster timeout, send + * a new ping now, to ensure all the nodes are pinged without + * a too big delay. */ + if (node->link && + node->ping_sent == 0 && + (now - node->pong_received) > server.cluster_node_timeout/2) + { + clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); + continue; + } + + /* If we are a master and one of the slaves requested a manual + * failover, ping it continuously. */ + if (server.cluster->mf_end && + nodeIsMaster(myself) && + server.cluster->mf_slave == node && + node->link) + { + clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); + continue; + } + + /* Check only if we have an active ping for this instance. */ + if (node->ping_sent == 0) continue; + + /* Compute the delay of the PONG. Note that if we already received + * the PONG, then node->ping_sent is zero, so can't reach this + * code at all. 
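+ * For a concrete feel of the check below (the figures are only illustrative,
+ * using the default REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT of 15000 ms): a node
+ * that was pinged at t=0 and has not replied by t=15001 ends up with
+ * delay > 15000 and is flagged REDIS_NODE_PFAIL by the code that follows.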
 */
+        delay = now - node->ping_sent;
+
+        if (delay > server.cluster_node_timeout) {
+            /* Timeout reached. Set the node as possibly failing if it is
+             * not already in this state. */
+            if (!(node->flags & (REDIS_NODE_PFAIL|REDIS_NODE_FAIL))) {
+                redisLog(REDIS_DEBUG,"*** NODE %.40s possibly failing",
+                    node->name);
+                node->flags |= REDIS_NODE_PFAIL;
+                update_state = 1;
+            }
+        }
+    }
+    dictReleaseIterator(di);
+
+    /* If we are a slave node but the replication is still turned off,
+     * enable it if we know the address of our master and it appears to
+     * be up. */
+    if (nodeIsSlave(myself) &&
+        server.masterhost == NULL &&
+        myself->slaveof &&
+        nodeHasAddr(myself->slaveof))
+    {
+        replicationSetMaster(myself->slaveof->ip, myself->slaveof->port);
+    }
+
+    /* Abort a manual failover if the timeout is reached. */
+    manualFailoverCheckTimeout();
+
+    if (nodeIsSlave(myself)) {
+        clusterHandleManualFailover();
+        clusterHandleSlaveFailover();
+        /* If there are orphaned slaves, and we are a slave among the masters
+         * with the max number of non-failing slaves, consider migrating to
+         * the orphaned masters. Note that it does not make sense to try
+         * a migration if there is no master with at least *two* working
+         * slaves. */
+        if (orphaned_masters && max_slaves >= 2 && this_slaves == max_slaves)
+            clusterHandleSlaveMigration(max_slaves);
+    }
+
+    if (update_state || server.cluster->state == REDIS_CLUSTER_FAIL)
+        clusterUpdateState();
+}
+
+/* This function is called before the event handler returns to sleep for
+ * events. It is useful to perform operations that must be done ASAP in
+ * reaction to events fired but that are not safe to perform inside event
+ * handlers, or to perform potentially expensive tasks that we need to do
+ * a single time before replying to clients. */
+void clusterBeforeSleep(void) {
+    /* Handle failover, this is needed when it is likely that there is already
+     * the quorum from masters in order to react fast. */
+    if (server.cluster->todo_before_sleep & CLUSTER_TODO_HANDLE_FAILOVER)
+        clusterHandleSlaveFailover();
+
+    /* Update the cluster state. */
+    if (server.cluster->todo_before_sleep & CLUSTER_TODO_UPDATE_STATE)
+        clusterUpdateState();
+
+    /* Save the config, possibly using fsync. */
+    if (server.cluster->todo_before_sleep & CLUSTER_TODO_SAVE_CONFIG) {
+        int fsync = server.cluster->todo_before_sleep &
+                    CLUSTER_TODO_FSYNC_CONFIG;
+        clusterSaveConfigOrDie(fsync);
+    }
+
+    /* Reset our flags (not strictly needed since every single function
+     * called for flags set should be able to clear its flag). */
+    server.cluster->todo_before_sleep = 0;
+}
+
+void clusterDoBeforeSleep(int flags) {
+    server.cluster->todo_before_sleep |= flags;
+}
+
+/* -----------------------------------------------------------------------------
+ * Slots management
+ * -------------------------------------------------------------------------- */
+
+/* Test bit 'pos' in a generic bitmap. Return 1 if the bit is set,
+ * otherwise 0. */
+int bitmapTestBit(unsigned char *bitmap, int pos) {
+    off_t byte = pos/8;
+    int bit = pos&7;
+    return (bitmap[byte] & (1<<bit)) != 0;
+}
+
+/* Set the bit at position 'pos' in a bitmap. */
+void bitmapSetBit(unsigned char *bitmap, int pos) {
+    off_t byte = pos/8;
+    int bit = pos&7;
+    bitmap[byte] |= 1<<bit;
+}
+
+/* Clear the bit at position 'pos' in a bitmap. */
+void bitmapClearBit(unsigned char *bitmap, int pos) {
+    off_t byte = pos/8;
+    int bit = pos&7;
+    bitmap[byte] &= ~(1<<bit);
+}
+
+/* Set the slot bit and return the old value. */
+int clusterNodeSetSlotBit(clusterNode *n, int slot) {
+    int old = bitmapTestBit(n->slots,slot);
+    bitmapSetBit(n->slots,slot);
+    if (!old) n->numslots++;
+    return old;
+}
+
+/* Clear the slot bit and return the old value. */
+int clusterNodeClearSlotBit(clusterNode *n, int slot) {
+    int old = bitmapTestBit(n->slots,slot);
+    bitmapClearBit(n->slots,slot);
+    if (old) n->numslots--;
+    return old;
+}
+
+/* Return the slot bit from the cluster node structure.
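+ * As an illustration of the byte/bit math used by the bitmap helpers above
+ * (the numbers are just an example): slot 100 lives in byte 100/8 == 12 at
+ * bit 100&7 == 4 of the slots bitmap, so testing it boils down to
+ * (n->slots[12] & (1<<4)) != 0.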
*/ +int clusterNodeGetSlotBit(clusterNode *n, int slot) { + return bitmapTestBit(n->slots,slot); +} + +/* Add the specified slot to the list of slots that node 'n' will + * serve. Return REDIS_OK if the operation ended with success. + * If the slot is already assigned to another instance this is considered + * an error and REDIS_ERR is returned. */ +int clusterAddSlot(clusterNode *n, int slot) { + if (server.cluster->slots[slot]) return REDIS_ERR; + clusterNodeSetSlotBit(n,slot); + server.cluster->slots[slot] = n; + return REDIS_OK; +} + +/* Delete the specified slot marking it as unassigned. + * Returns REDIS_OK if the slot was assigned, otherwise if the slot was + * already unassigned REDIS_ERR is returned. */ +int clusterDelSlot(int slot) { + clusterNode *n = server.cluster->slots[slot]; + + if (!n) return REDIS_ERR; + redisAssert(clusterNodeClearSlotBit(n,slot) == 1); + server.cluster->slots[slot] = NULL; + return REDIS_OK; +} + +/* Delete all the slots associated with the specified node. + * The number of deleted slots is returned. */ +int clusterDelNodeSlots(clusterNode *node) { + int deleted = 0, j; + + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (clusterNodeGetSlotBit(node,j)) clusterDelSlot(j); + deleted++; + } + return deleted; +} + +/* Clear the migrating / importing state for all the slots. + * This is useful at initialization and when turning a master into slave. */ +void clusterCloseAllSlots(void) { + memset(server.cluster->migrating_slots_to,0, + sizeof(server.cluster->migrating_slots_to)); + memset(server.cluster->importing_slots_from,0, + sizeof(server.cluster->importing_slots_from)); +} + +/* ----------------------------------------------------------------------------- + * Cluster state evaluation function + * -------------------------------------------------------------------------- */ + +/* The following are defines that are only used in the evaluation function + * and are based on heuristics. Actaully the main point about the rejoin and + * writable delay is that they should be a few orders of magnitude larger + * than the network latency. */ +#define REDIS_CLUSTER_MAX_REJOIN_DELAY 5000 +#define REDIS_CLUSTER_MIN_REJOIN_DELAY 500 +#define REDIS_CLUSTER_WRITABLE_DELAY 2000 + +void clusterUpdateState(void) { + int j, new_state; + int reachable_masters = 0; + static mstime_t among_minority_time; + static mstime_t first_call_time = 0; + + server.cluster->todo_before_sleep &= ~CLUSTER_TODO_UPDATE_STATE; + + /* If this is a master node, wait some time before turning the state + * into OK, since it is not a good idea to rejoin the cluster as a writable + * master, after a reboot, without giving the cluster a chance to + * reconfigure this node. Note that the delay is calculated starting from + * the first call to this function and not since the server start, in order + * to don't count the DB loading time. */ + if (first_call_time == 0) first_call_time = mstime(); + if (nodeIsMaster(myself) && + server.cluster->state == REDIS_CLUSTER_FAIL && + mstime() - first_call_time < REDIS_CLUSTER_WRITABLE_DELAY) return; + + /* Start assuming the state is OK. We'll turn it into FAIL if there + * are the right conditions. */ + new_state = REDIS_CLUSTER_OK; + + /* Check if all the slots are covered. 
*/ + if (server.cluster_require_full_coverage) { + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (server.cluster->slots[j] == NULL || + server.cluster->slots[j]->flags & (REDIS_NODE_FAIL)) + { + new_state = REDIS_CLUSTER_FAIL; + break; + } + } + } + + /* Compute the cluster size, that is the number of master nodes + * serving at least a single slot. + * + * At the same time count the number of reachable masters having + * at least one slot. */ + { + dictIterator *di; + dictEntry *de; + + server.cluster->size = 0; + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (nodeIsMaster(node) && node->numslots) { + server.cluster->size++; + if ((node->flags & (REDIS_NODE_FAIL|REDIS_NODE_PFAIL)) == 0) + reachable_masters++; + } + } + dictReleaseIterator(di); + } + + /* If we are in a minority partition, change the cluster state + * to FAIL. */ + { + int needed_quorum = (server.cluster->size / 2) + 1; + + if (reachable_masters < needed_quorum) { + new_state = REDIS_CLUSTER_FAIL; + among_minority_time = mstime(); + } + } + + /* Log a state change */ + if (new_state != server.cluster->state) { + mstime_t rejoin_delay = server.cluster_node_timeout; + + /* If the instance is a master and was partitioned away with the + * minority, don't let it accept queries for some time after the + * partition heals, to make sure there is enough time to receive + * a configuration update. */ + if (rejoin_delay > REDIS_CLUSTER_MAX_REJOIN_DELAY) + rejoin_delay = REDIS_CLUSTER_MAX_REJOIN_DELAY; + if (rejoin_delay < REDIS_CLUSTER_MIN_REJOIN_DELAY) + rejoin_delay = REDIS_CLUSTER_MIN_REJOIN_DELAY; + + if (new_state == REDIS_CLUSTER_OK && + nodeIsMaster(myself) && + mstime() - among_minority_time < rejoin_delay) + { + return; + } + + /* Change the state and log the event. */ + redisLog(REDIS_WARNING,"Cluster state changed: %s", + new_state == REDIS_CLUSTER_OK ? "ok" : "fail"); + server.cluster->state = new_state; + } +} + +/* This function is called after the node startup in order to verify that data + * loaded from disk is in agreement with the cluster configuration: + * + * 1) If we find keys about hash slots we have no responsibility for, the + * following happens: + * A) If no other node is in charge according to the current cluster + * configuration, we add these slots to our node. + * B) If according to our config other nodes are already in charge for + * this lots, we set the slots as IMPORTING from our point of view + * in order to justify we have those slots, and in order to make + * redis-trib aware of the issue, so that it can try to fix it. + * 2) If we find data in a DB different than DB0 we return REDIS_ERR to + * signal the caller it should quit the server with an error message + * or take other actions. + * + * The function always returns REDIS_OK even if it will try to correct + * the error described in "1". However if data is found in DB different + * from DB0, REDIS_ERR is returned. + * + * The function also uses the logging facility in order to warn the user + * about desynchronizations between the data we have in memory and the + * cluster configuration. */ +int verifyClusterConfigWithData(void) { + int j; + int update_config = 0; + + /* If this node is a slave, don't perform the check at all as we + * completely depend on the replication stream. */ + if (nodeIsSlave(myself)) return REDIS_OK; + + /* Make sure we only have keys in DB0. 
*/ + for (j = 1; j < server.dbnum; j++) { + if (dictSize(server.db[j].dict)) return REDIS_ERR; + } + + /* Check that all the slots we see populated memory have a corresponding + * entry in the cluster table. Otherwise fix the table. */ + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (!countKeysInSlot(j)) continue; /* No keys in this slot. */ + /* Check if we are assigned to this slot or if we are importing it. + * In both cases check the next slot as the configuration makes + * sense. */ + if (server.cluster->slots[j] == myself || + server.cluster->importing_slots_from[j] != NULL) continue; + + /* If we are here data and cluster config don't agree, and we have + * slot 'j' populated even if we are not importing it, nor we are + * assigned to this slot. Fix this condition. */ + + update_config++; + /* Case A: slot is unassigned. Take responsibility for it. */ + if (server.cluster->slots[j] == NULL) { + redisLog(REDIS_WARNING, "I have keys for unassigned slot %d. " + "Taking responsibility for it.",j); + clusterAddSlot(myself,j); + } else { + redisLog(REDIS_WARNING, "I have keys for slot %d, but the slot is " + "assigned to another node. " + "Setting it to importing state.",j); + server.cluster->importing_slots_from[j] = server.cluster->slots[j]; + } + } + if (update_config) clusterSaveConfigOrDie(1); + return REDIS_OK; +} + +/* ----------------------------------------------------------------------------- + * SLAVE nodes handling + * -------------------------------------------------------------------------- */ + +/* Set the specified node 'n' as master for this node. + * If this node is currently a master, it is turned into a slave. */ +void clusterSetMaster(clusterNode *n) { + redisAssert(n != myself); + redisAssert(myself->numslots == 0); + + if (nodeIsMaster(myself)) { + myself->flags &= ~REDIS_NODE_MASTER; + myself->flags |= REDIS_NODE_SLAVE; + clusterCloseAllSlots(); + } else { + if (myself->slaveof) + clusterNodeRemoveSlave(myself->slaveof,myself); + } + myself->slaveof = n; + clusterNodeAddSlave(n,myself); + replicationSetMaster(n->ip, n->port); + resetManualFailover(); +} + +/* ----------------------------------------------------------------------------- + * Nodes to string representation functions. + * -------------------------------------------------------------------------- */ + +struct redisNodeFlags { + uint16_t flag; + char *name; +}; + +static struct redisNodeFlags redisNodeFlagsTable[] = { + {REDIS_NODE_MYSELF, "myself,"}, + {REDIS_NODE_MASTER, "master,"}, + {REDIS_NODE_SLAVE, "slave,"}, + {REDIS_NODE_PFAIL, "fail?,"}, + {REDIS_NODE_FAIL, "fail,"}, + {REDIS_NODE_HANDSHAKE, "handshake,"}, + {REDIS_NODE_NOADDR, "noaddr,"} +}; + +/* Concatenate the comma separated list of node flags to the given SDS + * string 'ci'. */ +sds representRedisNodeFlags(sds ci, uint16_t flags) { + if (flags == 0) { + ci = sdscat(ci,"noflags,"); + } else { + int i, size = sizeof(redisNodeFlagsTable)/sizeof(struct redisNodeFlags); + for (i = 0; i < size; i++) { + struct redisNodeFlags *nodeflag = redisNodeFlagsTable + i; + if (flags & nodeflag->flag) ci = sdscat(ci, nodeflag->name); + } + } + sdsIncrLen(ci,-1); /* Remove trailing comma. */ + return ci; +} + +/* Generate a csv-alike representation of the specified cluster node. + * See clusterGenNodesDescription() top comment for more information. + * + * The function returns the string representation as an SDS string. 
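+ * An illustrative line, with made-up field values, looks like:
+ *
+ *   07c37dfe... 127.0.0.1:7002 master - 0 1426238316232 3 connected 5461-10922
+ *
+ * i.e. node name, address, flags, master name (or "-"), ping sent,
+ * pong received, config epoch, link state, and the served slot ranges.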
*/ +sds clusterGenNodeDescription(clusterNode *node) { + int j, start; + sds ci; + + /* Node coordinates */ + ci = sdscatprintf(sdsempty(),"%.40s %s:%d ", + node->name, + node->ip, + node->port); + + /* Flags */ + ci = representRedisNodeFlags(ci, node->flags); + + /* Slave of... or just "-" */ + if (node->slaveof) + ci = sdscatprintf(ci," %.40s ",node->slaveof->name); + else + ci = sdscatlen(ci," - ",3); + + /* Latency from the POV of this node, config epoch, link status */ + ci = sdscatprintf(ci,"%lld %lld %llu %s", + (long long) node->ping_sent, + (long long) node->pong_received, + (unsigned long long) node->configEpoch, + (node->link || node->flags & REDIS_NODE_MYSELF) ? + "connected" : "disconnected"); + + /* Slots served by this instance */ + start = -1; + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + int bit; + + if ((bit = clusterNodeGetSlotBit(node,j)) != 0) { + if (start == -1) start = j; + } + if (start != -1 && (!bit || j == REDIS_CLUSTER_SLOTS-1)) { + if (bit && j == REDIS_CLUSTER_SLOTS-1) j++; + + if (start == j-1) { + ci = sdscatprintf(ci," %d",start); + } else { + ci = sdscatprintf(ci," %d-%d",start,j-1); + } + start = -1; + } + } + + /* Just for MYSELF node we also dump info about slots that + * we are migrating to other instances or importing from other + * instances. */ + if (node->flags & REDIS_NODE_MYSELF) { + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (server.cluster->migrating_slots_to[j]) { + ci = sdscatprintf(ci," [%d->-%.40s]",j, + server.cluster->migrating_slots_to[j]->name); + } else if (server.cluster->importing_slots_from[j]) { + ci = sdscatprintf(ci," [%d-<-%.40s]",j, + server.cluster->importing_slots_from[j]->name); + } + } + } + return ci; +} + +/* Generate a csv-alike representation of the nodes we are aware of, + * including the "myself" node, and return an SDS string containing the + * representation (it is up to the caller to free it). + * + * All the nodes matching at least one of the node flags specified in + * "filter" are excluded from the output, so using zero as a filter will + * include all the known nodes in the representation, including nodes in + * the HANDSHAKE state. + * + * The representation obtained using this function is used for the output + * of the CLUSTER NODES function, and as format for the cluster + * configuration file (nodes.conf) for a given node. */ +sds clusterGenNodesDescription(int filter) { + sds ci = sdsempty(), ni; + dictIterator *di; + dictEntry *de; + + di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + + if (node->flags & filter) continue; + ni = clusterGenNodeDescription(node); + ci = sdscatsds(ci,ni); + sdsfree(ni); + ci = sdscatlen(ci,"\n",1); + } + dictReleaseIterator(di); + return ci; +} + +/* ----------------------------------------------------------------------------- + * CLUSTER command + * -------------------------------------------------------------------------- */ + +int getSlotOrReply(redisClient *c, robj *o) { + long long slot; + + if (getLongLongFromObject(o,&slot) != REDIS_OK || + slot < 0 || slot >= REDIS_CLUSTER_SLOTS) + { + addReplyError(c,"Invalid or out of range slot"); + return -1; + } + return (int) slot; +} + +void clusterReplyMultiBulkSlots(redisClient *c) { + /* Format: 1) 1) start slot + * 2) end slot + * 3) 1) master IP + * 2) master port + * 4) 1) replica IP + * 2) replica port + * ... 
continued until done + */ + + int num_masters = 0; + void *slot_replylen = addDeferredMultiBulkLength(c); + + dictEntry *de; + dictIterator *di = dictGetSafeIterator(server.cluster->nodes); + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + int j = 0, start = -1; + + /* Skip slaves (that are iterated when producing the output of their + * master) and masters not serving any slot. */ + if (!nodeIsMaster(node) || node->numslots == 0) continue; + + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + int bit, i; + + if ((bit = clusterNodeGetSlotBit(node,j)) != 0) { + if (start == -1) start = j; + } + if (start != -1 && (!bit || j == REDIS_CLUSTER_SLOTS-1)) { + int nested_elements = 3; /* slots (2) + master addr (1). */ + void *nested_replylen = addDeferredMultiBulkLength(c); + + if (bit && j == REDIS_CLUSTER_SLOTS-1) j++; + + /* If slot exists in output map, add to it's list. + * else, create a new output map for this slot */ + if (start == j-1) { + addReplyLongLong(c, start); /* only one slot; low==high */ + addReplyLongLong(c, start); + } else { + addReplyLongLong(c, start); /* low */ + addReplyLongLong(c, j-1); /* high */ + } + start = -1; + + /* First node reply position is always the master */ + addReplyMultiBulkLen(c, 2); + addReplyBulkCString(c, node->ip); + addReplyLongLong(c, node->port); + + /* Remaining nodes in reply are replicas for slot range */ + for (i = 0; i < node->numslaves; i++) { + /* This loop is copy/pasted from clusterGenNodeDescription() + * with modifications for per-slot node aggregation */ + if (nodeFailed(node->slaves[i])) continue; + addReplyMultiBulkLen(c, 2); + addReplyBulkCString(c, node->slaves[i]->ip); + addReplyLongLong(c, node->slaves[i]->port); + nested_elements++; + } + setDeferredMultiBulkLength(c, nested_replylen, nested_elements); + num_masters++; + } + } + } + dictReleaseIterator(di); + setDeferredMultiBulkLength(c, slot_replylen, num_masters); +} + +void clusterCommand(redisClient *c) { + if (server.cluster_enabled == 0) { + addReplyError(c,"This instance has cluster support disabled"); + return; + } + + if (!strcasecmp(c->argv[1]->ptr,"meet") && c->argc == 4) { + long long port; + + if (getLongLongFromObject(c->argv[3], &port) != REDIS_OK) { + addReplyErrorFormat(c,"Invalid TCP port specified: %s", + (char*)c->argv[3]->ptr); + return; + } + + if (clusterStartHandshake(c->argv[2]->ptr,port) == 0 && + errno == EINVAL) + { + addReplyErrorFormat(c,"Invalid node address specified: %s:%s", + (char*)c->argv[2]->ptr, (char*)c->argv[3]->ptr); + } else { + addReply(c,shared.ok); + } + } else if (!strcasecmp(c->argv[1]->ptr,"nodes") && c->argc == 2) { + /* CLUSTER NODES */ + robj *o; + sds ci = clusterGenNodesDescription(0); + + o = createObject(REDIS_STRING,ci); + addReplyBulk(c,o); + decrRefCount(o); + } else if (!strcasecmp(c->argv[1]->ptr,"myid") && c->argc == 2) { + /* CLUSTER MYID */ + addReplyBulkCBuffer(c,myself->name, REDIS_CLUSTER_NAMELEN); + } else if (!strcasecmp(c->argv[1]->ptr,"slots") && c->argc == 2) { + /* CLUSTER SLOTS */ + clusterReplyMultiBulkSlots(c); + } else if (!strcasecmp(c->argv[1]->ptr,"flushslots") && c->argc == 2) { + /* CLUSTER FLUSHSLOTS */ + if (dictSize(server.db[0].dict) != 0) { + addReplyError(c,"DB must be empty to perform CLUSTER FLUSHSLOTS."); + return; + } + clusterDelNodeSlots(myself); + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); + addReply(c,shared.ok); + } else if ((!strcasecmp(c->argv[1]->ptr,"addslots") || + !strcasecmp(c->argv[1]->ptr,"delslots")) && c->argc >= 3) 
+ { + /* CLUSTER ADDSLOTS [slot] ... */ + /* CLUSTER DELSLOTS [slot] ... */ + int j, slot; + unsigned char *slots = zmalloc(REDIS_CLUSTER_SLOTS); + int del = !strcasecmp(c->argv[1]->ptr,"delslots"); + + memset(slots,0,REDIS_CLUSTER_SLOTS); + /* Check that all the arguments are parseable and that all the + * slots are not already busy. */ + for (j = 2; j < c->argc; j++) { + if ((slot = getSlotOrReply(c,c->argv[j])) == -1) { + zfree(slots); + return; + } + if (del && server.cluster->slots[slot] == NULL) { + addReplyErrorFormat(c,"Slot %d is already unassigned", slot); + zfree(slots); + return; + } else if (!del && server.cluster->slots[slot]) { + addReplyErrorFormat(c,"Slot %d is already busy", slot); + zfree(slots); + return; + } + if (slots[slot]++ == 1) { + addReplyErrorFormat(c,"Slot %d specified multiple times", + (int)slot); + zfree(slots); + return; + } + } + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + if (slots[j]) { + int retval; + + /* If this slot was set as importing we can clear this + * state as now we are the real owner of the slot. */ + if (server.cluster->importing_slots_from[j]) + server.cluster->importing_slots_from[j] = NULL; + + retval = del ? clusterDelSlot(j) : + clusterAddSlot(myself,j); + redisAssertWithInfo(c,NULL,retval == REDIS_OK); + } + } + zfree(slots); + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); + addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"setslot") && c->argc >= 4) { + /* SETSLOT 10 MIGRATING */ + /* SETSLOT 10 IMPORTING */ + /* SETSLOT 10 STABLE */ + /* SETSLOT 10 NODE */ + int slot; + clusterNode *n; + + if ((slot = getSlotOrReply(c,c->argv[2])) == -1) return; + + if (!strcasecmp(c->argv[3]->ptr,"migrating") && c->argc == 5) { + if (server.cluster->slots[slot] != myself) { + addReplyErrorFormat(c,"I'm not the owner of hash slot %u",slot); + return; + } + if ((n = clusterLookupNode(c->argv[4]->ptr)) == NULL) { + addReplyErrorFormat(c,"I don't know about node %s", + (char*)c->argv[4]->ptr); + return; + } + server.cluster->migrating_slots_to[slot] = n; + } else if (!strcasecmp(c->argv[3]->ptr,"importing") && c->argc == 5) { + if (server.cluster->slots[slot] == myself) { + addReplyErrorFormat(c, + "I'm already the owner of hash slot %u",slot); + return; + } + if ((n = clusterLookupNode(c->argv[4]->ptr)) == NULL) { + addReplyErrorFormat(c,"I don't know about node %s", + (char*)c->argv[3]->ptr); + return; + } + server.cluster->importing_slots_from[slot] = n; + } else if (!strcasecmp(c->argv[3]->ptr,"stable") && c->argc == 4) { + /* CLUSTER SETSLOT STABLE */ + server.cluster->importing_slots_from[slot] = NULL; + server.cluster->migrating_slots_to[slot] = NULL; + } else if (!strcasecmp(c->argv[3]->ptr,"node") && c->argc == 5) { + /* CLUSTER SETSLOT NODE */ + clusterNode *n = clusterLookupNode(c->argv[4]->ptr); + + if (!n) { + addReplyErrorFormat(c,"Unknown node %s", + (char*)c->argv[4]->ptr); + return; + } + /* If this hash slot was served by 'myself' before to switch + * make sure there are no longer local keys for this hash slot. */ + if (server.cluster->slots[slot] == myself && n != myself) { + if (countKeysInSlot(slot) != 0) { + addReplyErrorFormat(c, + "Can't assign hashslot %d to a different node " + "while I still hold keys for this hash slot.", slot); + return; + } + } + /* If this slot is in migrating status but we have no keys + * for it assigning the slot to another node will clear + * the migratig status. 
*/ + if (countKeysInSlot(slot) == 0 && + server.cluster->migrating_slots_to[slot]) + server.cluster->migrating_slots_to[slot] = NULL; + + /* If this node was importing this slot, assigning the slot to + * itself also clears the importing status. */ + if (n == myself && + server.cluster->importing_slots_from[slot]) + { + /* This slot was manually migrated, set this node configEpoch + * to a new epoch so that the new version can be propagated + * by the cluster. + * + * Note that if this ever results in a collision with another + * node getting the same configEpoch, for example because a + * failover happens at the same time we close the slot, the + * configEpoch collision resolution will fix it assigning + * a different epoch to each node. */ + if (clusterBumpConfigEpochWithoutConsensus() == REDIS_OK) { + redisLog(REDIS_WARNING, + "configEpoch updated after importing slot %d", slot); + } + server.cluster->importing_slots_from[slot] = NULL; + } + clusterDelSlot(slot); + clusterAddSlot(n,slot); + } else { + addReplyError(c, + "Invalid CLUSTER SETSLOT action or number of arguments"); + return; + } + clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|CLUSTER_TODO_UPDATE_STATE); + addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"info") && c->argc == 2) { + /* CLUSTER INFO */ + char *statestr[] = {"ok","fail","needhelp"}; + int slots_assigned = 0, slots_ok = 0, slots_pfail = 0, slots_fail = 0; + uint64_t myepoch; + int j; + + for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) { + clusterNode *n = server.cluster->slots[j]; + + if (n == NULL) continue; + slots_assigned++; + if (nodeFailed(n)) { + slots_fail++; + } else if (nodeTimedOut(n)) { + slots_pfail++; + } else { + slots_ok++; + } + } + + myepoch = (nodeIsSlave(myself) && myself->slaveof) ? + myself->slaveof->configEpoch : myself->configEpoch; + + sds info = sdscatprintf(sdsempty(), + "cluster_state:%s\r\n" + "cluster_slots_assigned:%d\r\n" + "cluster_slots_ok:%d\r\n" + "cluster_slots_pfail:%d\r\n" + "cluster_slots_fail:%d\r\n" + "cluster_known_nodes:%lu\r\n" + "cluster_size:%d\r\n" + "cluster_current_epoch:%llu\r\n" + "cluster_my_epoch:%llu\r\n" + "cluster_stats_messages_sent:%lld\r\n" + "cluster_stats_messages_received:%lld\r\n" + , statestr[server.cluster->state], + slots_assigned, + slots_ok, + slots_pfail, + slots_fail, + dictSize(server.cluster->nodes), + server.cluster->size, + (unsigned long long) server.cluster->currentEpoch, + (unsigned long long) myepoch, + server.cluster->stats_bus_messages_sent, + server.cluster->stats_bus_messages_received + ); + addReplySds(c,sdscatprintf(sdsempty(),"$%lu\r\n", + (unsigned long)sdslen(info))); + addReplySds(c,info); + addReply(c,shared.crlf); + } else if (!strcasecmp(c->argv[1]->ptr,"saveconfig") && c->argc == 2) { + int retval = clusterSaveConfig(1); + + if (retval == 0) + addReply(c,shared.ok); + else + addReplyErrorFormat(c,"error saving the cluster node config: %s", + strerror(errno)); + } else if (!strcasecmp(c->argv[1]->ptr,"keyslot") && c->argc == 3) { + /* CLUSTER KEYSLOT */ + sds key = c->argv[2]->ptr; + + addReplyLongLong(c,keyHashSlot(key,sdslen(key))); + } else if (!strcasecmp(c->argv[1]->ptr,"countkeysinslot") && c->argc == 3) { + /* CLUSTER COUNTKEYSINSLOT */ + long long slot; + + if (getLongLongFromObjectOrReply(c,c->argv[2],&slot,NULL) != REDIS_OK) + return; + if (slot < 0 || slot >= REDIS_CLUSTER_SLOTS) { + addReplyError(c,"Invalid slot"); + return; + } + addReplyLongLong(c,countKeysInSlot(slot)); + } else if (!strcasecmp(c->argv[1]->ptr,"getkeysinslot") && c->argc == 4) { + 
/* CLUSTER GETKEYSINSLOT */ + long long maxkeys, slot; + unsigned int numkeys, j; + robj **keys; + + if (getLongLongFromObjectOrReply(c,c->argv[2],&slot,NULL) != REDIS_OK) + return; + if (getLongLongFromObjectOrReply(c,c->argv[3],&maxkeys,NULL) + != REDIS_OK) + return; + if (slot < 0 || slot >= REDIS_CLUSTER_SLOTS || maxkeys < 0) { + addReplyError(c,"Invalid slot or number of keys"); + return; + } + + keys = zmalloc(sizeof(robj*)*maxkeys); + numkeys = getKeysInSlot(slot, keys, maxkeys); + addReplyMultiBulkLen(c,numkeys); + for (j = 0; j < numkeys; j++) addReplyBulk(c,keys[j]); + zfree(keys); + } else if (!strcasecmp(c->argv[1]->ptr,"forget") && c->argc == 3) { + /* CLUSTER FORGET */ + clusterNode *n = clusterLookupNode(c->argv[2]->ptr); + + if (!n) { + addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr); + return; + } else if (n == myself) { + addReplyError(c,"I tried hard but I can't forget myself..."); + return; + } else if (nodeIsSlave(myself) && myself->slaveof == n) { + addReplyError(c,"Can't forget my master!"); + return; + } + clusterBlacklistAddNode(n); + clusterDelNode(n); + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE| + CLUSTER_TODO_SAVE_CONFIG); + addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"replicate") && c->argc == 3) { + /* CLUSTER REPLICATE */ + clusterNode *n = clusterLookupNode(c->argv[2]->ptr); + + /* Lookup the specified node in our table. */ + if (!n) { + addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr); + return; + } + + /* I can't replicate myself. */ + if (n == myself) { + addReplyError(c,"Can't replicate myself"); + return; + } + + /* Can't replicate a slave. */ + if (nodeIsSlave(n)) { + addReplyError(c,"I can only replicate a master, not a slave."); + return; + } + + /* If the instance is currently a master, it should have no assigned + * slots nor keys to accept to replicate some other node. + * Slaves can switch to another master without issues. */ + if (nodeIsMaster(myself) && + (myself->numslots != 0 || dictSize(server.db[0].dict) != 0)) { + addReplyError(c, + "To set a master the node must be empty and " + "without assigned slots."); + return; + } + + /* Set the master. */ + clusterSetMaster(n); + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); + addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"slaves") && c->argc == 3) { + /* CLUSTER SLAVES */ + clusterNode *n = clusterLookupNode(c->argv[2]->ptr); + int j; + + /* Lookup the specified node in our table. 
*/ + if (!n) { + addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr); + return; + } + + if (nodeIsSlave(n)) { + addReplyError(c,"The specified node is not a master"); + return; + } + + addReplyMultiBulkLen(c,n->numslaves); + for (j = 0; j < n->numslaves; j++) { + sds ni = clusterGenNodeDescription(n->slaves[j]); + addReplyBulkCString(c,ni); + sdsfree(ni); + } + } else if (!strcasecmp(c->argv[1]->ptr,"count-failure-reports") && + c->argc == 3) + { + /* CLUSTER COUNT-FAILURE-REPORTS */ + clusterNode *n = clusterLookupNode(c->argv[2]->ptr); + + if (!n) { + addReplyErrorFormat(c,"Unknown node %s", (char*)c->argv[2]->ptr); + return; + } else { + addReplyLongLong(c,clusterNodeFailureReportsCount(n)); + } + } else if (!strcasecmp(c->argv[1]->ptr,"failover") && + (c->argc == 2 || c->argc == 3)) + { + /* CLUSTER FAILOVER [FORCE|TAKEOVER] */ + int force = 0, takeover = 0; + + if (c->argc == 3) { + if (!strcasecmp(c->argv[2]->ptr,"force")) { + force = 1; + } else if (!strcasecmp(c->argv[2]->ptr,"takeover")) { + takeover = 1; + force = 1; /* Takeover also implies force. */ + } else { + addReply(c,shared.syntaxerr); + return; + } + } + + /* Check preconditions. */ + if (nodeIsMaster(myself)) { + addReplyError(c,"You should send CLUSTER FAILOVER to a slave"); + return; + } else if (myself->slaveof == NULL) { + addReplyError(c,"I'm a slave but my master is unknown to me"); + return; + } else if (!force && + (nodeFailed(myself->slaveof) || + myself->slaveof->link == NULL)) + { + addReplyError(c,"Master is down or failed, " + "please use CLUSTER FAILOVER FORCE"); + return; + } + resetManualFailover(); + server.cluster->mf_end = mstime() + REDIS_CLUSTER_MF_TIMEOUT; + + if (takeover) { + /* A takeover does not perform any initial check. It just + * generates a new configuration epoch for this node without + * consensus, claims the master's slots, and broadcast the new + * configuration. */ + redisLog(REDIS_WARNING,"Taking over the master (user request)."); + clusterBumpConfigEpochWithoutConsensus(); + clusterFailoverReplaceYourMaster(); + } else if (force) { + /* If this is a forced failover, we don't need to talk with our + * master to agree about the offset. We just failover taking over + * it without coordination. */ + redisLog(REDIS_WARNING,"Forced failover user request accepted."); + server.cluster->mf_can_start = 1; + } else { + redisLog(REDIS_WARNING,"Manual failover user request accepted."); + clusterSendMFStart(myself->slaveof); + } + addReply(c,shared.ok); + } else if (!strcasecmp(c->argv[1]->ptr,"set-config-epoch") && c->argc == 3) + { + /* CLUSTER SET-CONFIG-EPOCH + * + * The user is allowed to set the config epoch only when a node is + * totally fresh: no config epoch, no other known node, and so forth. + * This happens at cluster creation time to start with a cluster where + * every node has a different node ID, without to rely on the conflicts + * resolution system which is too slow when a big cluster is created. 
*/ + long long epoch; + + if (getLongLongFromObjectOrReply(c,c->argv[2],&epoch,NULL) != REDIS_OK) + return; + + if (epoch < 0) { + addReplyErrorFormat(c,"Invalid config epoch specified: %lld",epoch); + } else if (dictSize(server.cluster->nodes) > 1) { + addReplyError(c,"The user can assign a config epoch only when the " + "node does not know any other node."); + } else if (myself->configEpoch != 0) { + addReplyError(c,"Node config epoch is already non-zero"); + } else { + myself->configEpoch = epoch; + redisLog(REDIS_WARNING, + "configEpoch set to %llu via CLUSTER SET-CONFIG-EPOCH", + (unsigned long long) myself->configEpoch); + + if (server.cluster->currentEpoch < (uint64_t)epoch) + server.cluster->currentEpoch = epoch; + /* No need to fsync the config here since in the unlucky event + * of a failure to persist the config, the conflict resolution code + * will assign an unique config to this node. */ + clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE| + CLUSTER_TODO_SAVE_CONFIG); + addReply(c,shared.ok); + } + } else if (!strcasecmp(c->argv[1]->ptr,"reset") && + (c->argc == 2 || c->argc == 3)) + { + /* CLUSTER RESET [SOFT|HARD] */ + int hard = 0; + + /* Parse soft/hard argument. Default is soft. */ + if (c->argc == 3) { + if (!strcasecmp(c->argv[2]->ptr,"hard")) { + hard = 1; + } else if (!strcasecmp(c->argv[2]->ptr,"soft")) { + hard = 0; + } else { + addReply(c,shared.syntaxerr); + return; + } + } + + /* Slaves can be reset while containing data, but not master nodes + * that must be empty. */ + if (nodeIsMaster(myself) && dictSize(c->db->dict) != 0) { + addReplyError(c,"CLUSTER RESET can't be called with " + "master nodes containing keys"); + return; + } + clusterReset(hard); + addReply(c,shared.ok); + } else { + addReplyError(c,"Wrong CLUSTER subcommand or number of arguments"); + } +} + +/* ----------------------------------------------------------------------------- + * DUMP, RESTORE and MIGRATE commands + * -------------------------------------------------------------------------- */ + +/* Generates a DUMP-format representation of the object 'o', adding it to the + * io stream pointed by 'rio'. This function can't fail. */ +void createDumpPayload(rio *payload, robj *o) { + unsigned char buf[2]; + uint64_t crc; + + /* Serialize the object in a RDB-like format. It consist of an object type + * byte followed by the serialized object. This is understood by RESTORE. */ + rioInitWithBuffer(payload,sdsempty()); + redisAssert(rdbSaveObjectType(payload,o)); + redisAssert(rdbSaveObject(payload,o)); + + /* Write the footer, this is how it looks like: + * ----------------+---------------------+---------------+ + * ... RDB payload | 2 bytes RDB version | 8 bytes CRC64 | + * ----------------+---------------------+---------------+ + * RDB version and CRC are both in little endian. + */ + + /* RDB version */ + buf[0] = REDIS_RDB_VERSION & 0xff; + buf[1] = (REDIS_RDB_VERSION >> 8) & 0xff; + payload->io.buffer.ptr = sdscatlen(payload->io.buffer.ptr,buf,2); + + /* CRC64 */ + crc = crc64(0,(unsigned char*)payload->io.buffer.ptr, + sdslen(payload->io.buffer.ptr)); + memrev64ifbe(&crc); + payload->io.buffer.ptr = sdscatlen(payload->io.buffer.ptr,&crc,8); +} + +/* Verify that the RDB version of the dump payload matches the one of this Redis + * instance and that the checksum is ok. + * If the DUMP payload looks valid REDIS_OK is returned, otherwise REDIS_ERR + * is returned. 
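+ * As an example of the footer being checked here (an RDB version of 6 is
+ * used purely as an illustration): the last 10 bytes of a valid payload
+ * would be 0x06 0x00 followed by the 8 byte little endian CRC64 computed
+ * over everything that precedes the CRC itself.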
*/ +int verifyDumpPayload(unsigned char *p, size_t len) { + unsigned char *footer; + uint16_t rdbver; + uint64_t crc; + + /* At least 2 bytes of RDB version and 8 of CRC64 should be present. */ + if (len < 10) return REDIS_ERR; + footer = p+(len-10); + + /* Verify RDB version */ + rdbver = (footer[1] << 8) | footer[0]; + if (rdbver != REDIS_RDB_VERSION) return REDIS_ERR; + + /* Verify CRC64 */ + crc = crc64(0,p,len-8); + memrev64ifbe(&crc); + return (memcmp(&crc,footer+2,8) == 0) ? REDIS_OK : REDIS_ERR; +} + +/* DUMP keyname + * DUMP is actually not used by Redis Cluster but it is the obvious + * complement of RESTORE and can be useful for different applications. */ +void dumpCommand(redisClient *c) { + robj *o, *dumpobj; + rio payload; + + /* Check if the key is here. */ + if ((o = lookupKeyRead(c->db,c->argv[1])) == NULL) { + addReply(c,shared.nullbulk); + return; + } + + /* Create the DUMP encoded representation. */ + createDumpPayload(&payload,o); + + /* Transfer to the client */ + dumpobj = createObject(REDIS_STRING,payload.io.buffer.ptr); + addReplyBulk(c,dumpobj); + decrRefCount(dumpobj); + return; +} + +/* RESTORE key ttl serialized-value [REPLACE] */ +void restoreCommand(redisClient *c) { + long long ttl; + rio payload; + int j, type, replace = 0; + robj *obj; + + /* Parse additional options */ + for (j = 4; j < c->argc; j++) { + if (!strcasecmp(c->argv[j]->ptr,"replace")) { + replace = 1; + } else { + addReply(c,shared.syntaxerr); + return; + } + } + + /* Make sure this key does not already exist here... */ + if (!replace && lookupKeyWrite(c->db,c->argv[1]) != NULL) { + addReply(c,shared.busykeyerr); + return; + } + + /* Check if the TTL value makes sense */ + if (getLongLongFromObjectOrReply(c,c->argv[2],&ttl,NULL) != REDIS_OK) { + return; + } else if (ttl < 0) { + addReplyError(c,"Invalid TTL value, must be >= 0"); + return; + } + + /* Verify RDB version and data checksum. */ + if (verifyDumpPayload(c->argv[3]->ptr,sdslen(c->argv[3]->ptr)) == REDIS_ERR) + { + addReplyError(c,"DUMP payload version or checksum are wrong"); + return; + } + + rioInitWithBuffer(&payload,c->argv[3]->ptr); + if (((type = rdbLoadObjectType(&payload)) == -1) || + ((obj = rdbLoadObject(type,&payload)) == NULL)) + { + addReplyError(c,"Bad data format"); + return; + } + + /* Remove the old key if needed. */ + if (replace) dbDelete(c->db,c->argv[1]); + + /* Create the key and set the TTL if any */ + dbAdd(c->db,c->argv[1],obj); + if (ttl) setExpire(c->db,c->argv[1],mstime()+ttl); + signalModifiedKey(c->db,c->argv[1]); + addReply(c,shared.ok); + server.dirty++; +} + +/* MIGRATE socket cache implementation. + * + * We take a map between host:ip and a TCP socket that we used to connect + * to this instance in recent time. + * This sockets are closed when the max number we cache is reached, and also + * in serverCron() when they are around for more than a few seconds. */ +#define MIGRATE_SOCKET_CACHE_ITEMS 64 /* max num of items in the cache. */ +#define MIGRATE_SOCKET_CACHE_TTL 10 /* close cached sockets after 10 sec. */ + +typedef struct migrateCachedSocket { + int fd; + long last_dbid; + time_t last_use_time; +} migrateCachedSocket; + +/* Return a migrateCachedSocket containing a TCP socket connected with the + * target instance, possibly returning a cached one. + * + * This function is responsible of sending errors to the client if a + * connection can't be established. In this case -1 is returned. + * Otherwise on success the socket is returned, and the caller should not + * attempt to free it after usage. 
+ * + * If the caller detects an error while using the socket, migrateCloseSocket() + * should be called so that the connection will be created from scratch + * the next time. */ +migrateCachedSocket* migrateGetSocket(redisClient *c, robj *host, robj *port, long timeout) { + int fd; + sds name = sdsempty(); + migrateCachedSocket *cs; + + /* Check if we have an already cached socket for this ip:port pair. */ + name = sdscatlen(name,host->ptr,sdslen(host->ptr)); + name = sdscatlen(name,":",1); + name = sdscatlen(name,port->ptr,sdslen(port->ptr)); + cs = dictFetchValue(server.migrate_cached_sockets,name); + if (cs) { + sdsfree(name); + cs->last_use_time = server.unixtime; + return cs; + } + + /* No cached socket, create one. */ + if (dictSize(server.migrate_cached_sockets) == MIGRATE_SOCKET_CACHE_ITEMS) { + /* Too many items, drop one at random. */ + dictEntry *de = dictGetRandomKey(server.migrate_cached_sockets); + cs = dictGetVal(de); + close(cs->fd); + zfree(cs); + dictDelete(server.migrate_cached_sockets,dictGetKey(de)); + } + + /* Create the socket */ + fd = anetTcpNonBlockConnect(server.neterr,c->argv[1]->ptr, + atoi(c->argv[2]->ptr)); + if (fd == -1) { + sdsfree(name); + addReplyErrorFormat(c,"Can't connect to target node: %s", + server.neterr); + return NULL; + } + anetEnableTcpNoDelay(server.neterr,fd); + + /* Check if it connects within the specified timeout. */ + if ((aeWait(fd,AE_WRITABLE,timeout) & AE_WRITABLE) == 0) { + sdsfree(name); + addReplySds(c, + sdsnew("-IOERR error or timeout connecting to the client\r\n")); + close(fd); + return NULL; + } + + /* Add to the cache and return it to the caller. */ + cs = zmalloc(sizeof(*cs)); + cs->fd = fd; + cs->last_dbid = -1; + cs->last_use_time = server.unixtime; + dictAdd(server.migrate_cached_sockets,name,cs); + return cs; +} + +/* Free a migrate cached connection. 
*/ +void migrateCloseSocket(robj *host, robj *port) { + sds name = sdsempty(); + migrateCachedSocket *cs; + + name = sdscatlen(name,host->ptr,sdslen(host->ptr)); + name = sdscatlen(name,":",1); + name = sdscatlen(name,port->ptr,sdslen(port->ptr)); + cs = dictFetchValue(server.migrate_cached_sockets,name); + if (!cs) { + sdsfree(name); + return; + } + + close(cs->fd); + zfree(cs); + dictDelete(server.migrate_cached_sockets,name); + sdsfree(name); +} + +void migrateCloseTimedoutSockets(void) { + dictIterator *di = dictGetSafeIterator(server.migrate_cached_sockets); + dictEntry *de; + + while((de = dictNext(di)) != NULL) { + migrateCachedSocket *cs = dictGetVal(de); + + if ((server.unixtime - cs->last_use_time) > MIGRATE_SOCKET_CACHE_TTL) { + close(cs->fd); + zfree(cs); + dictDelete(server.migrate_cached_sockets,dictGetKey(de)); + } + } + dictReleaseIterator(di); +} + +/* MIGRATE host port key dbid timeout [COPY | REPLACE] */ +void migrateCommand(redisClient *c) { + migrateCachedSocket *cs; + int copy, replace, j; + long timeout; + long dbid; + long long ttl, expireat; + robj *o; + rio cmd, payload; + int retry_num = 0; + +try_again: + /* Initialization */ + copy = 0; + replace = 0; + ttl = 0; + + /* Parse additional options */ + for (j = 6; j < c->argc; j++) { + if (!strcasecmp(c->argv[j]->ptr,"copy")) { + copy = 1; + } else if (!strcasecmp(c->argv[j]->ptr,"replace")) { + replace = 1; + } else { + addReply(c,shared.syntaxerr); + return; + } + } + + /* Sanity check */ + if (getLongFromObjectOrReply(c,c->argv[5],&timeout,NULL) != REDIS_OK) + return; + if (getLongFromObjectOrReply(c,c->argv[4],&dbid,NULL) != REDIS_OK) + return; + if (timeout <= 0) timeout = 1000; + + /* Check if the key is here. If not we reply with success as there is + * nothing to migrate (for instance the key expired in the meantime), but + * we include such information in the reply string. */ + if ((o = lookupKeyRead(c->db,c->argv[3])) == NULL) { + addReplySds(c,sdsnew("+NOKEY\r\n")); + return; + } + + /* Connect */ + cs = migrateGetSocket(c,c->argv[1],c->argv[2],timeout); + if (cs == NULL) return; /* error sent to the client by migrateGetSocket() */ + + rioInitWithBuffer(&cmd,sdsempty()); + + /* Send the SELECT command if the current DB is not already selected. */ + int select = cs->last_dbid != dbid; /* Should we emit SELECT? */ + if (select) { + redisAssertWithInfo(c,NULL,rioWriteBulkCount(&cmd,'*',2)); + redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"SELECT",6)); + redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid)); + } + + /* Create RESTORE payload and generate the protocol to call the command. */ + expireat = getExpire(c->db,c->argv[3]); + if (expireat != -1) { + ttl = expireat-mstime(); + if (ttl < 1) ttl = 1; + } + redisAssertWithInfo(c,NULL,rioWriteBulkCount(&cmd,'*',replace ? 5 : 4)); + if (server.cluster_enabled) + redisAssertWithInfo(c,NULL, + rioWriteBulkString(&cmd,"RESTORE-ASKING",14)); + else + redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"RESTORE",7)); + redisAssertWithInfo(c,NULL,sdsEncodedObject(c->argv[3])); + redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,c->argv[3]->ptr, + sdslen(c->argv[3]->ptr))); + redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,ttl)); + + /* Emit the payload argument, that is the serialized object using + * the DUMP format. 
*/ + createDumpPayload(&payload,o); + redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,payload.io.buffer.ptr, + sdslen(payload.io.buffer.ptr))); + sdsfree(payload.io.buffer.ptr); + + /* Add the REPLACE option to the RESTORE command if it was specified + * as a MIGRATE option. */ + if (replace) + redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"REPLACE",7)); + + /* Transfer the query to the other node in 64K chunks. */ + errno = 0; + { + sds buf = cmd.io.buffer.ptr; + size_t pos = 0, towrite; + int nwritten = 0; + + while ((towrite = sdslen(buf)-pos) > 0) { + towrite = (towrite > (64*1024) ? (64*1024) : towrite); + nwritten = syncWrite(cs->fd,buf+pos,towrite,timeout); + if (nwritten != (signed)towrite) goto socket_wr_err; + pos += nwritten; + } + } + + /* Read back the reply. */ + { + char buf1[1024]; + char buf2[1024]; + + /* Read the two replies */ + if (select && syncReadLine(cs->fd, buf1, sizeof(buf1), timeout) <= 0) + goto socket_rd_err; + if (syncReadLine(cs->fd, buf2, sizeof(buf2), timeout) <= 0) + goto socket_rd_err; + if ((select && buf1[0] == '-') || buf2[0] == '-') { + /* On error assume that last_dbid is no longer valid. */ + cs->last_dbid = -1; + addReplyErrorFormat(c,"Target instance replied with error: %s", + (select && buf1[0] == '-') ? buf1+1 : buf2+1); + } else { + /* Update the last_dbid in migrateCachedSocket */ + cs->last_dbid = dbid; + robj *aux; + + addReply(c,shared.ok); + + if (!copy) { + /* No COPY option: remove the local key, signal the change. */ + dbDelete(c->db,c->argv[3]); + signalModifiedKey(c->db,c->argv[3]); + server.dirty++; + + /* Translate MIGRATE as DEL for replication/AOF. */ + aux = createStringObject("DEL",3); + rewriteClientCommandVector(c,2,aux,c->argv[3]); + decrRefCount(aux); + } + } + } + + sdsfree(cmd.io.buffer.ptr); + return; + +socket_wr_err: + sdsfree(cmd.io.buffer.ptr); + migrateCloseSocket(c->argv[1],c->argv[2]); + if (errno != ETIMEDOUT && retry_num++ == 0) goto try_again; + addReplySds(c, + sdsnew("-IOERR error or timeout writing to target instance\r\n")); + return; + +socket_rd_err: + sdsfree(cmd.io.buffer.ptr); + migrateCloseSocket(c->argv[1],c->argv[2]); + if (errno != ETIMEDOUT && retry_num++ == 0) goto try_again; + addReplySds(c, + sdsnew("-IOERR error or timeout reading from target node\r\n")); + return; +} + +/* ----------------------------------------------------------------------------- + * Cluster functions related to serving / redirecting clients + * -------------------------------------------------------------------------- */ + +/* The ASKING command is required after a -ASK redirection. + * The client should issue ASKING before to actually send the command to + * the target instance. See the Redis Cluster specification for more + * information. */ +void askingCommand(redisClient *c) { + if (server.cluster_enabled == 0) { + addReplyError(c,"This instance has cluster support disabled"); + return; + } + c->flags |= REDIS_ASKING; + addReply(c,shared.ok); +} + +/* The READONLY command is used by clients to enter the read-only mode. + * In this mode slaves will not redirect clients as long as clients access + * with read-only commands to keys that are served by the slave's master. */ +void readonlyCommand(redisClient *c) { + if (server.cluster_enabled == 0) { + addReplyError(c,"This instance has cluster support disabled"); + return; + } + c->flags |= REDIS_READONLY; + addReply(c,shared.ok); +} + +/* The READWRITE command just clears the READONLY command state. 
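+ * An illustrative session against a slave, to show how the two commands
+ * pair up (replies and key names are examples):
+ *
+ *   READONLY   -> +OK   (reads for slots served by our master are now local)
+ *   GET foo    -> "bar" (no -MOVED redirection to the master)
+ *   READWRITE  -> +OK   (normal redirection behavior is restored)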
*/ +void readwriteCommand(redisClient *c) { + c->flags &= ~REDIS_READONLY; + addReply(c,shared.ok); +} + +/* Return the pointer to the cluster node that is able to serve the command. + * For the function to succeed the command should only target either: + * + * 1) A single key (even multiple times like LPOPRPUSH mylist mylist). + * 2) Multiple keys in the same hash slot, while the slot is stable (no + * resharding in progress). + * + * On success the function returns the node that is able to serve the request. + * If the node is not 'myself' a redirection must be perfomed. The kind of + * redirection is specified setting the integer passed by reference + * 'error_code', which will be set to REDIS_CLUSTER_REDIR_ASK or + * REDIS_CLUSTER_REDIR_MOVED. + * + * When the node is 'myself' 'error_code' is set to REDIS_CLUSTER_REDIR_NONE. + * + * If the command fails NULL is returned, and the reason of the failure is + * provided via 'error_code', which will be set to: + * + * REDIS_CLUSTER_REDIR_CROSS_SLOT if the request contains multiple keys that + * don't belong to the same hash slot. + * + * REDIS_CLUSTER_REDIR_UNSTABLE if the request contains mutliple keys + * belonging to the same slot, but the slot is not stable (in migration or + * importing state, likely because a resharding is in progress). + * + * REDIS_CLUSTER_REDIR_DOWN_UNBOUND if the request addresses a slot which is + * not bound to any node. In this case the cluster global state should be + * already "down" but it is fragile to rely on the update of the global state, + * so we also handle it here. */ +clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *error_code) { + clusterNode *n = NULL; + robj *firstkey = NULL; + int multiple_keys = 0; + multiState *ms, _ms; + multiCmd mc; + int i, slot = 0, migrating_slot = 0, importing_slot = 0, missing_keys = 0; + + /* Set error code optimistically for the base case. */ + if (error_code) *error_code = REDIS_CLUSTER_REDIR_NONE; + + /* We handle all the cases as if they were EXEC commands, so we have + * a common code path for everything */ + if (cmd->proc == execCommand) { + /* If REDIS_MULTI flag is not set EXEC is just going to return an + * error. */ + if (!(c->flags & REDIS_MULTI)) return myself; + ms = &c->mstate; + } else { + /* In order to have a single codepath create a fake Multi State + * structure if the client is not in MULTI/EXEC state, this way + * we have a single codepath below. */ + ms = &_ms; + _ms.commands = &mc; + _ms.count = 1; + mc.argv = argv; + mc.argc = argc; + mc.cmd = cmd; + } + + /* Check that all the keys are in the same hash slot, and obtain this + * slot and the node associated. */ + for (i = 0; i < ms->count; i++) { + struct redisCommand *mcmd; + robj **margv; + int margc, *keyindex, numkeys, j; + + mcmd = ms->commands[i].cmd; + margc = ms->commands[i].argc; + margv = ms->commands[i].argv; + + keyindex = getKeysFromCommand(mcmd,margv,margc,&numkeys); + for (j = 0; j < numkeys; j++) { + robj *thiskey = margv[keyindex[j]]; + int thisslot = keyHashSlot((char*)thiskey->ptr, + sdslen(thiskey->ptr)); + + if (firstkey == NULL) { + /* This is the first key we see. Check what is the slot + * and node. */ + firstkey = thiskey; + slot = thisslot; + n = server.cluster->slots[slot]; + + /* Error: If a slot is not served, we are in "cluster down" + * state. However the state is yet to be updated, so this was + * not trapped earlier in processCommand(). Report the same + * error to the client. 
 */
+                if (n == NULL) {
+                    getKeysFreeResult(keyindex);
+                    if (error_code)
+                        *error_code = REDIS_CLUSTER_REDIR_DOWN_UNBOUND;
+                    return NULL;
+                }
+
+                /* If we are migrating or importing this slot, we need to check
+                 * if we have all the keys in the request (the only way we
+                 * can safely serve the request, otherwise we return a TRYAGAIN
+                 * error). To do so we set the importing/migrating state and
+                 * increment a counter for every missing key. */
+                if (n == myself &&
+                    server.cluster->migrating_slots_to[slot] != NULL)
+                {
+                    migrating_slot = 1;
+                } else if (server.cluster->importing_slots_from[slot] != NULL) {
+                    importing_slot = 1;
+                }
+            } else {
+                /* If it is not the first key, make sure it is exactly
+                 * the same key as the first we saw. */
+                if (!equalStringObjects(firstkey,thiskey)) {
+                    if (slot != thisslot) {
+                        /* Error: multiple keys from different slots. */
+                        getKeysFreeResult(keyindex);
+                        if (error_code)
+                            *error_code = REDIS_CLUSTER_REDIR_CROSS_SLOT;
+                        return NULL;
+                    } else {
+                        /* Flag this request as one with multiple different
+                         * keys. */
+                        multiple_keys = 1;
+                    }
+                }
+            }
+
+            /* Migrating / Importing slot? Count keys we don't have. */
+            if ((migrating_slot || importing_slot) &&
+                lookupKeyRead(&server.db[0],thiskey) == NULL)
+            {
+                missing_keys++;
+            }
+        }
+        getKeysFreeResult(keyindex);
+    }
+
+    /* No key at all in the command? Then we can serve the request
+     * without redirections or errors. */
+    if (n == NULL) return myself;
+
+    /* Return the hashslot by reference. */
+    if (hashslot) *hashslot = slot;
+
+    /* Is this request about a slot we are migrating to another instance?
+     * Then we can only serve it here if we have all the keys locally. */
+
+    /* If we don't have all the keys and we are migrating the slot, send
+     * an ASK redirection. */
+    if (migrating_slot && missing_keys) {
+        if (error_code) *error_code = REDIS_CLUSTER_REDIR_ASK;
+        return server.cluster->migrating_slots_to[slot];
+    }
+
+    /* If we are receiving the slot, and the client correctly flagged the
+     * request as "ASKING", we can serve the request. However if the request
+     * involves multiple keys and we don't have them all, the only option is
+     * to send a TRYAGAIN error. */
+    if (importing_slot &&
+        (c->flags & REDIS_ASKING || cmd->flags & REDIS_CMD_ASKING))
+    {
+        if (multiple_keys && missing_keys) {
+            if (error_code) *error_code = REDIS_CLUSTER_REDIR_UNSTABLE;
+            return NULL;
+        } else {
+            return myself;
+        }
+    }
+
+    /* Handle the read-only client case reading from a slave: if this
+     * node is a slave and the request is about a hash slot our master
+     * is serving, we can reply without redirection. */
+    if (c->flags & REDIS_READONLY &&
+        cmd->flags & REDIS_CMD_READONLY &&
+        nodeIsSlave(myself) &&
+        myself->slaveof == n)
+    {
+        return myself;
+    }
+
+    /* Base case: just return the right node. However if this node is not
+     * myself, set error_code to MOVED since we need to issue a redirection. */
+    if (n != myself && error_code) *error_code = REDIS_CLUSTER_REDIR_MOVED;
+    return n;
+}
+
+/* Send the client the right redirection code, according to error_code
+ * that should be set to one of REDIS_CLUSTER_REDIR_* macros.
+ *
+ * If REDIS_CLUSTER_REDIR_ASK or REDIS_CLUSTER_REDIR_MOVED error codes
+ * are used, then the node 'n' should not be NULL, but should be the
+ * node we want to mention in the redirection. Moreover hashslot should
+ * be set to the hash slot that caused the redirection.
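+ * The redirection replies generated below look like the following
+ * (slot number and address are illustrative):
+ *
+ *   -MOVED 3999 127.0.0.1:6381
+ *   -ASK 3999 127.0.0.1:6381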
+ */
+void clusterRedirectClient(redisClient *c, clusterNode *n, int hashslot, int error_code) {
+    if (error_code == REDIS_CLUSTER_REDIR_CROSS_SLOT) {
+        addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n"));
+    } else if (error_code == REDIS_CLUSTER_REDIR_UNSTABLE) {
+        /* The request spans multiple keys in the same slot,
+         * but the slot is not "stable" currently as there is
+         * a migration or import in progress. */
+        addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n"));
+    } else if (error_code == REDIS_CLUSTER_REDIR_DOWN_STATE) {
+        addReplySds(c,sdsnew("-CLUSTERDOWN The cluster is down\r\n"));
+    } else if (error_code == REDIS_CLUSTER_REDIR_DOWN_UNBOUND) {
+        addReplySds(c,sdsnew("-CLUSTERDOWN Hash slot not served\r\n"));
+    } else if (error_code == REDIS_CLUSTER_REDIR_MOVED ||
+               error_code == REDIS_CLUSTER_REDIR_ASK)
+    {
+        addReplySds(c,sdscatprintf(sdsempty(),
+            "-%s %d %s:%d\r\n",
+            (error_code == REDIS_CLUSTER_REDIR_ASK) ? "ASK" : "MOVED",
+            hashslot,n->ip,n->port));
+    } else {
+        redisPanic("getNodeByQuery() unknown error.");
+    }
+}
+
+/* This function is called by the function processing clients incrementally
+ * to detect timeouts, in order to handle the following case:
+ *
+ * 1) A client blocks with BLPOP or similar blocking operation.
+ * 2) The master migrates the hash slot elsewhere or turns into a slave.
+ * 3) The client may remain blocked forever (or up to the max timeout time)
+ *    waiting for a key change that will never happen.
+ *
+ * If the client is found to be blocked on a hash slot this node no
+ * longer handles, the client is sent a redirection error, and the function
+ * returns 1. Otherwise 0 is returned and no operation is performed. */
+int clusterRedirectBlockedClientIfNeeded(redisClient *c) {
+    if (c->flags & REDIS_BLOCKED && c->btype == REDIS_BLOCKED_LIST) {
+        dictEntry *de;
+        dictIterator *di;
+
+        /* If the cluster is down, unblock the client with the right error. */
+        if (server.cluster->state == REDIS_CLUSTER_FAIL) {
+            clusterRedirectClient(c,NULL,0,REDIS_CLUSTER_REDIR_DOWN_STATE);
+            return 1;
+        }
+
+        di = dictGetIterator(c->bpop.keys);
+        while((de = dictNext(di)) != NULL) {
+            robj *key = dictGetKey(de);
+            int slot = keyHashSlot((char*)key->ptr, sdslen(key->ptr));
+            clusterNode *node = server.cluster->slots[slot];
+
+            /* We send an error and unblock the client if:
+             * 1) The slot is unassigned, emitting a cluster down error.
+             * 2) The slot is not handled by this node, nor being imported. */
+            if (node != myself &&
+                server.cluster->importing_slots_from[slot] == NULL)
+            {
+                if (node == NULL) {
+                    clusterRedirectClient(c,NULL,0,
+                        REDIS_CLUSTER_REDIR_DOWN_UNBOUND);
+                } else {
+                    clusterRedirectClient(c,node,slot,
+                        REDIS_CLUSTER_REDIR_MOVED);
+                }
+                return 1;
+            }
+        }
+        dictReleaseIterator(di);
+    }
+    return 0;
+}
diff --git a/redis.submodule/src/cluster.h b/redis.submodule/src/cluster.h
new file mode 100644
index 0000000..bf442a2
--- /dev/null
+++ b/redis.submodule/src/cluster.h
@@ -0,0 +1,256 @@
+#ifndef __REDIS_CLUSTER_H
+#define __REDIS_CLUSTER_H
+
+/*-----------------------------------------------------------------------------
+ * Redis cluster data structures, defines, exported API.
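clusterRedirectClient() above encodes redirections as "-MOVED <slot> <ip>:<port>" and "-ASK <slot> <ip>:<port>" error strings; a cluster-aware client is expected to retry the command against the indicated node, sending ASKING first in the -ASK case. The following client-side parser is shown only to illustrate that wire format; parseRedirectError() is a hypothetical helper, not part of this patch, and it ignores the extra colons an IPv6 address would introduce:

    #include <stdio.h>
    #include <string.h>

    /* Parse "-MOVED 3999 127.0.0.1:6381" / "-ASK 3999 127.0.0.1:6381".
     * Returns 1 and fills slot/host/port on success, 0 otherwise. */
    static int parseRedirectError(const char *err, int *slot, char host[64], int *port) {
        char verb[16];
        if (sscanf(err, "-%15s %d %63[^:]:%d", verb, slot, host, port) != 4) return 0;
        if (strcmp(verb,"MOVED") != 0 && strcmp(verb,"ASK") != 0) return 0;
        return 1; /* for -ASK the client must send ASKING before retrying */
    }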
+ *----------------------------------------------------------------------------*/ + +#define REDIS_CLUSTER_SLOTS 16384 +#define REDIS_CLUSTER_OK 0 /* Everything looks ok */ +#define REDIS_CLUSTER_FAIL 1 /* The cluster can't work */ +#define REDIS_CLUSTER_NAMELEN 40 /* sha1 hex length */ +#define REDIS_CLUSTER_PORT_INCR 10000 /* Cluster port = baseport + PORT_INCR */ + +/* The following defines are amount of time, sometimes expressed as + * multiplicators of the node timeout value (when ending with MULT). */ +#define REDIS_CLUSTER_DEFAULT_NODE_TIMEOUT 15000 +#define REDIS_CLUSTER_DEFAULT_SLAVE_VALIDITY 10 /* Slave max data age factor. */ +#define REDIS_CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE 1 +#define REDIS_CLUSTER_FAIL_REPORT_VALIDITY_MULT 2 /* Fail report validity. */ +#define REDIS_CLUSTER_FAIL_UNDO_TIME_MULT 2 /* Undo fail if master is back. */ +#define REDIS_CLUSTER_FAIL_UNDO_TIME_ADD 10 /* Some additional time. */ +#define REDIS_CLUSTER_FAILOVER_DELAY 5 /* Seconds */ +#define REDIS_CLUSTER_DEFAULT_MIGRATION_BARRIER 1 +#define REDIS_CLUSTER_MF_TIMEOUT 5000 /* Milliseconds to do a manual failover. */ +#define REDIS_CLUSTER_MF_PAUSE_MULT 2 /* Master pause manual failover mult. */ + +/* Redirection errors returned by getNodeByQuery(). */ +#define REDIS_CLUSTER_REDIR_NONE 0 /* Node can serve the request. */ +#define REDIS_CLUSTER_REDIR_CROSS_SLOT 1 /* -CROSSSLOT request. */ +#define REDIS_CLUSTER_REDIR_UNSTABLE 2 /* -TRYAGAIN redirection required */ +#define REDIS_CLUSTER_REDIR_ASK 3 /* -ASK redirection required. */ +#define REDIS_CLUSTER_REDIR_MOVED 4 /* -MOVED redirection required. */ +#define REDIS_CLUSTER_REDIR_DOWN_STATE 5 /* -CLUSTERDOWN, global state. */ +#define REDIS_CLUSTER_REDIR_DOWN_UNBOUND 6 /* -CLUSTERDOWN, unbound slot. */ + +struct clusterNode; + +/* clusterLink encapsulates everything needed to talk with a remote node. */ +typedef struct clusterLink { + mstime_t ctime; /* Link creation time */ + int fd; /* TCP socket file descriptor */ + sds sndbuf; /* Packet send buffer */ + sds rcvbuf; /* Packet reception buffer */ + struct clusterNode *node; /* Node related to this link if any, or NULL */ +} clusterLink; + +/* Cluster node flags and macros. */ +#define REDIS_NODE_MASTER 1 /* The node is a master */ +#define REDIS_NODE_SLAVE 2 /* The node is a slave */ +#define REDIS_NODE_PFAIL 4 /* Failure? Need acknowledge */ +#define REDIS_NODE_FAIL 8 /* The node is believed to be malfunctioning */ +#define REDIS_NODE_MYSELF 16 /* This node is myself */ +#define REDIS_NODE_HANDSHAKE 32 /* We have still to exchange the first ping */ +#define REDIS_NODE_NOADDR 64 /* We don't know the address of this node */ +#define REDIS_NODE_MEET 128 /* Send a MEET message to this node */ +#define REDIS_NODE_PROMOTED 256 /* Master was a slave promoted by failover */ +#define REDIS_NODE_NULL_NAME "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + +#define nodeIsMaster(n) ((n)->flags & REDIS_NODE_MASTER) +#define nodeIsSlave(n) ((n)->flags & REDIS_NODE_SLAVE) +#define nodeInHandshake(n) ((n)->flags & REDIS_NODE_HANDSHAKE) +#define nodeHasAddr(n) (!((n)->flags & REDIS_NODE_NOADDR)) +#define nodeWithoutAddr(n) ((n)->flags & REDIS_NODE_NOADDR) +#define nodeTimedOut(n) ((n)->flags & REDIS_NODE_PFAIL) +#define nodeFailed(n) ((n)->flags & REDIS_NODE_FAIL) + +/* Reasons why a slave is not able to failover. 
*/ +#define REDIS_CLUSTER_CANT_FAILOVER_NONE 0 +#define REDIS_CLUSTER_CANT_FAILOVER_DATA_AGE 1 +#define REDIS_CLUSTER_CANT_FAILOVER_WAITING_DELAY 2 +#define REDIS_CLUSTER_CANT_FAILOVER_EXPIRED 3 +#define REDIS_CLUSTER_CANT_FAILOVER_WAITING_VOTES 4 +#define REDIS_CLUSTER_CANT_FAILOVER_RELOG_PERIOD (60*5) /* seconds. */ + +/* This structure represent elements of node->fail_reports. */ +typedef struct clusterNodeFailReport { + struct clusterNode *node; /* Node reporting the failure condition. */ + mstime_t time; /* Time of the last report from this node. */ +} clusterNodeFailReport; + +typedef struct clusterNode { + mstime_t ctime; /* Node object creation time. */ + char name[REDIS_CLUSTER_NAMELEN]; /* Node name, hex string, sha1-size */ + int flags; /* REDIS_NODE_... */ + uint64_t configEpoch; /* Last configEpoch observed for this node */ + unsigned char slots[REDIS_CLUSTER_SLOTS/8]; /* slots handled by this node */ + int numslots; /* Number of slots handled by this node */ + int numslaves; /* Number of slave nodes, if this is a master */ + struct clusterNode **slaves; /* pointers to slave nodes */ + struct clusterNode *slaveof; /* pointer to the master node */ + mstime_t ping_sent; /* Unix time we sent latest ping */ + mstime_t pong_received; /* Unix time we received the pong */ + mstime_t fail_time; /* Unix time when FAIL flag was set */ + mstime_t voted_time; /* Last time we voted for a slave of this master */ + mstime_t repl_offset_time; /* Unix time we received offset for this node */ + long long repl_offset; /* Last known repl offset for this node. */ + char ip[REDIS_IP_STR_LEN]; /* Latest known IP address of this node */ + int port; /* Latest known port of this node */ + clusterLink *link; /* TCP/IP link with this node */ + list *fail_reports; /* List of nodes signaling this as failing */ +} clusterNode; + +typedef struct clusterState { + clusterNode *myself; /* This node */ + uint64_t currentEpoch; + int state; /* REDIS_CLUSTER_OK, REDIS_CLUSTER_FAIL, ... */ + int size; /* Num of master nodes with at least one slot */ + dict *nodes; /* Hash table of name -> clusterNode structures */ + dict *nodes_black_list; /* Nodes we don't re-add for a few seconds. */ + clusterNode *migrating_slots_to[REDIS_CLUSTER_SLOTS]; + clusterNode *importing_slots_from[REDIS_CLUSTER_SLOTS]; + clusterNode *slots[REDIS_CLUSTER_SLOTS]; + zskiplist *slots_to_keys; + /* The following fields are used to take the slave state on elections. */ + mstime_t failover_auth_time; /* Time of previous or next election. */ + int failover_auth_count; /* Number of votes received so far. */ + int failover_auth_sent; /* True if we already asked for votes. */ + int failover_auth_rank; /* This slave rank for current auth request. */ + uint64_t failover_auth_epoch; /* Epoch of the current election. */ + int cant_failover_reason; /* Why a slave is currently not able to + failover. See the CANT_FAILOVER_* macros. */ + /* Manual failover state in common. */ + mstime_t mf_end; /* Manual failover time limit (ms unixtime). + It is zero if there is no MF in progress. */ + /* Manual failover state of master. */ + clusterNode *mf_slave; /* Slave performing the manual failover. */ + /* Manual failover state of slave. */ + long long mf_master_offset; /* Master offset the slave needs to start MF + or zero if stil not received. */ + int mf_can_start; /* If non-zero signal that the manual failover + can start requesting masters vote. */ + /* The followign fields are used by masters to take state on elections. 
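In the clusterNode structure above, 'slots' is a 16384-bit bitmap (REDIS_CLUSTER_SLOTS/8 = 2048 bytes), one bit per hash slot, with a set bit meaning the node claims that slot. The real accessors live in cluster.c and are not shown in this hunk; the helpers below are only an illustration of the usual LSB-first bit layout, and their names are assumptions rather than the patch's API:

    /* Illustrative bitmap helpers (names are assumptions, not the patch's API).
     * 'slots' points to a REDIS_CLUSTER_SLOTS/8 byte array as in clusterNode. */
    static int slotBitIsSet(const unsigned char *slots, int slot) {
        return (slots[slot >> 3] >> (slot & 7)) & 1;
    }

    static void slotBitSet(unsigned char *slots, int slot) {
        slots[slot >> 3] |= 1 << (slot & 7);
    }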
*/ + uint64_t lastVoteEpoch; /* Epoch of the last vote granted. */ + int todo_before_sleep; /* Things to do in clusterBeforeSleep(). */ + long long stats_bus_messages_sent; /* Num of msg sent via cluster bus. */ + long long stats_bus_messages_received; /* Num of msg rcvd via cluster bus.*/ +} clusterState; + +/* clusterState todo_before_sleep flags. */ +#define CLUSTER_TODO_HANDLE_FAILOVER (1<<0) +#define CLUSTER_TODO_UPDATE_STATE (1<<1) +#define CLUSTER_TODO_SAVE_CONFIG (1<<2) +#define CLUSTER_TODO_FSYNC_CONFIG (1<<3) + +/* Redis cluster messages header */ + +/* Note that the PING, PONG and MEET messages are actually the same exact + * kind of packet. PONG is the reply to ping, in the exact format as a PING, + * while MEET is a special PING that forces the receiver to add the sender + * as a node (if it is not already in the list). */ +#define CLUSTERMSG_TYPE_PING 0 /* Ping */ +#define CLUSTERMSG_TYPE_PONG 1 /* Pong (reply to Ping) */ +#define CLUSTERMSG_TYPE_MEET 2 /* Meet "let's join" message */ +#define CLUSTERMSG_TYPE_FAIL 3 /* Mark node xxx as failing */ +#define CLUSTERMSG_TYPE_PUBLISH 4 /* Pub/Sub Publish propagation */ +#define CLUSTERMSG_TYPE_FAILOVER_AUTH_REQUEST 5 /* May I failover? */ +#define CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK 6 /* Yes, you have my vote */ +#define CLUSTERMSG_TYPE_UPDATE 7 /* Another node slots configuration */ +#define CLUSTERMSG_TYPE_MFSTART 8 /* Pause clients for manual failover */ + +/* Initially we don't know our "name", but we'll find it once we connect + * to the first node, using the getsockname() function. Then we'll use this + * address for all the next messages. */ +typedef struct { + char nodename[REDIS_CLUSTER_NAMELEN]; + uint32_t ping_sent; + uint32_t pong_received; + char ip[REDIS_IP_STR_LEN]; /* IP address last time it was seen */ + uint16_t port; /* port last time it was seen */ + uint16_t flags; /* node->flags copy */ + uint16_t notused1; /* Some room for future improvements. */ + uint32_t notused2; +} clusterMsgDataGossip; + +typedef struct { + char nodename[REDIS_CLUSTER_NAMELEN]; +} clusterMsgDataFail; + +typedef struct { + uint32_t channel_len; + uint32_t message_len; + /* We can't reclare bulk_data as bulk_data[] since this structure is + * nested. The 8 bytes are removed from the count during the message + * length computation. */ + unsigned char bulk_data[8]; +} clusterMsgDataPublish; + +typedef struct { + uint64_t configEpoch; /* Config epoch of the specified instance. */ + char nodename[REDIS_CLUSTER_NAMELEN]; /* Name of the slots owner. */ + unsigned char slots[REDIS_CLUSTER_SLOTS/8]; /* Slots bitmap. */ +} clusterMsgDataUpdate; + +union clusterMsgData { + /* PING, MEET and PONG */ + struct { + /* Array of N clusterMsgDataGossip structures */ + clusterMsgDataGossip gossip[1]; + } ping; + + /* FAIL */ + struct { + clusterMsgDataFail about; + } fail; + + /* PUBLISH */ + struct { + clusterMsgDataPublish msg; + } publish; + + /* UPDATE */ + struct { + clusterMsgDataUpdate nodecfg; + } update; +}; + +#define CLUSTER_PROTO_VER 0 /* Cluster bus protocol version. */ + +typedef struct { + char sig[4]; /* Siganture "RCmb" (Redis Cluster message bus). */ + uint32_t totlen; /* Total length of this message */ + uint16_t ver; /* Protocol version, currently set to 0. */ + uint16_t notused0; /* 2 bytes not used. */ + uint16_t type; /* Message type */ + uint16_t count; /* Only used for some kind of messages. */ + uint64_t currentEpoch; /* The epoch accordingly to the sending node. 
*/ + uint64_t configEpoch; /* The config epoch if it's a master, or the last + epoch advertised by its master if it is a + slave. */ + uint64_t offset; /* Master replication offset if node is a master or + processed replication offset if node is a slave. */ + char sender[REDIS_CLUSTER_NAMELEN]; /* Name of the sender node */ + unsigned char myslots[REDIS_CLUSTER_SLOTS/8]; + char slaveof[REDIS_CLUSTER_NAMELEN]; + char notused1[32]; /* 32 bytes reserved for future usage. */ + uint16_t port; /* Sender TCP base port */ + uint16_t flags; /* Sender node flags */ + unsigned char state; /* Cluster state from the POV of the sender */ + unsigned char mflags[3]; /* Message flags: CLUSTERMSG_FLAG[012]_... */ + union clusterMsgData data; +} clusterMsg; + +#define CLUSTERMSG_MIN_LEN (sizeof(clusterMsg)-sizeof(union clusterMsgData)) + +/* Message flags better specify the packet content or are used to + * provide some information about the node state. */ +#define CLUSTERMSG_FLAG0_PAUSED (1<<0) /* Master paused for manual failover. */ +#define CLUSTERMSG_FLAG0_FORCEACK (1<<1) /* Give ACK to AUTH_REQUEST even if + master is up. */ + +/* ---------------------- API exported outside cluster.c -------------------- */ +clusterNode *getNodeByQuery(redisClient *c, struct redisCommand *cmd, robj **argv, int argc, int *hashslot, int *ask); +int clusterRedirectBlockedClientIfNeeded(redisClient *c); +void clusterRedirectClient(redisClient *c, clusterNode *n, int hashslot, int error_code); + +#endif /* __REDIS_CLUSTER_H */ diff --git a/redis.submodule/src/crc16.c b/redis.submodule/src/crc16.c new file mode 100644 index 0000000..1ec9161 --- /dev/null +++ b/redis.submodule/src/crc16.c @@ -0,0 +1,88 @@ +#include "redis.h" + +/* + * Copyright 2001-2010 Georges Menie (www.menie.org) + * Copyright 2010-2012 Salvatore Sanfilippo (adapted to Redis coding style) + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* CRC16 implementation according to CCITT standards. 
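Regarding the clusterMsg layout defined earlier in this header: CLUSTERMSG_MIN_LEN is the fixed header size without the variable payload, so the expected total length of, for example, a PING/PONG/MEET packet that gossips about N nodes is that header plus N clusterMsgDataGossip records. A sketch of the arithmetic follows, assuming the definitions from cluster.h above are in scope; the actual packet-size computation is done in cluster.c, which is not shown in this excerpt, and pingMsgLen() is a hypothetical name:

    #include <stdint.h>

    /* Illustrative only: expected 'totlen' of a PING-family packet carrying
     * 'gossipcount' gossip entries, given the clusterMsg layout above. */
    static uint32_t pingMsgLen(int gossipcount) {
        uint32_t totlen = sizeof(clusterMsg) - sizeof(union clusterMsgData);
        totlen += sizeof(clusterMsgDataGossip) * gossipcount;
        return totlen;
    }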
+ * + * Note by @antirez: this is actually the XMODEM CRC 16 algorithm, using the + * following parameters: + * + * Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" + * Width : 16 bit + * Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) + * Initialization : 0000 + * Reflect Input byte : False + * Reflect Output CRC : False + * Xor constant to output CRC : 0000 + * Output for "123456789" : 31C3 + */ + +static const uint16_t crc16tab[256]= { + 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, + 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, + 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, + 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, + 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, + 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, + 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, + 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, + 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, + 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, + 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, + 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, + 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, + 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, + 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, + 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, + 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, + 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, + 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, + 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, + 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, + 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, + 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, + 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, + 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, + 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, + 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, + 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, + 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, + 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, + 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, + 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 +}; + +uint16_t crc16(const char *buf, int len) { + int counter; + uint16_t crc = 0; + for (counter = 0; counter < len; counter++) + crc = (crc<<8) ^ crc16tab[((crc>>8) ^ *buf++)&0x00FF]; + return crc; +} diff --git a/redis.submodule/src/redis-trib.rb b/redis.submodule/src/redis-trib.rb new file mode 100755 index 0000000..068e60d --- /dev/null +++ b/redis.submodule/src/redis-trib.rb @@ -0,0 +1,1388 @@ +#!/usr/bin/env ruby + +# TODO (temporary here, we'll move this into the Github issues once +# redis-trib initial implementation is completed). +# +# - Make sure that if the rehashing fails in the middle redis-trib will try +# to recover. +# - When redis-trib performs a cluster check, if it detects a slot move in +# progress it should prompt the user to continue the move from where it +# stopped. +# - Gracefully handle Ctrl+C in move_slot to prompt the user if really stop +# while rehashing, and performing the best cleanup possible if the user +# forces the quit. +# - When doing "fix" set a global Fix to true, and prompt the user to +# fix the problem if automatically fixable every time there is something +# to fix. 
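As a quick sanity check for the table-driven crc16() added above: it should reproduce the test vector documented in its own header comment ("123456789" -> 0x31C3), and a key's cluster slot is that CRC reduced modulo 16384. The self-test below is hypothetical and not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    uint16_t crc16(const char *buf, int len);   /* from crc16.c above */

    int main(void) {
        assert(crc16("123456789", 9) == 0x31C3);      /* documented test vector */
        unsigned int slot = crc16("foo", 3) & 16383;  /* hash slot of key "foo" */
        assert(slot < 16384);
        return 0;
    }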
For instance: +# 1) If there is a node that pretend to receive a slot, or to migrate a +# slot, but has no entries in that slot, fix it. +# 2) If there is a node having keys in slots that are not owned by it +# fix this condition moving the entries in the same node. +# 3) Perform more possibly slow tests about the state of the cluster. +# 4) When aborted slot migration is detected, fix it. + +require 'rubygems' +require 'redis' + +ClusterHashSlots = 16384 + +def xputs(s) + case s[0..2] + when ">>>" + color="29;1" + when "[ER" + color="31;1" + when "[OK" + color="32" + when "[FA","***" + color="33" + else + color=nil + end + + color = nil if ENV['TERM'] != "xterm" + print "\033[#{color}m" if color + print s + print "\033[0m" if color + print "\n" +end + +class ClusterNode + def initialize(addr) + s = addr.split(":") + if s.length < 2 + puts "Invalid IP or Port (given as #{addr}) - use IP:Port format" + exit 1 + end + port = s.pop # removes port from split array + ip = s.join(":") # if s.length > 1 here, it's IPv6, so restore address + @r = nil + @info = {} + @info[:host] = ip + @info[:port] = port + @info[:slots] = {} + @info[:migrating] = {} + @info[:importing] = {} + @info[:replicate] = false + @dirty = false # True if we need to flush slots info into node. + @friends = [] + end + + def friends + @friends + end + + def slots + @info[:slots] + end + + def has_flag?(flag) + @info[:flags].index(flag) + end + + def to_s + "#{@info[:host]}:#{@info[:port]}" + end + + def connect(o={}) + return if @r + print "Connecting to node #{self}: " + STDOUT.flush + begin + @r = Redis.new(:host => @info[:host], :port => @info[:port], :timeout => 60) + @r.ping + rescue + xputs "[ERR] Sorry, can't connect to node #{self}" + exit 1 if o[:abort] + @r = nil + end + xputs "OK" + end + + def assert_cluster + info = @r.info + if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0 + xputs "[ERR] Node #{self} is not configured as a cluster node." + exit 1 + end + end + + def assert_empty + if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) || + (@r.info['db0']) + xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0." + exit 1 + end + end + + def load_info(o={}) + self.connect + nodes = @r.cluster("nodes").split("\n") + nodes.each{|n| + # name addr flags role ping_sent ping_recv link_status slots + split = n.split + name,addr,flags,master_id,ping_sent,ping_recv,config_epoch,link_status = split[0..6] + slots = split[8..-1] + info = { + :name => name, + :addr => addr, + :flags => flags.split(","), + :replicate => master_id, + :ping_sent => ping_sent.to_i, + :ping_recv => ping_recv.to_i, + :link_status => link_status + } + info[:replicate] = false if master_id == "-" + + if info[:flags].index("myself") + @info = @info.merge(info) + @info[:slots] = {} + slots.each{|s| + if s[0..0] == '[' + if s.index("->-") # Migrating + slot,dst = s[1..-1].split("->-") + @info[:migrating][slot.to_i] = dst + elsif s.index("-<-") # Importing + slot,src = s[1..-1].split("-<-") + @info[:importing][slot.to_i] = src + end + elsif s.index("-") + start,stop = s.split("-") + self.add_slots((start.to_i)..(stop.to_i)) + else + self.add_slots((s.to_i)..(s.to_i)) + end + } if slots + @dirty = false + @r.cluster("info").split("\n").each{|e| + k,v=e.split(":") + k = k.to_sym + v.chop! 
+ if k != :cluster_state + @info[k] = v.to_i + else + @info[k] = v + end + } + elsif o[:getfriends] + @friends << info + end + } + end + + def add_slots(slots) + slots.each{|s| + @info[:slots][s] = :new + } + @dirty = true + end + + def set_as_replica(node_id) + @info[:replicate] = node_id + @dirty = true + end + + def flush_node_config + return if !@dirty + if @info[:replicate] + begin + @r.cluster("replicate",@info[:replicate]) + rescue + # If the cluster did not already joined it is possible that + # the slave does not know the master node yet. So on errors + # we return ASAP leaving the dirty flag set, to flush the + # config later. + return + end + else + new = [] + @info[:slots].each{|s,val| + if val == :new + new << s + @info[:slots][s] = true + end + } + @r.cluster("addslots",*new) + end + @dirty = false + end + + def info_string + # We want to display the hash slots assigned to this node + # as ranges, like in: "1-5,8-9,20-25,30" + # + # Note: this could be easily written without side effects, + # we use 'slots' just to split the computation into steps. + + # First step: we want an increasing array of integers + # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30] + slots = @info[:slots].keys.sort + + # As we want to aggregate adjacent slots we convert all the + # slot integers into ranges (with just one element) + # So we have something like [1..1,2..2, ... and so forth. + slots.map!{|x| x..x} + + # Finally we group ranges with adjacent elements. + slots = slots.reduce([]) {|a,b| + if !a.empty? && b.first == (a[-1].last)+1 + a[0..-2] + [(a[-1].first)..(b.last)] + else + a + [b] + end + } + + # Now our task is easy, we just convert ranges with just one + # element into a number, and a real range into a start-end format. + # Finally we join the array using the comma as separator. + slots = slots.map{|x| + x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}" + }.join(",") + + role = self.has_flag?("master") ? "M" : "S" + + if self.info[:replicate] and @dirty + is = "S: #{self.info[:name]} #{self.to_s}" + else + is = "#{role}: #{self.info[:name]} #{self.to_s}\n"+ + " slots:#{slots} (#{self.slots.length} slots) "+ + "#{(self.info[:flags]-["myself"]).join(",")}" + end + if self.info[:replicate] + is += "\n replicates #{info[:replicate]}" + elsif self.has_flag?("master") && self.info[:replicas] + is += "\n #{info[:replicas].length} additional replica(s)" + end + is + end + + # Return a single string representing nodes and associated slots. + # TODO: remove slaves from config when slaves will be handled + # by Redis Cluster. + def get_config_signature + config = [] + @r.cluster("nodes").each_line{|l| + s = l.split + slots = s[8..-1].select {|x| x[0..0] != "["} + next if slots.length == 0 + config << s[0]+":"+(slots.sort.join(",")) + } + config.sort.join("|") + end + + def info + @info + end + + def is_dirty? + @dirty + end + + def r + @r + end +end + +class RedisTrib + def initialize + @nodes = [] + @fix = false + @errors = [] + end + + def check_arity(req_args, num_args) + if ((req_args > 0 and num_args != req_args) || + (req_args < 0 and num_args < req_args.abs)) + xputs "[ERR] Wrong number of arguments for specified sub command" + exit 1 + end + end + + def add_node(node) + @nodes << node + end + + def cluster_error(msg) + @errors << msg + xputs msg + end + + def get_node_by_name(name) + @nodes.each{|n| + return n if n.info[:name] == name.downcase + } + return nil + end + + # This function returns the master that has the least number of replicas + # in the cluster. 
If there are multiple masters with the same smaller + # number of replicas, one at random is returned. + def get_master_with_least_replicas + masters = @nodes.select{|n| n.has_flag? "master"} + sorted = masters.sort{|a,b| + a.info[:replicas].length <=> b.info[:replicas].length + } + sorted[0] + end + + def check_cluster + xputs ">>> Performing Cluster Check (using node #{@nodes[0]})" + show_nodes + check_config_consistency + check_open_slots + check_slots_coverage + end + + # Merge slots of every known node. If the resulting slots are equal + # to ClusterHashSlots, then all slots are served. + def covered_slots + slots = {} + @nodes.each{|n| + slots = slots.merge(n.slots) + } + slots + end + + def check_slots_coverage + xputs ">>> Check slots coverage..." + slots = covered_slots + if slots.length == ClusterHashSlots + xputs "[OK] All #{ClusterHashSlots} slots covered." + else + cluster_error \ + "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes." + fix_slots_coverage if @fix + end + end + + def check_open_slots + xputs ">>> Check for open slots..." + open_slots = [] + @nodes.each{|n| + if n.info[:migrating].size > 0 + cluster_error \ + "[WARNING] Node #{n} has slots in migrating state (#{n.info[:migrating].keys.join(",")})." + open_slots += n.info[:migrating].keys + elsif n.info[:importing].size > 0 + cluster_error \ + "[WARNING] Node #{n} has slots in importing state (#{n.info[:importing].keys.join(",")})." + open_slots += n.info[:importing].keys + end + } + open_slots.uniq! + if open_slots.length > 0 + xputs "[WARNING] The following slots are open: #{open_slots.join(",")}" + end + if @fix + open_slots.each{|slot| fix_open_slot slot} + end + end + + def nodes_with_keys_in_slot(slot) + nodes = [] + @nodes.each{|n| + nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0 + } + nodes + end + + def fix_slots_coverage + not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys + xputs ">>> Fixing slots coverage..." + xputs "List of not covered slots: " + not_covered.join(",") + + # For every slot, take action depending on the actual condition: + # 1) No node has keys for this slot. + # 2) A single node has keys for this slot. + # 3) Multiple nodes have keys for this slot. + slots = {} + not_covered.each{|slot| + nodes = nodes_with_keys_in_slot(slot) + slots[slot] = nodes + xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join}" + } + + none = slots.select {|k,v| v.length == 0} + single = slots.select {|k,v| v.length == 1} + multi = slots.select {|k,v| v.length > 1} + + # Handle case "1": keys in no node. + if none.length > 0 + xputs "The folowing uncovered slots have no keys across the cluster:" + xputs none.keys.join(",") + yes_or_die "Fix these slots by covering with a random node?" + none.each{|slot,nodes| + node = @nodes.sample + xputs ">>> Covering slot #{slot} with #{node}" + node.r.cluster("addslots",slot) + } + end + + # Handle case "2": keys only in one node. + if single.length > 0 + xputs "The folowing uncovered slots have keys in just one node:" + puts single.keys.join(",") + yes_or_die "Fix these slots by covering with those nodes?" + single.each{|slot,nodes| + xputs ">>> Covering slot #{slot} with #{nodes[0]}" + nodes[0].r.cluster("addslots",slot) + } + end + + # Handle case "3": keys in multiple nodes. + if multi.length > 0 + xputs "The folowing uncovered slots have keys in multiple nodes:" + xputs multi.keys.join(",") + yes_or_die "Fix these slots by moving keys into a single node?" 
+ multi.each{|slot,nodes| + xputs ">>> Covering slot #{slot} moving keys to #{nodes[0]}" + # TODO + # 1) Set all nodes as "MIGRATING" for this slot, so that we + # can access keys in the hash slot using ASKING. + # 2) Move everything to node[0] + # 3) Clear MIGRATING from nodes, and ADDSLOTS the slot to + # node[0]. + raise "TODO: Work in progress" + } + end + end + + # Return the owner of the specified slot + def get_slot_owner(slot) + @nodes.each{|n| + n.slots.each{|s,_| + return n if s == slot + } + } + nil + end + + # Slot 'slot' was found to be in importing or migrating state in one or + # more nodes. This function fixes this condition by migrating keys where + # it seems more sensible. + def fix_open_slot(slot) + puts ">>> Fixing open slot #{slot}" + + # Try to obtain the current slot owner, according to the current + # nodes configuration. + owner = get_slot_owner(slot) + + # If there is no slot owner, set as owner the slot with the biggest + # number of keys, among the set of migrating / importing nodes. + if !owner + xputs "*** Fix me, some work to do here." + # Select owner... + # Use ADDSLOTS to assign the slot. + exit 1 + end + + migrating = [] + importing = [] + @nodes.each{|n| + next if n.has_flag? "slave" + if n.info[:migrating][slot] + migrating << n + elsif n.info[:importing][slot] + importing << n + elsif n.r.cluster("countkeysinslot",slot) > 0 && n != owner + xputs "*** Found keys about slot #{slot} in node #{n}!" + importing << n + end + } + puts "Set as migrating in: #{migrating.join(",")}" + puts "Set as importing in: #{importing.join(",")}" + + # Case 1: The slot is in migrating state in one slot, and in + # importing state in 1 slot. That's trivial to address. + if migrating.length == 1 && importing.length == 1 + move_slot(migrating[0],importing[0],slot,:verbose=>true,:fix=>true) + # Case 2: There are multiple nodes that claim the slot as importing, + # they probably got keys about the slot after a restart so opened + # the slot. In this case we just move all the keys to the owner + # according to the configuration. + elsif migrating.length == 0 && importing.length > 0 + xputs ">>> Moving all the #{slot} slot keys to its owner #{owner}" + importing.each {|node| + next if node == owner + move_slot(node,owner,slot,:verbose=>true,:fix=>true,:cold=>true) + xputs ">>> Setting #{slot} as STABLE in #{node}" + node.r.cluster("setslot",slot,"stable") + } + # Case 3: There are no slots claiming to be in importing state, but + # there is a migrating node that actually don't have any key. We + # can just close the slot, probably a reshard interrupted in the middle. + elsif importing.length == 0 && migrating.length == 1 && + migrating[0].r.cluster("getkeysinslot",slot,10).length == 0 + migrating[0].r.cluster("setslot",slot,"stable") + else + xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress). Slot is set as migrating in #{migrating.join(",")}, as importing in #{importing.join(",")}, owner is #{owner}" + end + end + + # Check if all the nodes agree about the cluster configuration + def check_config_consistency + if !is_config_consistent? + cluster_error "[ERR] Nodes don't agree about configuration!" + else + xputs "[OK] All nodes agree about slots configuration." + end + end + + def is_config_consistent? + signatures=[] + @nodes.each{|n| + signatures << n.get_config_signature + } + return signatures.uniq.length == 1 + end + + def wait_cluster_join + print "Waiting for the cluster to join" + while !is_config_consistent? + print "." 
+ STDOUT.flush + sleep 1 + end + print "\n" + end + + def alloc_slots + nodes_count = @nodes.length + masters_count = @nodes.length / (@replicas+1) + masters = [] + + # The first step is to split instances by IP. This is useful as + # we'll try to allocate master nodes in different physical machines + # (as much as possible) and to allocate slaves of a given master in + # different physical machines as well. + # + # This code assumes just that if the IP is different, than it is more + # likely that the instance is running in a different physical host + # or at least a different virtual machine. + ips = {} + @nodes.each{|n| + ips[n.info[:host]] = [] if !ips[n.info[:host]] + ips[n.info[:host]] << n + } + + # Select master instances + puts "Using #{masters_count} masters:" + interleaved = [] + stop = false + while not stop do + # Take one node from each IP until we run out of nodes + # across every IP. + ips.each do |ip,nodes| + if nodes.empty? + # if this IP has no remaining nodes, check for termination + if interleaved.length == nodes_count + # stop when 'interleaved' has accumulated all nodes + stop = true + next + end + else + # else, move one node from this IP to 'interleaved' + interleaved.push nodes.shift + end + end + end + + masters = interleaved.slice!(0, masters_count) + nodes_count -= masters.length + + masters.each{|m| puts m} + + # Alloc slots on masters + slots_per_node = ClusterHashSlots.to_f / masters_count + first = 0 + cursor = 0.0 + masters.each_with_index{|n,masternum| + last = (cursor+slots_per_node-1).round + if last > ClusterHashSlots || masternum == masters.length-1 + last = ClusterHashSlots-1 + end + last = first if last < first # Min step is 1. + n.add_slots first..last + first = last+1 + cursor += slots_per_node + } + + # Select N replicas for every master. + # We try to split the replicas among all the IPs with spare nodes + # trying to avoid the host where the master is running, if possible. + # + # Note we loop two times. The first loop assigns the requested + # number of replicas to each master. The second loop assigns any + # remaining instances as extra replicas to masters. Some masters + # may end up with more than their requested number of replicas, but + # all nodes will be used. + assignment_verbose = false + + [:requested,:unused].each do |assign| + masters.each do |m| + assigned_replicas = 0 + while assigned_replicas < @replicas + break if nodes_count == 0 + if assignment_verbose + if assign == :requested + puts "Requesting total of #{@replicas} replicas " \ + "(#{assigned_replicas} replicas assigned " \ + "so far with #{nodes_count} total remaining)." + elsif assign == :unused + puts "Assigning extra instance to replication " \ + "role too (#{nodes_count} remaining)." + end + end + + # Return the first node not matching our current master + node = interleaved.find{|n| n.info[:host] != m.info[:host]} + + # If we found a node, use it as a best-first match. + # Otherwise, we didn't find a node on a different IP, so we + # go ahead and use a same-IP replica. + if node + slave = node + interleaved.delete node + else + slave = interleaved.shift + end + slave.set_as_replica(m.info[:name]) + nodes_count -= 1 + assigned_replicas += 1 + puts "Adding replica #{slave} to #{m}" + + # If we are in the "assign extra nodes" loop, + # we want to assign one extra replica to each + # master before repeating masters. + # This break lets us assign extra replicas to masters + # in a round-robin way. 
+ break if assign == :unused + end + end + end + end + + def flush_nodes_config + @nodes.each{|n| + n.flush_node_config + } + end + + def show_nodes + @nodes.each{|n| + xputs n.info_string + } + end + + # Redis Cluster config epoch collision resolution code is able to eventually + # set a different epoch to each node after a new cluster is created, but + # it is slow compared to assign a progressive config epoch to each node + # before joining the cluster. However we do just a best-effort try here + # since if we fail is not a problem. + def assign_config_epoch + config_epoch = 1 + @nodes.each{|n| + begin + n.r.cluster("set-config-epoch",config_epoch) + rescue + end + config_epoch += 1 + } + end + + def join_cluster + # We use a brute force approach to make sure the node will meet + # each other, that is, sending CLUSTER MEET messages to all the nodes + # about the very same node. + # Thanks to gossip this information should propagate across all the + # cluster in a matter of seconds. + first = false + @nodes.each{|n| + if !first then first = n.info; next; end # Skip the first node + n.r.cluster("meet",first[:host],first[:port]) + } + end + + def yes_or_die(msg) + print "#{msg} (type 'yes' to accept): " + STDOUT.flush + if !(STDIN.gets.chomp.downcase == "yes") + xputs "*** Aborting..." + exit 1 + end + end + + def load_cluster_info_from_node(nodeaddr) + node = ClusterNode.new(nodeaddr) + node.connect(:abort => true) + node.assert_cluster + node.load_info(:getfriends => true) + add_node(node) + node.friends.each{|f| + next if f[:flags].index("noaddr") || + f[:flags].index("disconnected") || + f[:flags].index("fail") + fnode = ClusterNode.new(f[:addr]) + fnode.connect() + next if !fnode.r + begin + fnode.load_info() + add_node(fnode) + rescue => e + xputs "[ERR] Unable to load info for node #{fnode}" + end + } + populate_nodes_replicas_info + end + + # This function is called by load_cluster_info_from_node in order to + # add additional information to every node as a list of replicas. + def populate_nodes_replicas_info + # Start adding the new field to every node. + @nodes.each{|n| + n.info[:replicas] = [] + } + + # Populate the replicas field using the replicate field of slave + # nodes. + @nodes.each{|n| + if n.info[:replicate] + master = get_node_by_name(n.info[:replicate]) + if !master + xputs "*** WARNING: #{n} claims to be slave of unknown node ID #{n.info[:replicate]}." + else + master.info[:replicas] << n + end + end + } + end + + # Given a list of source nodes return a "resharding plan" + # with what slots to move in order to move "numslots" slots to another + # instance. + def compute_reshard_table(sources,numslots) + moved = [] + # Sort from bigger to smaller instance, for two reasons: + # 1) If we take less slots than instances it is better to start + # getting from the biggest instances. + # 2) We take one slot more from the first instance in the case of not + # perfect divisibility. Like we have 3 nodes and need to get 10 + # slots, we take 4 from the first, and 3 from the rest. So the + # biggest is always the first. + sources = sources.sort{|a,b| b.slots.length <=> a.slots.length} + source_tot_slots = sources.inject(0) {|sum,source| + sum+source.slots.length + } + sources.each_with_index{|s,i| + # Every node will provide a number of slots proportional to the + # slots it has assigned. 
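In C terms, the per-source share computed by the Ruby lines that follow (take 'numslots' proportionally to each source's slot count, rounding up for the first and therefore largest source and down for the others) can be sketched as below; slots_to_take() is an illustrative name, not code from redis-trib:

    #include <math.h>

    /* Sketch of compute_reshard_table's arithmetic: slots to take from source
     * 'i', with sources sorted from the largest to the smallest slot count. */
    static int slots_to_take(int i, int numslots, int source_slots, int total_source_slots) {
        double share = (double)numslots / total_source_slots * source_slots;
        return i == 0 ? (int)ceil(share) : (int)floor(share);
    }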
+ n = (numslots.to_f/source_tot_slots*s.slots.length) + if i == 0 + n = n.ceil + else + n = n.floor + end + s.slots.keys.sort[(0...n)].each{|slot| + if moved.length < numslots + moved << {:source => s, :slot => slot} + end + } + } + return moved + end + + def show_reshard_table(table) + table.each{|e| + puts " Moving slot #{e[:slot]} from #{e[:source].info[:name]}" + } + end + + # Move slots between source and target nodes using MIGRATE. + # + # Options: + # :verbose -- Print a dot for every moved key. + # :fix -- We are moving in the context of a fix. Use REPLACE. + # :cold -- Move keys without opening / reconfiguring the nodes. + def move_slot(source,target,slot,o={}) + # We start marking the slot as importing in the destination node, + # and the slot as migrating in the target host. Note that the order of + # the operations is important, as otherwise a client may be redirected + # to the target node that does not yet know it is importing this slot. + print "Moving slot #{slot} from #{source} to #{target}: "; STDOUT.flush + if !o[:cold] + target.r.cluster("setslot",slot,"importing",source.info[:name]) + source.r.cluster("setslot",slot,"migrating",target.info[:name]) + end + # Migrate all the keys from source to target using the MIGRATE command + while true + keys = source.r.cluster("getkeysinslot",slot,10) + break if keys.length == 0 + keys.each{|key| + begin + source.r.client.call(["migrate",target.info[:host],target.info[:port],key,0,15000]) + rescue => e + if o[:fix] && e.to_s =~ /BUSYKEY/ + xputs "*** Target key #{key} exists. Replacing it for FIX." + source.r.client.call(["migrate",target.info[:host],target.info[:port],key,0,15000,:replace]) + else + puts "" + xputs "[ERR] #{e}" + exit 1 + end + end + print "." if o[:verbose] + STDOUT.flush + } + end + + puts + # Set the new node as the owner of the slot in all the known nodes. + if !o[:cold] + @nodes.each{|n| + n.r.cluster("setslot",slot,"node",target.info[:name]) + } + end + end + + # redis-trib subcommands implementations + + def check_cluster_cmd(argv,opt) + load_cluster_info_from_node(argv[0]) + check_cluster + end + + def fix_cluster_cmd(argv,opt) + @fix = true + load_cluster_info_from_node(argv[0]) + check_cluster + end + + def reshard_cluster_cmd(argv,opt) + load_cluster_info_from_node(argv[0]) + check_cluster + if @errors.length != 0 + puts "*** Please fix your cluster problems before resharding" + exit 1 + end + + # Get number of slots + if opt['slots'] + numslots = opt['slots'].to_i + else + numslots = 0 + while numslots <= 0 or numslots > ClusterHashSlots + print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? " + numslots = STDIN.gets.to_i + end + end + + # Get the target instance + if opt['to'] + target = get_node_by_name(opt['to']) + if !target || target.has_flag?("slave") + xputs "*** The specified node is not known or not a master, please retry." + exit 1 + end + else + target = nil + while not target + print "What is the receiving node ID? " + target = get_node_by_name(STDIN.gets.chop) + if !target || target.has_flag?("slave") + xputs "*** The specified node is not known or not a master, please retry." + target = nil + end + end + end + + # Get the source instances + sources = [] + if opt['from'] + opt['from'].split(',').each{|node_id| + if node_id == "all" + sources = "all" + break + end + src = get_node_by_name(node_id) + if !src || src.has_flag?("slave") + xputs "*** The specified node is not known or is not a master, please retry." 
+ exit 1 + end + sources << src + } + else + xputs "Please enter all the source node IDs." + xputs " Type 'all' to use all the nodes as source nodes for the hash slots." + xputs " Type 'done' once you entered all the source nodes IDs." + while true + print "Source node ##{sources.length+1}:" + line = STDIN.gets.chop + src = get_node_by_name(line) + if line == "done" + break + elsif line == "all" + sources = "all" + break + elsif !src || src.has_flag?("slave") + xputs "*** The specified node is not known or is not a master, please retry." + elsif src.info[:name] == target.info[:name] + xputs "*** It is not possible to use the target node as source node." + else + sources << src + end + end + end + + if sources.length == 0 + puts "*** No source nodes given, operation aborted" + exit 1 + end + + # Handle soures == all. + if sources == "all" + sources = [] + @nodes.each{|n| + next if n.info[:name] == target.info[:name] + next if n.has_flag?("slave") + sources << n + } + end + + # Check if the destination node is the same of any source nodes. + if sources.index(target) + xputs "*** Target node is also listed among the source nodes!" + exit 1 + end + + puts "\nReady to move #{numslots} slots." + puts " Source nodes:" + sources.each{|s| puts " "+s.info_string} + puts " Destination node:" + puts " #{target.info_string}" + reshard_table = compute_reshard_table(sources,numslots) + puts " Resharding plan:" + show_reshard_table(reshard_table) + if !opt['yes'] + print "Do you want to proceed with the proposed reshard plan (yes/no)? " + yesno = STDIN.gets.chop + exit(1) if (yesno != "yes") + end + reshard_table.each{|e| + move_slot(e[:source],target,e[:slot],:verbose=>true) + } + end + + # This is an helper function for create_cluster_cmd that verifies if + # the number of nodes and the specified replicas have a valid configuration + # where there are at least three master nodes and enough replicas per node. + def check_create_parameters + masters = @nodes.length/(@replicas+1) + if masters < 3 + puts "*** ERROR: Invalid configuration for cluster creation." + puts "*** Redis Cluster requires at least 3 master nodes." + puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node." + puts "*** At least #{3*(@replicas+1)} nodes are required." + exit 1 + end + end + + def create_cluster_cmd(argv,opt) + opt = {'replicas' => 0}.merge(opt) + @replicas = opt['replicas'].to_i + + xputs ">>> Creating cluster" + argv[0..-1].each{|n| + node = ClusterNode.new(n) + node.connect(:abort => true) + node.assert_cluster + node.load_info + node.assert_empty + add_node(node) + } + check_create_parameters + xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..." + alloc_slots + show_nodes + yes_or_die "Can I set the above configuration?" + flush_nodes_config + xputs ">>> Nodes configuration updated" + xputs ">>> Assign a different config epoch to each node" + assign_config_epoch + xputs ">>> Sending CLUSTER MEET messages to join the cluster" + join_cluster + # Give one second for the join to start, in order to avoid that + # wait_cluster_join will find all the nodes agree about the config as + # they are still empty with unassigned slots. 
+ sleep 1 + wait_cluster_join + flush_nodes_config # Useful for the replicas + check_cluster + end + + def addnode_cluster_cmd(argv,opt) + xputs ">>> Adding node #{argv[0]} to cluster #{argv[1]}" + + # Check the existing cluster + load_cluster_info_from_node(argv[1]) + check_cluster + + # If --master-id was specified, try to resolve it now so that we + # abort before starting with the node configuration. + if opt['slave'] + if opt['master-id'] + master = get_node_by_name(opt['master-id']) + if !master + xputs "[ERR] No such master ID #{opt['master-id']}" + end + else + master = get_master_with_least_replicas + xputs "Automatically selected master #{master}" + end + end + + # Add the new node + new = ClusterNode.new(argv[0]) + new.connect(:abort => true) + new.assert_cluster + new.load_info + new.assert_empty + first = @nodes.first.info + add_node(new) + + # Send CLUSTER MEET command to the new node + xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster." + new.r.cluster("meet",first[:host],first[:port]) + + # Additional configuration is needed if the node is added as + # a slave. + if opt['slave'] + wait_cluster_join + xputs ">>> Configure node as replica of #{master}." + new.r.cluster("replicate",master.info[:name]) + end + xputs "[OK] New node added correctly." + end + + def delnode_cluster_cmd(argv,opt) + id = argv[1].downcase + xputs ">>> Removing node #{id} from cluster #{argv[0]}" + + # Load cluster information + load_cluster_info_from_node(argv[0]) + + # Check if the node exists and is not empty + node = get_node_by_name(id) + + if !node + xputs "[ERR] No such node ID #{id}" + exit 1 + end + + if node.slots.length != 0 + xputs "[ERR] Node #{node} is not empty! Reshard data away and try again." + exit 1 + end + + # Send CLUSTER FORGET to all the nodes but the node to remove + xputs ">>> Sending CLUSTER FORGET messages to the cluster..." + @nodes.each{|n| + next if n == node + if n.info[:replicate] && n.info[:replicate].downcase == id + # Reconfigure the slave to replicate with some other node + master = get_master_with_least_replicas + xputs ">>> #{n} as replica of #{master}" + n.r.cluster("replicate",master.info[:name]) + end + n.r.cluster("forget",argv[1]) + } + + # Finally shutdown the node + xputs ">>> SHUTDOWN the node." + node.r.shutdown + end + + def set_timeout_cluster_cmd(argv,opt) + timeout = argv[1].to_i + if timeout < 100 + puts "Setting a node timeout of less than 100 milliseconds is a bad idea." + exit 1 + end + + # Load cluster information + load_cluster_info_from_node(argv[0]) + ok_count = 0 + err_count = 0 + + # Send CLUSTER FORGET to all the nodes but the node to remove + xputs ">>> Reconfiguring node timeout in every cluster node..." + @nodes.each{|n| + begin + n.r.config("set","cluster-node-timeout",timeout) + n.r.config("rewrite") + ok_count += 1 + xputs "*** New timeout set for #{n}" + rescue => e + puts "ERR setting node-timeot for #{n}: #{e}" + err_count += 1 + end + } + xputs ">>> New node timeout set. #{ok_count} OK, #{err_count} ERR." 
+ end + + def call_cluster_cmd(argv,opt) + cmd = argv[1..-1] + cmd[0] = cmd[0].upcase + + # Load cluster information + load_cluster_info_from_node(argv[0]) + xputs ">>> Calling #{cmd.join(" ")}" + @nodes.each{|n| + begin + res = n.r.send(*cmd) + puts "#{n}: #{res}" + rescue => e + puts "#{n}: #{e}" + end + } + end + + def import_cluster_cmd(argv,opt) + source_addr = opt['from'] + xputs ">>> Importing data from #{source_addr} to cluster #{argv[1]}" + use_copy = opt['copy'] + use_replace = opt['replace'] + + # Check the existing cluster. + load_cluster_info_from_node(argv[0]) + check_cluster + + # Connect to the source node. + xputs ">>> Connecting to the source Redis instance" + src_host,src_port = source_addr.split(":") + source = Redis.new(:host =>src_host, :port =>src_port) + if source.info['cluster_enabled'].to_i == 1 + xputs "[ERR] The source node should not be a cluster node." + end + xputs "*** Importing #{source.dbsize} keys from DB 0" + + # Build a slot -> node map + slots = {} + @nodes.each{|n| + n.slots.each{|s,_| + slots[s] = n + } + } + + # Use SCAN to iterate over the keys, migrating to the + # right node as needed. + cursor = nil + while cursor != 0 + cursor,keys = source.scan(cursor, :count => 1000) + cursor = cursor.to_i + keys.each{|k| + # Migrate keys using the MIGRATE command. + slot = key_to_slot(k) + target = slots[slot] + print "Migrating #{k} to #{target}: " + STDOUT.flush + begin + cmd = ["migrate",target.info[:host],target.info[:port],k,0,15000] + cmd << :copy if use_copy + cmd << :replace if use_replace + source.client.call(cmd) + rescue => e + puts e + else + puts "OK" + end + } + end + end + + def help_cluster_cmd(argv,opt) + show_help + exit 0 + end + + # Parse the options for the specific command "cmd". + # Returns an hash populate with option => value pairs, and the index of + # the first non-option argument in ARGV. + def parse_options(cmd) + idx = 1 ; # Current index into ARGV + options={} + while idx < ARGV.length && ARGV[idx][0..1] == '--' + if ARGV[idx][0..1] == "--" + option = ARGV[idx][2..-1] + idx += 1 + if ALLOWED_OPTIONS[cmd] == nil || ALLOWED_OPTIONS[cmd][option] == nil + puts "Unknown option '#{option}' for command '#{cmd}'" + exit 1 + end + if ALLOWED_OPTIONS[cmd][option] + value = ARGV[idx] + idx += 1 + else + value = true + end + options[option] = value + else + # Remaining arguments are not options. + break + end + end + + # Enforce mandatory options + if ALLOWED_OPTIONS[cmd] + ALLOWED_OPTIONS[cmd].each {|option,val| + if !options[option] && val == :required + puts "Option '--#{option}' is required "+ \ + "for subcommand '#{cmd}'" + exit 1 + end + } + end + return options,idx + end +end + +################################################################################# +# Libraries +# +# We try to don't depend on external libs since this is a critical part +# of Redis Cluster. +################################################################################# + +# This is the CRC16 algorithm used by Redis Cluster to hash keys. +# Implementation according to CCITT standards. 
+# +# This is actually the XMODEM CRC 16 algorithm, using the +# following parameters: +# +# Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" +# Width : 16 bit +# Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) +# Initialization : 0000 +# Reflect Input byte : False +# Reflect Output CRC : False +# Xor constant to output CRC : 0000 +# Output for "123456789" : 31C3 + +module RedisClusterCRC16 + def RedisClusterCRC16.crc16(bytes) + crc = 0 + bytes.each_byte{|b| + crc = ((crc<<8) & 0xffff) ^ XMODEMCRC16Lookup[((crc>>8)^b) & 0xff] + } + crc + end + +private + XMODEMCRC16Lookup = [ + 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, + 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, + 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, + 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, + 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, + 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, + 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, + 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, + 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, + 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, + 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, + 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, + 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, + 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, + 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, + 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, + 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, + 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, + 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, + 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, + 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, + 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, + 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, + 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, + 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, + 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, + 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, + 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, + 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, + 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, + 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, + 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 + ] +end + +# Turn a key name into the corrisponding Redis Cluster slot. +def key_to_slot(key) + # Only hash what is inside {...} if there is such a pattern in the key. + # Note that the specification requires the content that is between + # the first { and the first } after the first {. If we found {} without + # nothing in the middle, the whole key is hashed as usually. + s = key.index "{" + if s + e = key.index "}",s+1 + if e && e != s+1 + key = key[s+1..e-1] + end + end + RedisClusterCRC16.crc16(key) % 16384 +end + +################################################################################# +# Definition of commands +################################################################################# + +COMMANDS={ + "create" => ["create_cluster_cmd", -2, "host1:port1 ... 
hostN:portN"], + "check" => ["check_cluster_cmd", 2, "host:port"], + "fix" => ["fix_cluster_cmd", 2, "host:port"], + "reshard" => ["reshard_cluster_cmd", 2, "host:port"], + "add-node" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"], + "del-node" => ["delnode_cluster_cmd", 3, "host:port node_id"], + "set-timeout" => ["set_timeout_cluster_cmd", 3, "host:port milliseconds"], + "call" => ["call_cluster_cmd", -3, "host:port command arg arg .. arg"], + "import" => ["import_cluster_cmd", 2, "host:port"], + "help" => ["help_cluster_cmd", 1, "(show this help)"] +} + +ALLOWED_OPTIONS={ + "create" => {"replicas" => true}, + "add-node" => {"slave" => false, "master-id" => true}, + "import" => {"from" => :required, "copy" => false, "replace" => false}, + "reshard" => {"from" => true, "to" => true, "slots" => true, "yes" => false} +} + +def show_help + puts "Usage: redis-trib \n\n" + COMMANDS.each{|k,v| + o = "" + puts " #{k.ljust(15)} #{v[2]}" + if ALLOWED_OPTIONS[k] + ALLOWED_OPTIONS[k].each{|optname,has_arg| + puts " --#{optname}" + (has_arg ? " " : "") + } + end + } + puts "\nFor check, fix, reshard, del-node, set-timeout you can specify the host and port of any working node in the cluster.\n" +end + +# Sanity check +if ARGV.length == 0 + show_help + exit 1 +end + +rt = RedisTrib.new +cmd_spec = COMMANDS[ARGV[0].downcase] +if !cmd_spec + puts "Unknown redis-trib subcommand '#{ARGV[0]}'" + exit 1 +end + +# Parse options +cmd_options,first_non_option = rt.parse_options(ARGV[0].downcase) +rt.check_arity(cmd_spec[1],ARGV.length-(first_non_option-1)) + +# Dispatch +rt.send(cmd_spec[0],ARGV[first_non_option..-1],cmd_options) diff --git a/redis.submodule/tests/cluster/cluster.tcl b/redis.submodule/tests/cluster/cluster.tcl new file mode 100644 index 0000000..0647914 --- /dev/null +++ b/redis.submodule/tests/cluster/cluster.tcl @@ -0,0 +1,130 @@ +# Cluster-specific test functions. +# +# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com +# This software is released under the BSD License. See the COPYING file for +# more information. + +# Returns a parsed CLUSTER NODES output as a list of dictionaries. +proc get_cluster_nodes id { + set lines [split [R $id cluster nodes] "\r\n"] + set nodes {} + foreach l $lines { + set l [string trim $l] + if {$l eq {}} continue + set args [split $l] + set node [dict create \ + id [lindex $args 0] \ + addr [lindex $args 1] \ + flags [split [lindex $args 2] ,] \ + slaveof [lindex $args 3] \ + ping_sent [lindex $args 4] \ + pong_recv [lindex $args 5] \ + config_epoch [lindex $args 6] \ + linkstate [lindex $args 7] \ + slots [lrange $args 8 -1] \ + ] + lappend nodes $node + } + return $nodes +} + +# Test node for flag. +proc has_flag {node flag} { + expr {[lsearch -exact [dict get $node flags] $flag] != -1} +} + +# Returns the parsed myself node entry as a dictionary. +proc get_myself id { + set nodes [get_cluster_nodes $id] + foreach n $nodes { + if {[has_flag $n myself]} {return $n} + } + return {} +} + +# Return the value of the specified CLUSTER INFO field. +proc CI {n field} { + get_info_field [R $n cluster info] $field +} + +# Assuming nodes are reest, this function performs slots allocation. +# Only the first 'n' nodes are used. +proc cluster_allocate_slots {n} { + set slot 16383 + while {$slot >= 0} { + # Allocate successive slots to random nodes. 
+ set node [randomInt $n] + lappend slots_$node $slot + incr slot -1 + } + for {set j 0} {$j < $n} {incr j} { + R $j cluster addslots {*}[set slots_${j}] + } +} + +# Check that cluster nodes agree about "state", or raise an error. +proc assert_cluster_state {state} { + foreach_redis_id id { + if {[instance_is_killed redis $id]} continue + wait_for_condition 1000 50 { + [CI $id cluster_state] eq $state + } else { + fail "Cluster node $id cluster_state:[CI $id cluster_state]" + } + } +} + +# Search the first node starting from ID $first that is not +# already configured as a slave. +proc cluster_find_available_slave {first} { + foreach_redis_id id { + if {$id < $first} continue + if {[instance_is_killed redis $id]} continue + set me [get_myself $id] + if {[dict get $me slaveof] eq {-}} {return $id} + } + fail "No available slaves" +} + +# Add 'slaves' slaves to a cluster composed of 'masters' masters. +# It assumes that masters are allocated sequentially from instance ID 0 +# to N-1. +proc cluster_allocate_slaves {masters slaves} { + for {set j 0} {$j < $slaves} {incr j} { + set master_id [expr {$j % $masters}] + set slave_id [cluster_find_available_slave $masters] + set master_myself [get_myself $master_id] + R $slave_id cluster replicate [dict get $master_myself id] + } +} + +# Create a cluster composed of the specified number of masters and slaves. +proc create_cluster {masters slaves} { + cluster_allocate_slots $masters + if {$slaves} { + cluster_allocate_slaves $masters $slaves + } + assert_cluster_state ok +} + +# Set the cluster node-timeout to all the reachalbe nodes. +proc set_cluster_node_timeout {to} { + foreach_redis_id id { + catch {R $id CONFIG SET cluster-node-timeout $to} + } +} + +# Check if the cluster is writable and readable. Use node "id" +# as a starting point to talk with the cluster. +proc cluster_write_test {id} { + set prefix [randstring 20 20 alpha] + set port [get_instance_attrib redis $id port] + set cluster [redis_cluster 127.0.0.1:$port] + for {set j 0} {$j < 100} {incr j} { + $cluster set key.$j $prefix.$j + } + for {set j 0} {$j < 100} {incr j} { + assert {[$cluster get key.$j] eq "$prefix.$j"} + } + $cluster close +} diff --git a/redis.submodule/tests/cluster/run.tcl b/redis.submodule/tests/cluster/run.tcl new file mode 100644 index 0000000..93603dd --- /dev/null +++ b/redis.submodule/tests/cluster/run.tcl @@ -0,0 +1,28 @@ +# Cluster test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com +# This software is released under the BSD License. See the COPYING file for +# more information. + +cd tests/cluster +source cluster.tcl +source ../instances.tcl +source ../../support/cluster.tcl ; # Redis Cluster client. + +set ::instances_count 20 ; # How many instances we use at max. + +proc main {} { + parse_options + spawn_instance redis $::redis_base_port $::instances_count { + "cluster-enabled yes" + "appendonly yes" + } + run_tests + cleanup + end_tests +} + +if {[catch main e]} { + puts $::errorInfo + if {$::pause_on_error} pause_on_error + cleanup + exit 1 +} diff --git a/redis.submodule/tests/cluster/tests/00-base.tcl b/redis.submodule/tests/cluster/tests/00-base.tcl new file mode 100644 index 0000000..280befb --- /dev/null +++ b/redis.submodule/tests/cluster/tests/00-base.tcl @@ -0,0 +1,59 @@ +# Check the basic monitoring and failover capabilities. 
+ +source "../tests/includes/init-tests.tcl" + +if {$::simulate_error} { + test "This test will fail" { + fail "Simulated error" + } +} + +test "Different nodes have different IDs" { + set ids {} + set numnodes 0 + foreach_redis_id id { + incr numnodes + # Every node should just know itself. + set nodeid [dict get [get_myself $id] id] + assert {$nodeid ne {}} + lappend ids $nodeid + } + set numids [llength [lsort -unique $ids]] + assert {$numids == $numnodes} +} + +test "It is possible to perform slot allocation" { + cluster_allocate_slots 5 +} + +test "After the join, every node gets a different config epoch" { + set trynum 60 + while {[incr trynum -1] != 0} { + # We check that this condition is true for *all* the nodes. + set ok 1 ; # Will be set to 0 every time a node is not ok. + foreach_redis_id id { + set epochs {} + foreach n [get_cluster_nodes $id] { + lappend epochs [dict get $n config_epoch] + } + if {[lsort $epochs] != [lsort -unique $epochs]} { + set ok 0 ; # At least one collision! + } + } + if {$ok} break + after 1000 + puts -nonewline . + flush stdout + } + if {$trynum == 0} { + fail "Config epoch conflict resolution is not working." + } +} + +test "Nodes should report cluster_state is ok now" { + assert_cluster_state ok +} + +test "It is possible to write and read from the cluster" { + cluster_write_test 0 +} diff --git a/redis.submodule/tests/cluster/tests/01-faildet.tcl b/redis.submodule/tests/cluster/tests/01-faildet.tcl new file mode 100644 index 0000000..8fe87c9 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/01-faildet.tcl @@ -0,0 +1,38 @@ +# Check the basic monitoring and failover capabilities. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster should start ok" { + assert_cluster_state ok +} + +test "Killing two slave nodes" { + kill_instance redis 5 + kill_instance redis 6 +} + +test "Cluster should be still up" { + assert_cluster_state ok +} + +test "Killing one master node" { + kill_instance redis 0 +} + +# Note: the only slave of instance 0 is already down so no +# failover is possible, that would change the state back to ok. +test "Cluster should be down now" { + assert_cluster_state fail +} + +test "Restarting master node" { + restart_instance redis 0 +} + +test "Cluster should be up again" { + assert_cluster_state ok +} diff --git a/redis.submodule/tests/cluster/tests/02-failover.tcl b/redis.submodule/tests/cluster/tests/02-failover.tcl new file mode 100644 index 0000000..6b2fd09 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/02-failover.tcl @@ -0,0 +1,65 @@ +# Check the basic monitoring and failover capabilities. 
+ +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +set current_epoch [CI 1 cluster_current_epoch] + +test "Killing one master node" { + kill_instance redis 0 +} + +test "Wait for failover" { + wait_for_condition 1000 50 { + [CI 1 cluster_current_epoch] > $current_epoch + } else { + fail "No failover detected" + } +} + +test "Cluster should eventually be up again" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 1 +} + +test "Instance #5 is now a master" { + assert {[RI 5 role] eq {master}} +} + +test "Restarting the previously killed master node" { + restart_instance redis 0 +} + +test "Instance #0 gets converted into a slave" { + wait_for_condition 1000 50 { + [RI 0 role] eq {slave} + } else { + fail "Old master was not converted into slave" + } +} diff --git a/redis.submodule/tests/cluster/tests/03-failover-loop.tcl b/redis.submodule/tests/cluster/tests/03-failover-loop.tcl new file mode 100644 index 0000000..8e1bcd6 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/03-failover-loop.tcl @@ -0,0 +1,115 @@ +# Failover stress test. +# In this test a different node is killed in a loop for N +# iterations. The test checks that certain properties +# are preseved across iterations. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +set iterations 20 +set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]] + +while {[incr iterations -1]} { + set tokill [randomInt 10] + set other [expr {($tokill+1)%10}] ; # Some other instance. + set key [randstring 20 20 alpha] + set val [randstring 20 20 alpha] + set role [RI $tokill role] + if {$role eq {master}} { + set slave {} + set myid [dict get [get_myself $tokill] id] + foreach_redis_id id { + if {$id == $tokill} continue + if {[dict get [get_myself $id] slaveof] eq $myid} { + set slave $id + } + } + if {$slave eq {}} { + fail "Unable to retrieve slave's ID for master #$tokill" + } + } + + puts "--- Iteration $iterations ---" + + if {$role eq {master}} { + test "Wait for slave of #$tokill to sync" { + wait_for_condition 1000 50 { + [string match {*state=online*} [RI $tokill slave0]] + } else { + fail "Slave of node #$tokill is not ok" + } + } + set slave_config_epoch [CI $slave cluster_my_epoch] + } + + test "Cluster is writable before failover" { + for {set i 0} {$i < 100} {incr i} { + catch {$cluster set $key:$i $val:$i} err + assert {$err eq {OK}} + } + # Wait for the write to propagate to the slave if we + # are going to kill a master. 
+ if {$role eq {master}} { + R $tokill wait 1 20000 + } + } + + test "Killing node #$tokill" { + kill_instance redis $tokill + } + + if {$role eq {master}} { + test "Wait failover by #$slave with old epoch $slave_config_epoch" { + wait_for_condition 1000 50 { + [CI $slave cluster_my_epoch] > $slave_config_epoch + } else { + fail "No failover detected, epoch is still [CI $slave cluster_my_epoch]" + } + } + } + + test "Cluster should eventually be up again" { + assert_cluster_state ok + } + + test "Cluster is writable again" { + for {set i 0} {$i < 100} {incr i} { + catch {$cluster set $key:$i:2 $val:$i:2} err + assert {$err eq {OK}} + } + } + + test "Restarting node #$tokill" { + restart_instance redis $tokill + } + + test "Instance #$tokill is now a slave" { + wait_for_condition 1000 50 { + [RI $tokill role] eq {slave} + } else { + fail "Restarted instance is not a slave" + } + } + + test "We can read back the value we set before" { + for {set i 0} {$i < 100} {incr i} { + catch {$cluster get $key:$i} err + assert {$err eq "$val:$i"} + catch {$cluster get $key:$i:2} err + assert {$err eq "$val:$i:2"} + } + } +} + +test "Post condition: current_epoch >= my_epoch everywhere" { + foreach_redis_id id { + assert {[CI $id cluster_current_epoch] >= [CI $id cluster_my_epoch]} + } +} diff --git a/redis.submodule/tests/cluster/tests/04-resharding.tcl b/redis.submodule/tests/cluster/tests/04-resharding.tcl new file mode 100644 index 0000000..8811762 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/04-resharding.tcl @@ -0,0 +1,102 @@ +# Failover stress test. +# In this test a different node is killed in a loop for N +# iterations. The test checks that certain properties +# are preseved across iterations. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +# Return nno-zero if the specified PID is about a process still in execution, +# otherwise 0 is returned. +proc process_is_running {pid} { + # PS should return with an error if PID is non existing, + # and catch will return non-zero. We want to return non-zero if + # the PID exists, so we invert the return value with expr not operator. + expr {![catch {exec ps -p $pid}]} +} + +# Our resharding test performs the following actions: +# +# - N commands are sent to the cluster in the course of the test. +# - Every command selects a random key from key:0 to key:MAX-1. +# - The operation RPUSH key is perforemd. +# - Tcl remembers into an array all the values pushed to each list. +# - After N/2 commands, the resharding process is started in background. +# - The test continues while the resharding is in progress. +# - At the end of the test, we wait for the resharding process to stop. +# - Finally the keys are checked to see if they contain the value they should. + +set numkeys 50000 +set numops 200000 +set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]] +catch {unset content} +array set content {} +set tribpid {} + +test "Cluster consistency during live resharding" { + for {set j 0} {$j < $numops} {incr j} { + # Trigger the resharding once we execute half the ops. + if {$tribpid ne {} && + ($j % 10000) == 0 && + ![process_is_running $tribpid]} { + set tribpid {} + } + + if {$j >= $numops/2 && $tribpid eq {}} { + puts -nonewline "...Starting resharding..." 
+ flush stdout + set target [dict get [get_myself [randomInt 5]] id] + set tribpid [lindex [exec \ + ../../../src/redis-trib.rb reshard \ + --from all \ + --to $target \ + --slots 100 \ + --yes \ + 127.0.0.1:[get_instance_attrib redis 0 port] \ + | [info nameofexecutable] \ + ../tests/helpers/onlydots.tcl \ + &] 0] + } + + # Write random data to random list. + set listid [randomInt $numkeys] + set key "key:$listid" + set ele [randomValue] + # We write both with Lua scripts and with plain commands. + # This way we are able to stress Lua -> Redis command invocation + # as well, that has tests to prevent Lua to write into wrong + # hash slots. + if {$listid % 2} { + $cluster rpush $key $ele + } else { + $cluster eval {redis.call("rpush",KEYS[1],ARGV[1])} 1 $key $ele + } + lappend content($key) $ele + + if {($j % 1000) == 0} { + puts -nonewline W; flush stdout + } + } + + # Wait for the resharding process to end + wait_for_condition 1000 500 { + [process_is_running $tribpid] == 0 + } else { + fail "Resharding is not terminating after some time." + } + +} + +test "Verify $numkeys keys for consistency with logical content" { + # Check that the Redis Cluster content matches our logical content. + foreach {key value} [array get content] { + assert {[$cluster lrange $key 0 -1] eq $value} + } +} diff --git a/redis.submodule/tests/cluster/tests/05-slave-selection.tcl b/redis.submodule/tests/cluster/tests/05-slave-selection.tcl new file mode 100644 index 0000000..6efedce --- /dev/null +++ b/redis.submodule/tests/cluster/tests/05-slave-selection.tcl @@ -0,0 +1,94 @@ +# Slave selection test +# Check the algorithm trying to pick the slave with the most complete history. + +source "../tests/includes/init-tests.tcl" + +# Create a cluster with 5 master and 10 slaves, so that we have 2 +# slaves for each master. +test "Create a 5 nodes cluster" { + create_cluster 5 10 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "The first master has actually two slaves" { + assert {[llength [lindex [R 0 role] 2]] == 2} +} + +test {Slaves of #0 are instance #5 and #10 as expected} { + set port0 [get_instance_attrib redis 0 port] + assert {[lindex [R 5 role] 2] == $port0} + assert {[lindex [R 10 role] 2] == $port0} +} + +test "Instance #5 and #10 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} && + [RI 10 master_link_status] eq {up} + } else { + fail "Instance #5 or #10 master link status is not up" + } +} + +set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]] + +test "Slaves are both able to receive and acknowledge writes" { + for {set j 0} {$j < 100} {incr j} { + $cluster set $j $j + } + assert {[R 0 wait 2 60000] == 2} +} + +test "Write data while slave #10 is paused and can't receive it" { + # Stop the slave with a multi/exec transaction so that the master will + # be killed as soon as it can accept writes again. + R 10 multi + R 10 debug sleep 10 + R 10 client kill 127.0.0.1:$port0 + R 10 deferred 1 + R 10 exec + + # Write some data the slave can't receive. + for {set j 0} {$j < 100} {incr j} { + $cluster set $j $j + } + + # Prevent the master from accepting new slaves. + # Use a large pause value since we'll kill it anyway. + R 0 CLIENT PAUSE 60000 + + # Wait for the slave to return available again + R 10 deferred 0 + assert {[R 10 read] eq {OK OK}} + + # Kill the master so that a reconnection will not be possible. 
+ kill_instance redis 0 +} + +test "Wait for instance #5 (and not #10) to turn into a master" { + wait_for_condition 1000 50 { + [RI 5 role] eq {master} + } else { + fail "No failover detected" + } +} + +test "Wait for the node #10 to return alive before ending the test" { + R 10 ping +} + +test "Cluster should eventually be up again" { + assert_cluster_state ok +} + +test "Node #10 should eventually replicate node #5" { + set port5 [get_instance_attrib redis 5 port] + wait_for_condition 1000 50 { + ([lindex [R 10 role] 2] == $port5) && + ([lindex [R 10 role] 3] eq {connected}) + } else { + fail "#10 didn't became slave of #5" + } +} diff --git a/redis.submodule/tests/cluster/tests/06-slave-stop-cond.tcl b/redis.submodule/tests/cluster/tests/06-slave-stop-cond.tcl new file mode 100644 index 0000000..f2e6705 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/06-slave-stop-cond.tcl @@ -0,0 +1,73 @@ +# Slave stop condition test +# Check that if there is a disconnection time limit, the slave will not try +# to failover its master. + +source "../tests/includes/init-tests.tcl" + +# Create a cluster with 5 master and 5 slaves. +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "The first master has actually one slave" { + assert {[llength [lindex [R 0 role] 2]] == 1} +} + +test {Slaves of #0 is instance #5 as expected} { + set port0 [get_instance_attrib redis 0 port] + assert {[lindex [R 5 role] 2] == $port0} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +test "Lower the slave validity factor of #5 to the value of 2" { + assert {[R 5 config set cluster-slave-validity-factor 2] eq {OK}} +} + +test "Break master-slave link and prevent further reconnections" { + # Stop the slave with a multi/exec transaction so that the master will + # be killed as soon as it can accept writes again. + R 5 multi + R 5 client kill 127.0.0.1:$port0 + # here we should sleep 6 or more seconds (node_timeout * slave_validity) + # but the actual validity time is actually incremented by the + # repl-ping-slave-period value which is 10 seconds by default. So we + # need to wait more than 16 seconds. + R 5 debug sleep 20 + R 5 deferred 1 + R 5 exec + + # Prevent the master from accepting new slaves. + # Use a large pause value since we'll kill it anyway. + R 0 CLIENT PAUSE 60000 + + # Wait for the slave to return available again + R 5 deferred 0 + assert {[R 5 read] eq {OK OK}} + + # Kill the master so that a reconnection will not be possible. + kill_instance redis 0 +} + +test "Slave #5 is reachable and alive" { + assert {[R 5 ping] eq {PONG}} +} + +test "Slave #5 should not be able to failover" { + after 10000 + assert {[RI 5 role] eq {slave}} +} + +test "Cluster should be down" { + assert_cluster_state fail +} diff --git a/redis.submodule/tests/cluster/tests/07-replica-migration.tcl b/redis.submodule/tests/cluster/tests/07-replica-migration.tcl new file mode 100644 index 0000000..2ec0742 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/07-replica-migration.tcl @@ -0,0 +1,47 @@ +# Replica migration test. +# Check that orphaned masters are joined by replicas of masters having +# multiple replicas attached, according to the migration barrier settings. + +source "../tests/includes/init-tests.tcl" + +# Create a cluster with 5 master and 10 slaves, so that we have 2 +# slaves for each master. 
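+
+# As a quick illustration (not part of the test flow below), the behavior under
+# test is controlled by the cluster-migration-barrier setting: a master donates
+# one of its replicas to an orphaned master only if it would still keep at
+# least that many replicas for itself. Using the R helper from the framework:
+#
+#   R 0 config get cluster-migration-barrier   ;# defaults to 1
+#   R 0 config set cluster-migration-barrier 2 ;# be more conservative about migrating
+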
+test "Create a 5 nodes cluster" { + create_cluster 5 10 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Each master should have two replicas attached" { + foreach_redis_id id { + if {$id < 5} { + wait_for_condition 1000 50 { + [llength [lindex [R 0 role] 2]] == 2 + } else { + fail "Master #$id does not have 2 slaves as expected" + } + } + } +} + +test "Killing all the slaves of master #0 and #1" { + kill_instance redis 5 + kill_instance redis 10 + kill_instance redis 6 + kill_instance redis 11 + after 4000 +} + +foreach_redis_id id { + if {$id < 5} { + test "Master #$id should have at least one replica" { + wait_for_condition 1000 50 { + [llength [lindex [R $id role] 2]] >= 1 + } else { + fail "Master #$id has no replicas" + } + } + } +} diff --git a/redis.submodule/tests/cluster/tests/08-update-msg.tcl b/redis.submodule/tests/cluster/tests/08-update-msg.tcl new file mode 100644 index 0000000..6f9661d --- /dev/null +++ b/redis.submodule/tests/cluster/tests/08-update-msg.tcl @@ -0,0 +1,90 @@ +# Test UPDATE messages sent by other nodes when the currently authorirative +# master is unavaialble. The test is performed in the following steps: +# +# 1) Master goes down. +# 2) Slave failover and becomes new master. +# 3) New master is partitoned away. +# 4) Old master returns. +# 5) At this point we expect the old master to turn into a slave ASAP because +# of the UPDATE messages it will receive from the other nodes when its +# configuration will be found to be outdated. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +set current_epoch [CI 1 cluster_current_epoch] + +test "Killing one master node" { + kill_instance redis 0 +} + +test "Wait for failover" { + wait_for_condition 1000 50 { + [CI 1 cluster_current_epoch] > $current_epoch + } else { + fail "No failover detected" + } +} + +test "Cluster should eventually be up again" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 1 +} + +test "Instance #5 is now a master" { + assert {[RI 5 role] eq {master}} +} + +test "Killing the new master #5" { + kill_instance redis 5 +} + +test "Cluster should be down now" { + assert_cluster_state fail +} + +test "Restarting the old master node" { + restart_instance redis 0 +} + +test "Instance #0 gets converted into a slave" { + wait_for_condition 1000 50 { + [RI 0 role] eq {slave} + } else { + fail "Old master was not converted into slave" + } +} + +test "Restarting the new master node" { + restart_instance redis 5 +} + +test "Cluster is up again" { + assert_cluster_state ok +} diff --git a/redis.submodule/tests/cluster/tests/09-pubsub.tcl b/redis.submodule/tests/cluster/tests/09-pubsub.tcl new file mode 100644 index 0000000..e62b91c --- /dev/null +++ b/redis.submodule/tests/cluster/tests/09-pubsub.tcl @@ -0,0 +1,40 @@ +# Test PUBLISH propagation across the cluster. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +proc test_cluster_publish {instance instances} { + # Subscribe all the instances but the one we use to send. 
+ for {set j 0} {$j < $instances} {incr j} { + if {$j != $instance} { + R $j deferred 1 + R $j subscribe testchannel + R $j read; # Read the subscribe reply + } + } + + set data [randomValue] + R $instance PUBLISH testchannel $data + + # Read the message back from all the nodes. + for {set j 0} {$j < $instances} {incr j} { + if {$j != $instance} { + set msg [R $j read] + assert {$data eq [lindex $msg 2]} + R $j unsubscribe testchannel + R $j read; # Read the unsubscribe reply + R $j deferred 0 + } + } +} + +test "Test publishing to master" { + test_cluster_publish 0 10 +} + +test "Test publishing to slave" { + test_cluster_publish 5 10 +} diff --git a/redis.submodule/tests/cluster/tests/10-manual-failover.tcl b/redis.submodule/tests/cluster/tests/10-manual-failover.tcl new file mode 100644 index 0000000..5441b79 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/10-manual-failover.tcl @@ -0,0 +1,192 @@ +# Check the manual failover + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +set current_epoch [CI 1 cluster_current_epoch] + +set numkeys 50000 +set numops 10000 +set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]] +catch {unset content} +array set content {} + +test "Send CLUSTER FAILOVER to #5, during load" { + for {set j 0} {$j < $numops} {incr j} { + # Write random data to random list. + set listid [randomInt $numkeys] + set key "key:$listid" + set ele [randomValue] + # We write both with Lua scripts and with plain commands. + # This way we are able to stress Lua -> Redis command invocation + # as well, that has tests to prevent Lua to write into wrong + # hash slots. + if {$listid % 2} { + $cluster rpush $key $ele + } else { + $cluster eval {redis.call("rpush",KEYS[1],ARGV[1])} 1 $key $ele + } + lappend content($key) $ele + + if {($j % 1000) == 0} { + puts -nonewline W; flush stdout + } + + if {$j == $numops/2} {R 5 cluster failover} + } +} + +test "Wait for failover" { + wait_for_condition 1000 50 { + [CI 1 cluster_current_epoch] > $current_epoch + } else { + fail "No failover detected" + } +} + +test "Cluster should eventually be up again" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 1 +} + +test "Instance #5 is now a master" { + assert {[RI 5 role] eq {master}} +} + +test "Verify $numkeys keys for consistency with logical content" { + # Check that the Redis Cluster content matches our logical content. + foreach {key value} [array get content] { + assert {[$cluster lrange $key 0 -1] eq $value} + } +} + +test "Instance #0 gets converted into a slave" { + wait_for_condition 1000 50 { + [RI 0 role] eq {slave} + } else { + fail "Old master was not converted into slave" + } +} + +## Check that manual failover does not happen if we can't talk with the master. 
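+
+# For context (illustrative only, not executed by this unit): CLUSTER FAILOVER
+# comes in three variants, exercised here and in 11-manual-takeover.tcl:
+#
+#   R 5 cluster failover            ;# coordinated with the master, waits to sync first
+#   R 5 cluster failover force      ;# proceeds without the master's cooperation
+#   R 5 cluster failover takeover   ;# unilateral, does not ask for cluster agreement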
+ +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +test "Make instance #0 unreachable without killing it" { + R 0 deferred 1 + R 0 DEBUG SLEEP 10 +} + +test "Send CLUSTER FAILOVER to instance #5" { + R 5 cluster failover +} + +test "Instance #5 is still a slave after some time (no failover)" { + after 5000 + assert {[RI 5 role] eq {master}} +} + +test "Wait for instance #0 to return back alive" { + R 0 deferred 0 + assert {[R 0 read] eq {OK}} +} + +## Check with "force" failover happens anyway. + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Instance #5 is a slave" { + assert {[RI 5 role] eq {slave}} +} + +test "Instance #5 synced with the master" { + wait_for_condition 1000 50 { + [RI 5 master_link_status] eq {up} + } else { + fail "Instance #5 master link status is not up" + } +} + +test "Make instance #0 unreachable without killing it" { + R 0 deferred 1 + R 0 DEBUG SLEEP 10 +} + +test "Send CLUSTER FAILOVER to instance #5" { + R 5 cluster failover force +} + +test "Instance #5 is a master after some time" { + wait_for_condition 1000 50 { + [RI 5 role] eq {master} + } else { + fail "Instance #5 is not a master after some time regardless of FORCE" + } +} + +test "Wait for instance #0 to return back alive" { + R 0 deferred 0 + assert {[R 0 read] eq {OK}} +} diff --git a/redis.submodule/tests/cluster/tests/11-manual-takeover.tcl b/redis.submodule/tests/cluster/tests/11-manual-takeover.tcl new file mode 100644 index 0000000..f567c69 --- /dev/null +++ b/redis.submodule/tests/cluster/tests/11-manual-takeover.tcl @@ -0,0 +1,59 @@ +# Manual takeover test + +source "../tests/includes/init-tests.tcl" + +test "Create a 5 nodes cluster" { + create_cluster 5 5 +} + +test "Cluster is up" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 0 +} + +test "Killing majority of master nodes" { + kill_instance redis 0 + kill_instance redis 1 + kill_instance redis 2 +} + +test "Cluster should eventually be down" { + assert_cluster_state fail +} + +test "Use takeover to bring slaves back" { + R 5 cluster failover takeover + R 6 cluster failover takeover + R 7 cluster failover takeover +} + +test "Cluster should eventually be up again" { + assert_cluster_state ok +} + +test "Cluster is writable" { + cluster_write_test 4 +} + +test "Instance #5, #6, #7 are now masters" { + assert {[RI 5 role] eq {master}} + assert {[RI 6 role] eq {master}} + assert {[RI 7 role] eq {master}} +} + +test "Restarting the previously killed master nodes" { + restart_instance redis 0 + restart_instance redis 1 + restart_instance redis 2 +} + +test "Instance #0, #1, #2 gets converted into a slaves" { + wait_for_condition 1000 50 { + [RI 0 role] eq {slave} && [RI 1 role] eq {slave} && [RI 2 role] eq {slave} + } else { + fail "Old masters not converted into slaves" + } +} diff --git a/redis.submodule/tests/cluster/tests/helpers/onlydots.tcl b/redis.submodule/tests/cluster/tests/helpers/onlydots.tcl new file mode 
100644 index 0000000..4a6d1ae --- /dev/null +++ b/redis.submodule/tests/cluster/tests/helpers/onlydots.tcl @@ -0,0 +1,16 @@ +# Read the standard input and only shows dots in the output, filtering out +# all the other characters. Designed to avoid bufferization so that when +# we get the output of redis-trib and want to show just the dots, we'll see +# the dots as soon as redis-trib will output them. + +fconfigure stdin -buffering none + +while 1 { + set c [read stdin 1] + if {$c eq {}} { + exit 0; # EOF + } elseif {$c eq {.}} { + puts -nonewline . + flush stdout + } +} diff --git a/redis.submodule/tests/cluster/tests/includes/init-tests.tcl b/redis.submodule/tests/cluster/tests/includes/init-tests.tcl new file mode 100644 index 0000000..466ab8f --- /dev/null +++ b/redis.submodule/tests/cluster/tests/includes/init-tests.tcl @@ -0,0 +1,70 @@ +# Initialization tests -- most units will start including this. + +test "(init) Restart killed instances" { + foreach type {redis} { + foreach_${type}_id id { + if {[get_instance_attrib $type $id pid] == -1} { + puts -nonewline "$type/$id " + flush stdout + restart_instance $type $id + } + } + } +} + +test "Cluster nodes are reachable" { + foreach_redis_id id { + # Every node should be reachable. + wait_for_condition 1000 50 { + ([catch {R $id ping} ping_reply] == 0) && + ($ping_reply eq {PONG}) + } else { + catch {R $id ping} err + fail "Node #$id keeps replying '$err' to PING." + } + } +} + +test "Cluster nodes hard reset" { + foreach_redis_id id { + if {$::valgrind} { + set node_timeout 10000 + } else { + set node_timeout 3000 + } + catch {R $id flushall} ; # May fail for readonly slaves. + R $id MULTI + R $id cluster reset hard + R $id cluster set-config-epoch [expr {$id+1}] + R $id EXEC + R $id config set cluster-node-timeout $node_timeout + R $id config set cluster-slave-validity-factor 10 + R $id config rewrite + } +} + +test "Cluster Join and auto-discovery test" { + # Join node 0 with 1, 1 with 2, ... and so forth. + # If auto-discovery works all nodes will know every other node + # eventually. + set ids {} + foreach_redis_id id {lappend ids $id} + for {set j 0} {$j < [expr [llength $ids]-1]} {incr j} { + set a [lindex $ids $j] + set b [lindex $ids [expr $j+1]] + set b_port [get_instance_attrib redis $b port] + R $a cluster meet 127.0.0.1 $b_port + } + + foreach_redis_id id { + wait_for_condition 1000 50 { + [llength [get_cluster_nodes $id]] == [llength $ids] + } else { + fail "Cluster failed to join into a full mesh." 
+ } + } +} + +test "Before slots allocation, all nodes report cluster failure" { + assert_cluster_state fail +} diff --git a/redis.submodule/tests/cluster/tmp/.gitignore b/redis.submodule/tests/cluster/tmp/.gitignore new file mode 100644 index 0000000..f581f73 --- /dev/null +++ b/redis.submodule/tests/cluster/tmp/.gitignore @@ -0,0 +1,2 @@ +redis_* +sentinel_* diff --git a/redis.submodule/tests/integration/logging.tcl b/redis.submodule/tests/integration/logging.tcl new file mode 100644 index 0000000..c1f4854 --- /dev/null +++ b/redis.submodule/tests/integration/logging.tcl @@ -0,0 +1,24 @@ +set server_path [tmpdir server.log] +set system_name [string tolower [exec uname -s]] + +if {$system_name eq {linux} || $system_name eq {darwin}} { + start_server [list overrides [list dir $server_path]] { + test "Server is able to generate a stack trace on selected systems" { + r config set watchdog-period 200 + r debug sleep 1 + set pattern "*debugCommand*" + set retry 10 + while {$retry} { + set result [exec tail -100 < [srv 0 stdout]] + if {[string match $pattern $result]} { + break + } + incr retry -1 + after 1000 + } + if {$retry == 0} { + error "assertion:expected stack trace not found into log file" + } + } + } +} diff --git a/redis.submodule/tests/sentinel/tests/06-ckquorum.tcl b/redis.submodule/tests/sentinel/tests/06-ckquorum.tcl new file mode 100644 index 0000000..31e5fa2 --- /dev/null +++ b/redis.submodule/tests/sentinel/tests/06-ckquorum.tcl @@ -0,0 +1,34 @@ +# Test for the SENTINEL CKQUORUM command + +source "../tests/includes/init-tests.tcl" +set num_sentinels [llength $::sentinel_instances] + +test "CKQUORUM reports OK and the right amount of Sentinels" { + foreach_sentinel_id id { + assert_match "*OK $num_sentinels usable*" [S $id SENTINEL CKQUORUM mymaster] + } +} + +test "CKQUORUM detects quorum cannot be reached" { + set orig_quorum [expr {$num_sentinels/2+1}] + S 0 SENTINEL SET mymaster quorum [expr {$num_sentinels+1}] + catch {[S 0 SENTINEL CKQUORUM mymaster]} err + assert_match "*NOQUORUM*" $err + S 0 SENTINEL SET mymaster quorum $orig_quorum +} + +test "CKQUORUM detects failover authorization cannot be reached" { + set orig_quorum [expr {$num_sentinels/2+1}] + S 0 SENTINEL SET mymaster quorum 1 + kill_instance sentinel 1 + kill_instance sentinel 2 + kill_instance sentinel 3 + after 5000 + catch {[S 0 SENTINEL CKQUORUM mymaster]} err + assert_match "*NOQUORUM*" $err + S 0 SENTINEL SET mymaster quorum $orig_quorum + restart_instance sentinel 1 + restart_instance sentinel 2 + restart_instance sentinel 3 +} + diff --git a/redis.submodule/tests/support/cluster.tcl b/redis.submodule/tests/support/cluster.tcl new file mode 100644 index 0000000..d4e7d2e --- /dev/null +++ b/redis.submodule/tests/support/cluster.tcl @@ -0,0 +1,305 @@ +# Tcl redis cluster client as a wrapper of redis.rb. +# Copyright (C) 2014 Salvatore Sanfilippo +# Released under the BSD license like Redis itself +# +# Example usage: +# +# set c [redis_cluster 127.0.0.1 6379 127.0.0.1 6380] +# $c set foo +# $c get foo +# $c close + +package require Tcl 8.5 +package provide redis_cluster 0.1 + +namespace eval redis_cluster {} +set ::redis_cluster::id 0 +array set ::redis_cluster::startup_nodes {} +array set ::redis_cluster::nodes {} +array set ::redis_cluster::slots {} + +# List of "plain" commands, which are commands where the sole key is always +# the first argument. 
+set ::redis_cluster::plain_commands { + get set setnx setex psetex append strlen exists setbit getbit + setrange getrange substr incr decr rpush lpush rpushx lpushx + linsert rpop lpop brpop llen lindex lset lrange ltrim lrem + sadd srem sismember scard spop srandmember smembers sscan zadd + zincrby zrem zremrangebyscore zremrangebyrank zremrangebylex zrange + zrangebyscore zrevrangebyscore zrangebylex zrevrangebylex zcount + zlexcount zrevrange zcard zscore zrank zrevrank zscan hset hsetnx + hget hmset hmget hincrby hincrbyfloat hdel hlen hkeys hvals + hgetall hexists hscan incrby decrby incrbyfloat getset move + expire expireat pexpire pexpireat type ttl pttl persist restore + dump bitcount bitpos pfadd pfcount +} + +proc redis_cluster {nodes} { + set id [incr ::redis_cluster::id] + set ::redis_cluster::startup_nodes($id) $nodes + set ::redis_cluster::nodes($id) {} + set ::redis_cluster::slots($id) {} + set handle [interp alias {} ::redis_cluster::instance$id {} ::redis_cluster::__dispatch__ $id] + $handle refresh_nodes_map + return $handle +} + +# Totally reset the slots / nodes state for the client, calls +# CLUSTER NODES in the first startup node available, populates the +# list of nodes ::redis_cluster::nodes($id) with an hash mapping node +# ip:port to a representation of the node (another hash), and finally +# maps ::redis_cluster::slots($id) with an hash mapping slot numbers +# to node IDs. +# +# This function is called when a new Redis Cluster client is initialized +# and every time we get a -MOVED redirection error. +proc ::redis_cluster::__method__refresh_nodes_map {id} { + # Contact the first responding startup node. + set idx 0; # Index of the node that will respond. + set errmsg {} + foreach start_node $::redis_cluster::startup_nodes($id) { + lassign [split $start_node :] start_host start_port + if {[catch { + set r {} + set r [redis $start_host $start_port] + set nodes_descr [$r cluster nodes] + $r close + } e]} { + if {$r ne {}} {catch {$r close}} + incr idx + if {[string length $errmsg] < 200} { + append errmsg " $start_node: $e" + } + continue ; # Try next. + } else { + break; # Good node found. + } + } + + if {$idx == [llength $::redis_cluster::startup_nodes($id)]} { + error "No good startup node found. $errmsg" + } + + # Put the node that responded as first in the list if it is not + # already the first. + if {$idx != 0} { + set l $::redis_cluster::startup_nodes($id) + set left [lrange $l 0 [expr {$idx-1}]] + set right [lrange $l [expr {$idx+1}] end] + set l [concat [lindex $l $idx] $left $right] + set ::redis_cluster::startup_nodes($id) $l + } + + # Parse CLUSTER NODES output to populate the nodes description. + set nodes {} ; # addr -> node description hash. + foreach line [split $nodes_descr "\n"] { + set line [string trim $line] + if {$line eq {}} continue + set args [split $line " "] + lassign $args nodeid addr flags slaveof pingsent pongrecv configepoch linkstate + set slots [lrange $args 8 end] + if {$addr eq {:0}} { + set addr $start_host:$start_port + } + lassign [split $addr :] host port + + # Connect to the node + set link {} + catch {set link [redis $host $port]} + + # Build this node description as an hash. + set node [dict create \ + id $nodeid \ + addr $addr \ + host $host \ + port $port \ + flags $flags \ + slaveof $slaveof \ + slots $slots \ + link $link \ + ] + dict set nodes $addr $node + lappend ::redis_cluster::startup_nodes($id) $addr + } + + # Close all the existing links in the old nodes map, and set the new + # map as current. 
+ foreach n $::redis_cluster::nodes($id) { + catch { + [dict get $n link] close + } + } + set ::redis_cluster::nodes($id) $nodes + + # Populates the slots -> nodes map. + dict for {addr node} $nodes { + foreach slotrange [dict get $node slots] { + lassign [split $slotrange -] start end + if {$end == {}} {set end $start} + for {set j $start} {$j <= $end} {incr j} { + dict set ::redis_cluster::slots($id) $j $addr + } + } + } + + # Only retain unique entries in the startup nodes list + set ::redis_cluster::startup_nodes($id) [lsort -unique $::redis_cluster::startup_nodes($id)] +} + +# Free a redis_cluster handle. +proc ::redis_cluster::__method__close {id} { + catch { + set nodes $::redis_cluster::nodes($id) + dict for {addr node} $nodes { + catch { + [dict get $node link] close + } + } + } + catch {unset ::redis_cluster::startup_nodes($id)} + catch {unset ::redis_cluster::nodes($id)} + catch {unset ::redis_cluster::slots($id)} + catch {interp alias {} ::redis_cluster::instance$id {}} +} + +proc ::redis_cluster::__dispatch__ {id method args} { + if {[info command ::redis_cluster::__method__$method] eq {}} { + # Get the keys from the command. + set keys [::redis_cluster::get_keys_from_command $method $args] + if {$keys eq {}} { + error "Redis command '$method' is not supported by redis_cluster." + } + + # Resolve the keys in the corresponding hash slot they hash to. + set slot [::redis_cluster::get_slot_from_keys $keys] + if {$slot eq {}} { + error "Invalid command: multiple keys not hashing to the same slot." + } + + # Get the node mapped to this slot. + set node_addr [dict get $::redis_cluster::slots($id) $slot] + if {$node_addr eq {}} { + error "No mapped node for slot $slot." + } + + # Execute the command in the node we think is the slot owner. + set retry 100 + while {[incr retry -1]} { + if {$retry < 5} {after 100} + set node [dict get $::redis_cluster::nodes($id) $node_addr] + set link [dict get $node link] + if {[catch {$link $method {*}$args} e]} { + if {$link eq {} || \ + [string range $e 0 4] eq {MOVED} || \ + [string range $e 0 2] eq {I/O} \ + } { + # MOVED redirection. + ::redis_cluster::__method__refresh_nodes_map $id + set node_addr [dict get $::redis_cluster::slots($id) $slot] + continue + } elseif {[string range $e 0 2] eq {ASK}} { + # ASK redirection. + set node_addr [lindex $e 2] + continue + } else { + # Non redirecting error. + error $e $::errorInfo $::errorCode + } + } else { + # OK query went fine + return $e + } + } + error "Too many redirections or failures contacting Redis Cluster." + } else { + uplevel 1 [list ::redis_cluster::__method__$method $id] $args + } +} + +proc ::redis_cluster::get_keys_from_command {cmd argv} { + set cmd [string tolower $cmd] + # Most Redis commands get just one key as first argument. + if {[lsearch -exact $::redis_cluster::plain_commands $cmd] != -1} { + return [list [lindex $argv 0]] + } + + # Special handling for other commands + switch -exact $cmd { + mget {return $argv} + eval {return [lrange $argv 2 1+[lindex $argv 1]]} + evalsha {return [lrange $argv 2 1+[lindex $argv 1]]} + } + + # All the remaining commands are not handled. + return {} +} + +# Returns the CRC16 of the specified string. +# The CRC parameters are described in the Redis Cluster specification. 
+set ::redis_cluster::XMODEMCRC16Lookup { + 0x0000 0x1021 0x2042 0x3063 0x4084 0x50a5 0x60c6 0x70e7 + 0x8108 0x9129 0xa14a 0xb16b 0xc18c 0xd1ad 0xe1ce 0xf1ef + 0x1231 0x0210 0x3273 0x2252 0x52b5 0x4294 0x72f7 0x62d6 + 0x9339 0x8318 0xb37b 0xa35a 0xd3bd 0xc39c 0xf3ff 0xe3de + 0x2462 0x3443 0x0420 0x1401 0x64e6 0x74c7 0x44a4 0x5485 + 0xa56a 0xb54b 0x8528 0x9509 0xe5ee 0xf5cf 0xc5ac 0xd58d + 0x3653 0x2672 0x1611 0x0630 0x76d7 0x66f6 0x5695 0x46b4 + 0xb75b 0xa77a 0x9719 0x8738 0xf7df 0xe7fe 0xd79d 0xc7bc + 0x48c4 0x58e5 0x6886 0x78a7 0x0840 0x1861 0x2802 0x3823 + 0xc9cc 0xd9ed 0xe98e 0xf9af 0x8948 0x9969 0xa90a 0xb92b + 0x5af5 0x4ad4 0x7ab7 0x6a96 0x1a71 0x0a50 0x3a33 0x2a12 + 0xdbfd 0xcbdc 0xfbbf 0xeb9e 0x9b79 0x8b58 0xbb3b 0xab1a + 0x6ca6 0x7c87 0x4ce4 0x5cc5 0x2c22 0x3c03 0x0c60 0x1c41 + 0xedae 0xfd8f 0xcdec 0xddcd 0xad2a 0xbd0b 0x8d68 0x9d49 + 0x7e97 0x6eb6 0x5ed5 0x4ef4 0x3e13 0x2e32 0x1e51 0x0e70 + 0xff9f 0xefbe 0xdfdd 0xcffc 0xbf1b 0xaf3a 0x9f59 0x8f78 + 0x9188 0x81a9 0xb1ca 0xa1eb 0xd10c 0xc12d 0xf14e 0xe16f + 0x1080 0x00a1 0x30c2 0x20e3 0x5004 0x4025 0x7046 0x6067 + 0x83b9 0x9398 0xa3fb 0xb3da 0xc33d 0xd31c 0xe37f 0xf35e + 0x02b1 0x1290 0x22f3 0x32d2 0x4235 0x5214 0x6277 0x7256 + 0xb5ea 0xa5cb 0x95a8 0x8589 0xf56e 0xe54f 0xd52c 0xc50d + 0x34e2 0x24c3 0x14a0 0x0481 0x7466 0x6447 0x5424 0x4405 + 0xa7db 0xb7fa 0x8799 0x97b8 0xe75f 0xf77e 0xc71d 0xd73c + 0x26d3 0x36f2 0x0691 0x16b0 0x6657 0x7676 0x4615 0x5634 + 0xd94c 0xc96d 0xf90e 0xe92f 0x99c8 0x89e9 0xb98a 0xa9ab + 0x5844 0x4865 0x7806 0x6827 0x18c0 0x08e1 0x3882 0x28a3 + 0xcb7d 0xdb5c 0xeb3f 0xfb1e 0x8bf9 0x9bd8 0xabbb 0xbb9a + 0x4a75 0x5a54 0x6a37 0x7a16 0x0af1 0x1ad0 0x2ab3 0x3a92 + 0xfd2e 0xed0f 0xdd6c 0xcd4d 0xbdaa 0xad8b 0x9de8 0x8dc9 + 0x7c26 0x6c07 0x5c64 0x4c45 0x3ca2 0x2c83 0x1ce0 0x0cc1 + 0xef1f 0xff3e 0xcf5d 0xdf7c 0xaf9b 0xbfba 0x8fd9 0x9ff8 + 0x6e17 0x7e36 0x4e55 0x5e74 0x2e93 0x3eb2 0x0ed1 0x1ef0 +} + +proc ::redis_cluster::crc16 {s} { + set s [encoding convertto ascii $s] + set crc 0 + foreach char [split $s {}] { + scan $char %c byte + set crc [expr {(($crc<<8)&0xffff) ^ [lindex $::redis_cluster::XMODEMCRC16Lookup [expr {(($crc>>8)^$byte) & 0xff}]]}] + } + return $crc +} + +# Hash a single key returning the slot it belongs to, Implemented hash +# tags as described in the Redis Cluster specification. +proc ::redis_cluster::hash {key} { + # TODO: Handle hash slots. + expr {[::redis_cluster::crc16 $key] & 16383} +} + +# Return the slot the specified keys hash to. +# If the keys hash to multiple slots, an empty string is returned to +# signal that the command can't be run in Redis Cluster. +proc ::redis_cluster::get_slot_from_keys {keys} { + set slot {} + foreach k $keys { + set s [::redis_cluster::hash $k] + if {$slot eq {}} { + set slot $s + } elseif {$slot != $s} { + return {} ; # Error + } + } + return $slot +} diff --git a/redis.submodule/utils/cluster_fail_time.tcl b/redis.submodule/utils/cluster_fail_time.tcl new file mode 100644 index 0000000..8739949 --- /dev/null +++ b/redis.submodule/utils/cluster_fail_time.tcl @@ -0,0 +1,50 @@ +# This simple script is used in order to estimate the average PFAIL->FAIL +# state switch after a failure. + +set ::sleep_time 10 ; # How much to sleep to trigger PFAIL. +set ::fail_port 30016 ; # Node to put in sleep. +set ::other_port 30001 ; # Node to use to monitor the flag switch. 
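+
+# Illustrative usage (assuming a running cluster whose nodes listen on the
+# ports configured above): run the script with tclsh and watch the running
+# average it prints, e.g.:
+#
+#   tclsh utils/cluster_fail_time.tcl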
+
+proc avg vector {
+    set sum 0.0
+    foreach x $vector {
+        set sum [expr {$sum+$x}]
+    }
+    expr {$sum/[llength $vector]}
+}
+
+set samples {}
+while 1 {
+    exec redis-cli -p $::fail_port debug sleep $::sleep_time > /dev/null &
+
+    # Wait for fail? to appear.
+    while 1 {
+        set output [exec redis-cli -p $::other_port cluster nodes]
+        if {[string match {*fail\?*} $output]} break
+        after 100
+    }
+
+    puts "FAIL?"
+    set start [clock milliseconds]
+
+    # Wait for fail? to disappear.
+    while 1 {
+        set output [exec redis-cli -p $::other_port cluster nodes]
+        if {![string match {*fail\?*} $output]} break
+        after 100
+    }
+
+    puts "FAIL"
+    set now [clock milliseconds]
+    set elapsed [expr {$now-$start}]
+    puts $elapsed
+    lappend samples $elapsed
+
+    puts "AVG([llength $samples]): [avg $samples]"
+
+    # Wait for the instance to be available again.
+    exec redis-cli -p $::fail_port ping
+
+    # Wait for the fail flag to be cleared.
+    after 2000
+}
diff --git a/redis.submodule/utils/create-cluster/.gitignore b/redis.submodule/utils/create-cluster/.gitignore
new file mode 100644
index 0000000..2988ee9
--- /dev/null
+++ b/redis.submodule/utils/create-cluster/.gitignore
@@ -0,0 +1,5 @@
+config.sh
+*.rdb
+*.aof
+*.conf
+*.log
diff --git a/redis.submodule/utils/create-cluster/README b/redis.submodule/utils/create-cluster/README
new file mode 100644
index 0000000..1f43748
--- /dev/null
+++ b/redis.submodule/utils/create-cluster/README
@@ -0,0 +1,27 @@
+Create-cluster is a small script used to easily start a large number of Redis
+instances configured to run in cluster mode. Its main goal is to allow manual
+testing in conditions which are not easy to replicate with the Redis cluster
+unit tests, for example when a lot of instances are needed in order to trigger
+a given bug.
+
+The tool can also be used just to easily create a number of instances in a
+Redis Cluster in order to experiment a bit with the system.
+
+USAGE
+---
+
+To create a cluster, follow these steps:
+
+1. Edit create-cluster and change the start / end port, depending on the
+number of instances you want to create.
+2. Use "./create-cluster start" in order to run the instances.
+3. Use "./create-cluster create" in order to execute redis-trib create, so that
+an actual Redis cluster will be created.
+4. Now you are ready to play with the cluster. AOF files and logs for each instance are created in the current directory.
+
+In order to stop a cluster:
+
+1. Use "./create-cluster stop" to stop all the instances. After you stopped the instances you can use "./create-cluster start" to restart them if you change your mind.
+2. Use "./create-cluster clean" to remove all the AOF / log files to restart with a clean environment.
+
+Use the command "./create-cluster help" to get the full list of features.
diff --git a/redis.submodule/utils/create-cluster/create-cluster b/redis.submodule/utils/create-cluster/create-cluster
new file mode 100755
index 0000000..9894149
--- /dev/null
+++ b/redis.submodule/utils/create-cluster/create-cluster
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+# Settings
+PORT=30000
+TIMEOUT=2000
+NODES=6
+REPLICAS=1
+
+# You may want to put the above config parameters into config.sh in order to
+# override the defaults without modifying this script.
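+# For example (values below are only illustrative), a config.sh overriding the
+# defaults above could contain:
+#
+#   PORT=30100
+#   NODES=10
+#   REPLICAS=2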
+ +if [ -a config.sh ] +then + source "config.sh" +fi + +# Computed vars +ENDPORT=$((PORT+NODES)) + +if [ "$1" == "start" ] +then + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + echo "Starting $PORT" + ../../src/redis-server --port $PORT --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes + done + exit 0 +fi + +if [ "$1" == "create" ] +then + HOSTS="" + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + HOSTS="$HOSTS 127.0.0.1:$PORT" + done + ../../src/redis-trib.rb create --replicas $REPLICAS $HOSTS + exit 0 +fi + +if [ "$1" == "stop" ] +then + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + echo "Stopping $PORT" + ../../src/redis-cli -p $PORT shutdown nosave + done + exit 0 +fi + +if [ "$1" == "watch" ] +then + PORT=$((PORT+1)) + while [ 1 ]; do + clear + date + ../../src/redis-cli -p $PORT cluster nodes | head -30 + sleep 1 + done + exit 0 +fi + +if [ "$1" == "tail" ] +then + INSTANCE=$2 + PORT=$((PORT+INSTANCE)) + tail -f ${PORT}.log + exit 0 +fi + +if [ "$1" == "call" ] +then + while [ $((PORT < ENDPORT)) != "0" ]; do + PORT=$((PORT+1)) + ../../src/redis-cli -p $PORT $2 $3 $4 $5 $6 $7 $8 $9 + done + exit 0 +fi + +if [ "$1" == "clean" ] +then + rm -rf *.log + rm -rf appendonly*.aof + rm -rf dump*.rdb + rm -rf nodes*.conf + exit 0 +fi + +echo "Usage: $0 [start|create|stop|watch|tail|clean]" +echo "start -- Launch Redis Cluster instances." +echo "create -- Create a cluster using redis-trib create." +echo "stop -- Stop Redis Cluster instances." +echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node." +echo "tail -- Run tail -f of instance at base port + ID." +echo "clean -- Remove all instances data, logs, configs." diff --git a/redis.submodule/utils/lru/README b/redis.submodule/utils/lru/README new file mode 100644 index 0000000..288189e --- /dev/null +++ b/redis.submodule/utils/lru/README @@ -0,0 +1,13 @@ +The test-lru.rb program can be used in order to check the behavior of the +Redis approximated LRU algorithm against the theoretical output of true +LRU algorithm. + +In order to use the program you need to recompile Redis setting the define +REDIS_LRU_CLOCK_RESOLUTION to 1, by editing redis.h. +This allows to execute the program in a fast way since the 1 ms resolution +is enough for all the objects to have a different enough time stamp during +the test. + +The program is executed like this: + + ruby test-lru.rb > /tmp/lru.html diff --git a/redis.submodule/utils/lru/test-lru.rb b/redis.submodule/utils/lru/test-lru.rb new file mode 100644 index 0000000..ee0527e --- /dev/null +++ b/redis.submodule/utils/lru/test-lru.rb @@ -0,0 +1,112 @@ +require 'rubygems' +require 'redis' + +r = Redis.new +r.config("SET","maxmemory","2000000") +r.config("SET","maxmemory-policy","allkeys-lru") +r.config("SET","maxmemory-samples",5) +r.config("RESETSTAT") +r.flushall + +puts < + + +
+EOF
+
+# Fill
+oldsize = r.dbsize
+id = 0
+while true
+    id += 1
+    r.set(id,"foo")
+    newsize = r.dbsize
+    break if newsize == oldsize
+    oldsize = newsize
+end
+
+inserted = r.dbsize
+first_set_max_id = id
+puts "#{r.dbsize} keys inserted"
+
+# Access keys sequentially
+
+puts "Access keys sequentially"
+(1..first_set_max_id).each{|id|
+    r.get(id)
+#    sleep 0.001
+}
+
+# Insert roughly 50% more keys. We expect the new keys to survive and the
+# least recently accessed half of the original keys to be evicted.
+half = inserted/2
+puts "Insert enough keys to evict half the keys we inserted"
+add = 0
+while true
+    add += 1
+    id += 1
+    r.set(id,"foo")
+    break if r.info['evicted_keys'].to_i >= half
+end
+
+puts "#{add} additional keys added."
+puts "#{r.dbsize} keys in DB"
+
+# Check if evicted keys respect LRU
+# We consider errors from 1 to N progressively more serious as they violate
+# more the access pattern.
+
+errors = 0
+e = 1
+edecr = 1.0/(first_set_max_id/2)
+(1..(first_set_max_id/2)).each{|id|
+    e -= edecr if e > 0
+    e = 0 if e < 0
+    if r.exists(id)
+        errors += e
+    end
+}
+
+puts "#{errors} errors!"
+puts "
" + +# Generate the graphical representation +(1..id).each{|id| + # Mark first set and added items in a different way. + c = "box" + if id <= first_set_max_id + c << " old" + else + c << " new" + end + + # Add class if exists + c << " ex" if r.exists(id) + puts "
" +} + +# Close HTML page + +puts < + +EOF From 69595d309ad0f1b3a9f5176972ad16307ab450c4 Mon Sep 17 00:00:00 2001 From: Dwight Hubbard Date: Mon, 30 Nov 2015 15:33:02 -0800 Subject: [PATCH 3/5] Make sure the redis update script adds new files to git --- build_scripts/update_redis_server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/build_scripts/update_redis_server.py b/build_scripts/update_redis_server.py index 8adbe99..b7f5b18 100755 --- a/build_scripts/update_redis_server.py +++ b/build_scripts/update_redis_server.py @@ -19,3 +19,4 @@ print(directory) tf.extractall(tempdir) shutil.move(os.path.join(tempdir, directory), 'redis.submodule') + os.system('git add redis.submodule') From 198aee8886e6276b283b51c2cfd3437bc01db146 Mon Sep 17 00:00:00 2001 From: Dwight Hubbard Date: Mon, 30 Nov 2015 15:36:29 -0800 Subject: [PATCH 4/5] Add the update-jemalloc.sh to the script so we don't forget it --- build_scripts/update_redis_server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/build_scripts/update_redis_server.py b/build_scripts/update_redis_server.py index b7f5b18..5116a1c 100755 --- a/build_scripts/update_redis_server.py +++ b/build_scripts/update_redis_server.py @@ -19,4 +19,5 @@ print(directory) tf.extractall(tempdir) shutil.move(os.path.join(tempdir, directory), 'redis.submodule') + os.system('(cd redis.submodule;./deps/update-jemalloc.sh 4.0.4)') os.system('git add redis.submodule') From a8b99cfc3de8c9b978429d65869573051a3a3101 Mon Sep 17 00:00:00 2001 From: Dwight Hubbard Date: Mon, 30 Nov 2015 15:48:21 -0800 Subject: [PATCH 5/5] Update the version major number to match the redis major version --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 86b3d3f..16a59e5 100755 --- a/setup.py +++ b/setup.py @@ -128,7 +128,7 @@ def run(self): # without running setup() to allow external scripts to see the setup settings. args = { 'name': 'redislite', - 'version': '1.0.62', + 'version': '3.0.0', 'author': 'Dwight Hubbard', 'author_email': 'dhubbard@yahoo-inc.com', 'url': 'https://github.com/yahoo/redislite',