diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 2d7a4e5f86..1edf76d386 100644 --- a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -65,7 +65,7 @@ jobs: run: sudo apt-get install tcl8.6 tclx - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -114,7 +114,7 @@ jobs: run: apt-get install -y tcl8.6 tclx procps - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -159,7 +159,7 @@ jobs: run: sudo apt-get install tcl8.6 tclx - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -198,7 +198,7 @@ jobs: run: sudo apt-get install tcl8.6 tclx - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -239,7 +239,7 @@ jobs: run: sudo apt-get install tcl8.6 tclx - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: | @@ -290,7 +290,7 @@ jobs: - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') run: | - ./runtest --accurate --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}} + ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: | @@ -336,7 +336,7 @@ jobs: - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') run: | - ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || 
'' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: | @@ -379,10 +379,10 @@ jobs: run: sudo apt-get install tcl8.6 tclx - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --config io-threads 2 --config events-per-io-thread 0 --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest --io-threads --accurate --verbose --tags network --dump-logs ${{github.event.inputs.test_args}} - name: cluster tests if: true && !contains(github.event.inputs.skiptests, 'cluster') - run: ./runtest-cluster --config io-threads 2 --config events-per-io-thread 0 ${{github.event.inputs.cluster_test_args}} + run: ./runtest-cluster --io-threads ${{github.event.inputs.cluster_test_args}} test-ubuntu-reclaim-cache: runs-on: ubuntu-latest @@ -757,7 +757,7 @@ jobs: run: dnf -y install tcl tcltls - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -826,7 +826,7 @@ jobs: - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') run: | - ./runtest --accurate --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} + ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs --tls-module --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: | @@ -938,7 +938,7 @@ jobs: run: make SERVER_CFLAGS='-Werror' - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --tags -ipv6 --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --clients 1 --no-latency --dump-logs ${{github.event.inputs.test_args}} @@ -1090,7 +1090,7 @@ jobs: run: apk add tcl procps tclx - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} @@ -1131,7 +1131,7 @@ jobs: run: apk add tcl procps tclx - name: test if: true && !contains(github.event.inputs.skiptests, 'valkey') - run: ./runtest --accurate --verbose --dump-logs ${{github.event.inputs.test_args}} + run: ./runtest ${{ github.event_name != 'pull_request' && '--accurate' || '' }} --verbose --dump-logs ${{github.event.inputs.test_args}} - name: module api test if: true && !contains(github.event.inputs.skiptests, 'modules') run: 
CFLAGS='-Werror' ./runtest-moduleapi --verbose --dump-logs ${{github.event.inputs.test_args}} diff --git a/COPYING b/COPYING index 2058f57e56..81d9209436 100644 --- a/COPYING +++ b/COPYING @@ -17,7 +17,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND BSD 3-Clause License -Copyright (c) 2006-2020, Salvatore Sanfilippo +Copyright (c) 2006-2020, Redis Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/README.md b/README.md index 592002d3c4..29d040ccd8 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,8 @@ [![codecov](https://codecov.io/gh/valkey-io/valkey/graph/badge.svg?token=KYYSJAYC5F)](https://codecov.io/gh/valkey-io/valkey) -This README is under construction as we work to build a new community driven high performance key-value store. - This project was forked from the open source Redis project right before the transition to their new source available licenses. -This README is just a fast *quick start* document. We are currently working on a more permanent documentation page. +This README is just a fast *quick start* document. More details can be found under [valkey.io](https://valkey.io/) What is Valkey? -------------- diff --git a/deps/fpconv/fpconv_powers.h b/deps/fpconv/fpconv_powers.h index bc488f6dd1..1855cd4bc7 100644 --- a/deps/fpconv/fpconv_powers.h +++ b/deps/fpconv/fpconv_powers.h @@ -6,7 +6,7 @@ * [1] https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf * ---------------------------------------------------------------------------- * - * Copyright (c) 2021, Redis Labs + * Copyright (c) 2021, Redis Ltd. * Copyright (c) 2013-2019, night-shift * Copyright (c) 2009, Florian Loitsch < florian.loitsch at inria dot fr > * All rights reserved. diff --git a/deps/hiredis/COPYING b/deps/hiredis/COPYING index a5fc973955..c94d2de467 100644 --- a/deps/hiredis/COPYING +++ b/deps/hiredis/COPYING @@ -1,4 +1,4 @@ -Copyright (c) 2009-2011, Salvatore Sanfilippo +Copyright (c) 2009-2011, Redis Ltd. Copyright (c) 2010-2011, Pieter Noordhuis All rights reserved. diff --git a/deps/hiredis/Makefile b/deps/hiredis/Makefile index bd2106b1d1..4a3de1f6ec 100644 --- a/deps/hiredis/Makefile +++ b/deps/hiredis/Makefile @@ -1,5 +1,5 @@ # Hiredis Makefile -# Copyright (C) 2010-2011 Salvatore Sanfilippo +# Copyright (C) 2010-2011 Redis Ltd. # Copyright (C) 2010-2011 Pieter Noordhuis # This file is released under the BSD license, see the COPYING file diff --git a/deps/hiredis/async.c b/deps/hiredis/async.c index 3d39cfaf81..1402160d88 100644 --- a/deps/hiredis/async.c +++ b/deps/hiredis/async.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. diff --git a/deps/hiredis/async.h b/deps/hiredis/async.h index 4f94660b12..98d1432a7b 100644 --- a/deps/hiredis/async.h +++ b/deps/hiredis/async.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. diff --git a/deps/hiredis/async_private.h b/deps/hiredis/async_private.h index ea0558d429..535ccd9e19 100644 --- a/deps/hiredis/async_private.h +++ b/deps/hiredis/async_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. 
* Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. diff --git a/deps/hiredis/dict.c b/deps/hiredis/dict.c index ad571818e2..7333e7ef44 100644 --- a/deps/hiredis/dict.c +++ b/deps/hiredis/dict.c @@ -5,7 +5,7 @@ * tables of power of two in size are used, collisions are handled by * chaining. See the source code for more information... :) * - * Copyright (c) 2006-2010, Salvatore Sanfilippo + * Copyright (c) 2006-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/hiredis/dict.h b/deps/hiredis/dict.h index 6ad0acd8d2..7e28cc6cd5 100644 --- a/deps/hiredis/dict.h +++ b/deps/hiredis/dict.h @@ -5,7 +5,7 @@ * tables of power of two in size are used, collisions are handled by * chaining. See the source code for more information... :) * - * Copyright (c) 2006-2010, Salvatore Sanfilippo + * Copyright (c) 2006-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/hiredis/fuzzing/format_command_fuzzer.c b/deps/hiredis/fuzzing/format_command_fuzzer.c index de125e08da..aaaaf60813 100644 --- a/deps/hiredis/fuzzing/format_command_fuzzer.c +++ b/deps/hiredis/fuzzing/format_command_fuzzer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Salvatore Sanfilippo + * Copyright (c) 2020, Redis Ltd. * Copyright (c) 2020, Pieter Noordhuis * Copyright (c) 2020, Matt Stancliff , * Jan-Erik Rediger diff --git a/deps/hiredis/hiredis.c b/deps/hiredis/hiredis.c index 8012035a05..30d6a231e9 100644 --- a/deps/hiredis/hiredis.c +++ b/deps/hiredis/hiredis.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2014, Pieter Noordhuis * Copyright (c) 2015, Matt Stancliff , * Jan-Erik Rediger diff --git a/deps/hiredis/hiredis.h b/deps/hiredis/hiredis.h index 635988b7e1..84ff3af559 100644 --- a/deps/hiredis/hiredis.h +++ b/deps/hiredis/hiredis.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2014, Pieter Noordhuis * Copyright (c) 2015, Matt Stancliff , * Jan-Erik Rediger diff --git a/deps/hiredis/hiredis_ssl.h b/deps/hiredis/hiredis_ssl.h index 5f92cca9bc..093fe3a000 100644 --- a/deps/hiredis/hiredis_ssl.h +++ b/deps/hiredis/hiredis_ssl.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2019, Redis Labs + * Copyright (c) 2019, Redis Ltd. * * All rights reserved. * diff --git a/deps/hiredis/net.c b/deps/hiredis/net.c index 33fe0b94f4..698fd5cec0 100644 --- a/deps/hiredis/net.c +++ b/deps/hiredis/net.c @@ -1,6 +1,6 @@ /* Extracted from anet.c to work properly with Hiredis error reporting. * - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2014, Pieter Noordhuis * Copyright (c) 2015, Matt Stancliff , * Jan-Erik Rediger diff --git a/deps/hiredis/net.h b/deps/hiredis/net.h index e15d462647..d3372a168f 100644 --- a/deps/hiredis/net.h +++ b/deps/hiredis/net.h @@ -1,6 +1,6 @@ /* Extracted from anet.c to work properly with Hiredis error reporting. * - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. 
* Copyright (c) 2010-2014, Pieter Noordhuis * Copyright (c) 2015, Matt Stancliff , * Jan-Erik Rediger diff --git a/deps/hiredis/read.c b/deps/hiredis/read.c index dd038c55bd..1198d7063b 100644 --- a/deps/hiredis/read.c +++ b/deps/hiredis/read.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. diff --git a/deps/hiredis/read.h b/deps/hiredis/read.h index 2d74d77a5b..8c6ec41fc0 100644 --- a/deps/hiredis/read.h +++ b/deps/hiredis/read.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2011, Pieter Noordhuis * * All rights reserved. diff --git a/deps/hiredis/sds.c b/deps/hiredis/sds.c index ac2b483525..49cbf57ffd 100644 --- a/deps/hiredis/sds.c +++ b/deps/hiredis/sds.c @@ -1,8 +1,7 @@ /* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Redis Ltd. * Copyright (c) 2015, Oran Agra - * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/hiredis/sds.h b/deps/hiredis/sds.h index 7f0ca5dfa5..304dd1290f 100644 --- a/deps/hiredis/sds.h +++ b/deps/hiredis/sds.h @@ -1,8 +1,7 @@ /* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Redis Ltd. * Copyright (c) 2015, Oran Agra - * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/hiredis/sdsalloc.h b/deps/hiredis/sdsalloc.h index c9dcc3df8e..962b61ac8d 100644 --- a/deps/hiredis/sdsalloc.h +++ b/deps/hiredis/sdsalloc.h @@ -1,8 +1,7 @@ /* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Redis Ltd. * Copyright (c) 2015, Oran Agra - * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/deps/hiredis/ssl.c b/deps/hiredis/ssl.c index 9ab18cc0e5..5cfa45d626 100644 --- a/deps/hiredis/ssl.c +++ b/deps/hiredis/ssl.c @@ -1,7 +1,7 @@ /* - * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2009-2011, Redis Ltd. * Copyright (c) 2010-2011, Pieter Noordhuis - * Copyright (c) 2019, Redis Labs + * Copyright (c) 2019, Redis Ltd. * * All rights reserved. * diff --git a/deps/linenoise/linenoise.c b/deps/linenoise/linenoise.c index 36d266f891..ce2d2b7c42 100644 --- a/deps/linenoise/linenoise.c +++ b/deps/linenoise/linenoise.c @@ -10,7 +10,7 @@ * * ------------------------------------------------------------------------ * - * Copyright (c) 2010-2016, Salvatore Sanfilippo + * Copyright (c) 2010-2016, Redis Ltd. * Copyright (c) 2010-2013, Pieter Noordhuis * * All rights reserved. diff --git a/deps/linenoise/linenoise.h b/deps/linenoise/linenoise.h index beac6df467..ca351fb8b2 100644 --- a/deps/linenoise/linenoise.h +++ b/deps/linenoise/linenoise.h @@ -7,7 +7,7 @@ * * ------------------------------------------------------------------------ * - * Copyright (c) 2010-2014, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Redis Ltd. * Copyright (c) 2010-2013, Pieter Noordhuis * * All rights reserved. 
diff --git a/deps/lua/src/lua_cmsgpack.c b/deps/lua/src/lua_cmsgpack.c index 5f8929d454..9ac967e23e 100644 --- a/deps/lua/src/lua_cmsgpack.c +++ b/deps/lua/src/lua_cmsgpack.c @@ -10,7 +10,7 @@ #define LUACMSGPACK_NAME "cmsgpack" #define LUACMSGPACK_SAFE_NAME "cmsgpack_safe" #define LUACMSGPACK_VERSION "lua-cmsgpack 0.4.0" -#define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Salvatore Sanfilippo" +#define LUACMSGPACK_COPYRIGHT "Copyright (C) 2012, Redis Ltd." #define LUACMSGPACK_DESCRIPTION "MessagePack C implementation for Lua" /* Allows a preprocessor directive to override MAX_NESTING */ @@ -39,7 +39,7 @@ /* ============================================================================= * MessagePack implementation and bindings for Lua 5.1/5.2. - * Copyright(C) 2012 Salvatore Sanfilippo + * Copyright(C) 2012 Redis Ltd. * * http://github.com/antirez/lua-cmsgpack * @@ -958,7 +958,7 @@ LUALIB_API int luaopen_cmsgpack_safe(lua_State *L) { } /****************************************************************************** -* Copyright (C) 2012 Salvatore Sanfilippo. All rights reserved. +* Copyright (C) 2012 Redis Ltd. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the diff --git a/src/Makefile b/src/Makefile index eaf0e4e387..d69bd915dc 100644 --- a/src/Makefile +++ b/src/Makefile @@ -1,5 +1,5 @@ # Valkey Makefile -# Copyright (C) 2009 Salvatore Sanfilippo +# Copyright (C) 2009 Redis Ltd. # This file is released under the BSD license, see the COPYING file # # The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using diff --git a/src/acl.c b/src/acl.c index 51aa567165..89ac04a31e 100644 --- a/src/acl.c +++ b/src/acl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Salvatore Sanfilippo + * Copyright (c) 2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/adlist.c b/src/adlist.c index 05f7eccb6f..11b152592b 100644 --- a/src/adlist.c +++ b/src/adlist.c @@ -1,6 +1,6 @@ /* adlist.c - A generic doubly linked list implementation * - * Copyright (c) 2006-2010, Salvatore Sanfilippo + * Copyright (c) 2006-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/adlist.h b/src/adlist.h index 6d28f3abc1..bfc4280434 100644 --- a/src/adlist.h +++ b/src/adlist.h @@ -1,6 +1,6 @@ /* adlist.h - A generic doubly linked list implementation * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/ae.c b/src/ae.c index 36b6131410..ef7ba7bd67 100644 --- a/src/ae.c +++ b/src/ae.c @@ -2,7 +2,7 @@ * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated * it in form of a library for easy reuse. * - * Copyright (c) 2006-2010, Salvatore Sanfilippo + * Copyright (c) 2006-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/ae.h b/src/ae.h index 652b42a8f5..1156819c9c 100644 --- a/src/ae.h +++ b/src/ae.h @@ -2,7 +2,7 @@ * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated * it in form of a library for easy reuse. * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2012, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/ae_epoll.c b/src/ae_epoll.c index c8b4ac743f..b2410ca29a 100644 --- a/src/ae_epoll.c +++ b/src/ae_epoll.c @@ -1,6 +1,6 @@ /* Linux epoll(2) based ae.c module * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/ae_select.c b/src/ae_select.c index 08ed381aba..12db491706 100644 --- a/src/ae_select.c +++ b/src/ae_select.c @@ -1,6 +1,6 @@ /* Select()-based ae.c module. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/anet.c b/src/anet.c index d0547b7769..d4ac698982 100644 --- a/src/anet.c +++ b/src/anet.c @@ -1,6 +1,6 @@ /* anet.c -- Basic TCP socket stuff made a bit less boring * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -45,6 +45,7 @@ #include #include #include +#include #include "anet.h" #include "config.h" @@ -505,7 +506,7 @@ int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port, return anetTcpGenericConnect(err, addr, port, source_addr, ANET_CONNECT_NONBLOCK | ANET_CONNECT_BE_BINDING); } -static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog, mode_t perm) { +static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int backlog, mode_t perm, char *group) { if (bind(s, sa, len) == -1) { anetSetError(err, "bind: %s", strerror(errno)); close(s); @@ -514,6 +515,22 @@ static int anetListen(char *err, int s, struct sockaddr *sa, socklen_t len, int if (sa->sa_family == AF_LOCAL && perm) chmod(((struct sockaddr_un *)sa)->sun_path, perm); + if (sa->sa_family == AF_LOCAL && group != NULL) { + struct group *grp; + if ((grp = getgrnam(group)) == NULL) { + anetSetError(err, "getgrnam error for group '%s': %s", group, strerror(errno)); + close(s); + return ANET_ERR; + } + + /* Owner of the socket remains same. 
*/ + if (chown(((struct sockaddr_un *)sa)->sun_path, -1, grp->gr_gid) == -1) { + anetSetError(err, "chown error for group '%s': %s", group, strerror(errno)); + close(s); + return ANET_ERR; + } + } + if (listen(s, backlog) == -1) { anetSetError(err, "listen: %s", strerror(errno)); close(s); @@ -553,7 +570,7 @@ static int _anetTcpServer(char *err, int port, char *bindaddr, int af, int backl if (af == AF_INET6 && anetV6Only(err, s) == ANET_ERR) goto error; if (anetSetReuseAddr(err, s) == ANET_ERR) goto error; - if (anetListen(err, s, p->ai_addr, p->ai_addrlen, backlog, 0) == ANET_ERR) s = ANET_ERR; + if (anetListen(err, s, p->ai_addr, p->ai_addrlen, backlog, 0, NULL) == ANET_ERR) s = ANET_ERR; goto end; } if (p == NULL) { @@ -577,7 +594,7 @@ int anetTcp6Server(char *err, int port, char *bindaddr, int backlog) { return _anetTcpServer(err, port, bindaddr, AF_INET6, backlog); } -int anetUnixServer(char *err, char *path, mode_t perm, int backlog) { +int anetUnixServer(char *err, char *path, mode_t perm, int backlog, char *group) { int s; struct sockaddr_un sa; @@ -593,7 +610,7 @@ int anetUnixServer(char *err, char *path, mode_t perm, int backlog) { memset(&sa, 0, sizeof(sa)); sa.sun_family = AF_LOCAL; valkey_strlcpy(sa.sun_path, path, sizeof(sa.sun_path)); - if (anetListen(err, s, (struct sockaddr *)&sa, sizeof(sa), backlog, perm) == ANET_ERR) return ANET_ERR; + if (anetListen(err, s, (struct sockaddr *)&sa, sizeof(sa), backlog, perm, group) == ANET_ERR) return ANET_ERR; return s; } diff --git a/src/anet.h b/src/anet.h index c3642f8e72..ab32f72e4b 100644 --- a/src/anet.h +++ b/src/anet.h @@ -1,6 +1,6 @@ /* anet.c -- Basic TCP socket stuff made a bit less boring * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -56,7 +56,7 @@ int anetTcpNonBlockBestEffortBindConnect(char *err, const char *addr, int port, int anetResolve(char *err, char *host, char *ipbuf, size_t ipbuf_len, int flags); int anetTcpServer(char *err, int port, char *bindaddr, int backlog); int anetTcp6Server(char *err, int port, char *bindaddr, int backlog); -int anetUnixServer(char *err, char *path, mode_t perm, int backlog); +int anetUnixServer(char *err, char *path, mode_t perm, int backlog, char *group); int anetTcpAccept(char *err, int serversock, char *ip, size_t ip_len, int *port); int anetUnixAccept(char *err, int serversock); int anetNonBlock(char *err, int fd); diff --git a/src/aof.c b/src/aof.c index d43616c888..b0b277c06d 100644 --- a/src/aof.c +++ b/src/aof.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/asciilogo.h b/src/asciilogo.h index bd20defa9e..2b2b381758 100644 --- a/src/asciilogo.h +++ b/src/asciilogo.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * Copyright (c) 2024, Valkey contributors * All rights reserved. * diff --git a/src/atomicvar.h b/src/atomicvar.h index d79cf4c9d7..62235e24ae 100644 --- a/src/atomicvar.h +++ b/src/atomicvar.h @@ -45,7 +45,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2015, Salvatore Sanfilippo + * Copyright (c) 2015, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/bio.c b/src/bio.c index 11692e77ef..e55c729f74 100644 --- a/src/bio.c +++ b/src/bio.c @@ -31,7 +31,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/bio.h b/src/bio.h index 0d1fe9b4b9..80cf515380 100644 --- a/src/bio.h +++ b/src/bio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/bitops.c b/src/bitops.c index 2094bb0ea9..10c383b270 100644 --- a/src/bitops.c +++ b/src/bitops.c @@ -1,6 +1,6 @@ /* Bit operations. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/blocked.c b/src/blocked.c index 56dc02dec0..8e1974a703 100644 --- a/src/blocked.c +++ b/src/blocked.c @@ -1,6 +1,6 @@ /* blocked.c - generic support for blocking operations like BLPOP & WAIT. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -76,10 +76,13 @@ static void releaseBlockedEntry(client *c, dictEntry *de, int remove_key); void initClientBlockingState(client *c) { c->bstate.btype = BLOCKED_NONE; c->bstate.timeout = 0; + c->bstate.unblock_on_nokey = 0; c->bstate.keys = dictCreate(&objectKeyHeapPointerValueDictType); c->bstate.numreplicas = 0; + c->bstate.numlocal = 0; c->bstate.reploffset = 0; - c->bstate.unblock_on_nokey = 0; + c->bstate.generic_blocked_list_node = NULL; + c->bstate.module_blocked_handle = NULL; c->bstate.async_rm_call_handle = NULL; } @@ -191,8 +194,9 @@ void unblockClient(client *c, int queue_for_reprocessing) { if (moduleClientIsBlockedOnKeys(c)) unblockClientWaitingData(c); unblockClientFromModule(c); } else if (c->bstate.btype == BLOCKED_POSTPONE) { - listDelNode(server.postponed_clients, c->postponed_list_node); - c->postponed_list_node = NULL; + serverAssert(c->bstate.postponed_list_node); + listDelNode(server.postponed_clients, c->bstate.postponed_list_node); + c->bstate.postponed_list_node = NULL; } else if (c->bstate.btype == BLOCKED_SHUTDOWN) { /* No special cleanup. */ } else { @@ -595,6 +599,11 @@ void blockClientForReplicaAck(client *c, mstime_t timeout, long long offset, lon c->bstate.numreplicas = numreplicas; c->bstate.numlocal = numlocal; listAddNodeHead(server.clients_waiting_acks, c); + /* Note that we remember the linked list node where the client is stored, + * this way removing the client in unblockClientWaitingReplicas() will not + * require a linear scan, but just a constant time operation. 
*/ + serverAssert(c->bstate.client_waiting_acks_list_node == NULL); + c->bstate.client_waiting_acks_list_node = listFirst(server.clients_waiting_acks); blockClient(c, BLOCKED_WAIT); } @@ -605,7 +614,8 @@ void blockPostponeClient(client *c) { c->bstate.timeout = 0; blockClient(c, BLOCKED_POSTPONE); listAddNodeTail(server.postponed_clients, c); - c->postponed_list_node = listLast(server.postponed_clients); + serverAssert(c->bstate.postponed_list_node == NULL); + c->bstate.postponed_list_node = listLast(server.postponed_clients); /* Mark this client to execute its command */ c->flag.pending_command = 1; } diff --git a/src/call_reply.c b/src/call_reply.c index dcb05cc850..00d196081e 100644 --- a/src/call_reply.c +++ b/src/call_reply.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2021, Redis Labs Ltd. + * Copyright (c) 2009-2021, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/call_reply.h b/src/call_reply.h index 3ca1dd7c4f..3c63cf0ec1 100644 --- a/src/call_reply.h +++ b/src/call_reply.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2021, Redis Labs Ltd. + * Copyright (c) 2009-2021, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/childinfo.c b/src/childinfo.c index 19a5fa045c..f9a90a23cc 100644 --- a/src/childinfo.c +++ b/src/childinfo.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/cli_common.c b/src/cli_common.c index 24d796de67..01d20ed4ea 100644 --- a/src/cli_common.c +++ b/src/cli_common.c @@ -1,6 +1,6 @@ /* CLI (command line interface) common methods * - * Copyright (c) 2020, Redis Labs + * Copyright (c) 2020, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/cluster.c b/src/cluster.c index 162004d703..ced7668d65 100644 --- a/src/cluster.c +++ b/src/cluster.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c index b35b0b3b8e..4f1e09d4ef 100644 --- a/src/cluster_legacy.c +++ b/src/cluster_legacy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -62,14 +62,13 @@ void clusterSendPing(clusterLink *link, int type); void clusterSendFail(char *nodename); void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request); void clusterUpdateState(void); -int clusterNodeCoversSlot(clusterNode *n, int slot); list *clusterGetNodesInMyShard(clusterNode *node); int clusterNodeAddReplica(clusterNode *primary, clusterNode *replica); int clusterAddSlot(clusterNode *n, int slot); int clusterDelSlot(int slot); int clusterDelNodeSlots(clusterNode *node); int clusterNodeSetSlotBit(clusterNode *n, int slot); -void clusterSetPrimary(clusterNode *n, int closeSlots); +static void clusterSetPrimary(clusterNode *n, int closeSlots, int full_sync_required); void clusterHandleReplicaFailover(void); void clusterHandleReplicaMigration(int max_replicas); int bitmapTestBit(unsigned char *bitmap, int pos); @@ -93,7 +92,6 @@ void moduleCallClusterReceivers(const char *sender_id, uint32_t len); const char *clusterGetMessageTypeString(int type); void removeChannelsInSlot(unsigned int slot); -unsigned int countKeysInSlot(unsigned int hashslot); unsigned int countChannelsInSlot(unsigned int hashslot); unsigned int delKeysInSlot(unsigned int hashslot); void clusterAddNodeToShard(const char *shard_id, clusterNode *node); @@ -2370,7 +2368,7 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, clusterMsg * /* Check if this is our primary and we have to change the * replication target as well. */ if (nodeIsReplica(myself) && myself->replicaof == node) - replicationSetPrimary(node->ip, getNodeDefaultReplicationPort(node)); + replicationSetPrimary(node->ip, getNodeDefaultReplicationPort(node), 0); return 1; } @@ -2432,6 +2430,9 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc return; } + /* Sender and myself in the same shard? */ + int are_in_same_shard = areInSameShard(sender, myself); + for (j = 0; j < CLUSTER_SLOTS; j++) { if (bitmapTestBit(slots, j)) { sender_slots++; @@ -2474,7 +2475,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * the same shard and we should retain the migrating_slots_to state * for the slot in question */ if (server.cluster->migrating_slots_to[j] != NULL) { - if (!areInSameShard(sender, myself)) { + if (!are_in_same_shard) { serverLog(LL_NOTICE, "Slot %d is no longer being migrated to node %.40s (%s) in shard %.40s.", j, server.cluster->migrating_slots_to[j]->name, server.cluster->migrating_slots_to[j]->human_nodename, @@ -2595,7 +2596,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * the new primary if my current config epoch is lower than the * sender's. */ if (!new_primary && myself->replicaof != sender && sender_slots == 0 && myself->numslots == 0 && - nodeEpoch(myself) < senderConfigEpoch && areInSameShard(sender, myself)) { + nodeEpoch(myself) < senderConfigEpoch && are_in_same_shard) { new_primary = sender; } @@ -2619,16 +2620,18 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc * sender. In this case we don't reconfigure ourselves as a replica * of the sender. */ if (new_primary && cur_primary->numslots == 0) { - if (server.cluster_allow_replica_migration || areInSameShard(sender, myself)) { + if (server.cluster_allow_replica_migration || are_in_same_shard) { serverLog(LL_NOTICE, "Configuration change detected. 
Reconfiguring myself " "as a replica of node %.40s (%s) in shard %.40s", sender->name, sender->human_nodename, sender->shard_id); /* Don't clear the migrating/importing states if this is a replica that - * just gets promoted to the new primary in the shard. */ - clusterSetPrimary(sender, !areInSameShard(sender, myself)); + * just gets promoted to the new primary in the shard. + * + * If the sender and myself are in the same shard, try psync. */ + clusterSetPrimary(sender, !are_in_same_shard, !are_in_same_shard); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); - } else if ((sender_slots >= migrated_our_slots) && !areInSameShard(sender, myself)) { + } else if ((sender_slots >= migrated_our_slots) && !are_in_same_shard) { /* When all our slots are lost to the sender and the sender belongs to * a different shard, this is likely due to a client triggered slot * migration. Don't reconfigure this node to migrate to the new shard @@ -3383,12 +3386,19 @@ int clusterProcessPacket(clusterLink *link) { /* Explicitly check for a replication loop before attempting the replication * chain folding logic. */ if (myself->replicaof && myself->replicaof->replicaof && myself->replicaof->replicaof != myself) { - /* Safeguard against sub-replicas. A replica's primary can turn itself - * into a replica if its last slot is removed. If no other node takes - * over the slot, there is nothing else to trigger replica migration. */ + /* Safeguard against sub-replicas. + * + * A replica's primary can turn itself into a replica if its last slot + * is removed. If no other node takes over the slot, there is nothing + * else to trigger replica migration. In this case, they are not in the + * same shard, so a full sync is required. + * + * Or a replica's primary can turn itself into a replica of its other + * replica during a failover. In this case, they are in the same shard, + * so we can try a psync. */ serverLog(LL_NOTICE, "I'm a sub-replica! Reconfiguring myself as a replica of %.40s from %.40s", myself->replicaof->replicaof->name, myself->replicaof->name); - clusterSetPrimary(myself->replicaof->replicaof, 1); + clusterSetPrimary(myself->replicaof->replicaof, 1, !areInSameShard(myself->replicaof->replicaof, myself)); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } @@ -4250,14 +4260,14 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { * with CLUSTERMSG_FLAG0_FORCEACK (manual failover). 
*/ if (clusterNodeIsPrimary(node) || primary == NULL || (!nodeFailed(primary) && !force_ack)) { if (clusterNodeIsPrimary(node)) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): it is a primary node", node->name, - node->human_nodename); + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s) for epoch %llu: it is a primary node", node->name, + node->human_nodename, (unsigned long long)requestCurrentEpoch); } else if (primary == NULL) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): I don't know its primary", node->name, - node->human_nodename); + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s) for epoch %llu: I don't know its primary", + node->name, node->human_nodename, (unsigned long long)requestCurrentEpoch); } else if (!nodeFailed(primary)) { - serverLog(LL_WARNING, "Failover auth denied to %.40s (%s): its primary is up", node->name, - node->human_nodename); + serverLog(LL_WARNING, "Failover auth denied to %.40s (%s) for epoch %llu: its primary is up", node->name, + node->human_nodename, (unsigned long long)requestCurrentEpoch); } return; } @@ -4692,7 +4702,9 @@ void clusterHandleReplicaMigration(int max_replicas) { !(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) { serverLog(LL_NOTICE, "Migrating to orphaned primary %.40s (%s) in shard %.40s", target->name, target->human_nodename, target->shard_id); - clusterSetPrimary(target, 1); + /* We are migrating to a different shard that has a completely different + * replication history, so a full sync is required. */ + clusterSetPrimary(target, 1, 1); } } @@ -5005,7 +5017,7 @@ void clusterCron(void) { * enable it if we know the address of our primary and it appears to * be up. */ if (nodeIsReplica(myself) && server.primary_host == NULL && myself->replicaof && nodeHasAddr(myself->replicaof)) { - replicationSetPrimary(myself->replicaof->ip, getNodeDefaultReplicationPort(myself->replicaof)); + replicationSetPrimary(myself->replicaof->ip, getNodeDefaultReplicationPort(myself->replicaof), 0); } /* Abort a manual failover if the timeout is reached. */ @@ -5398,7 +5410,7 @@ static inline void removeAllNotOwnedShardChannelSubscriptions(void) { /* Set the specified node 'n' as primary for this node. * If this node is currently a primary, it is turned into a replica. */ -void clusterSetPrimary(clusterNode *n, int closeSlots) { +static void clusterSetPrimary(clusterNode *n, int closeSlots, int full_sync_required) { serverAssert(n != myself); serverAssert(myself->numslots == 0); @@ -5412,7 +5424,7 @@ void clusterSetPrimary(clusterNode *n, int closeSlots) { myself->replicaof = n; updateShardId(myself, n->shard_id); clusterNodeAddReplica(n, myself); - replicationSetPrimary(n->ip, getNodeDefaultReplicationPort(n)); + replicationSetPrimary(n->ip, getNodeDefaultReplicationPort(n), full_sync_required); removeAllNotOwnedShardChannelSubscriptions(); resetManualFailover(); @@ -6259,7 +6271,7 @@ void clusterCommandSetSlot(client *c) { * To mitigate this issue, the following order needs to be enforced for slot * migration finalization such that the replicas finalize the slot ownership * before the primary: - . * + * * 1. Client C issues SETSLOT n NODE B against node B. * 2. Primary B replicates `SETSLOT n NODE B` to all of its replicas (e.g., B', B''). * 3. 
Upon replication completion, primary B executes `SETSLOT n NODE B` and @@ -6279,17 +6291,25 @@ void clusterCommandSetSlot(client *c) { * non-replicated behavior.*/ listIter li; listNode *ln; - int legacy_replica_found = 0; + int num_eligible_replicas = 0; listRewind(server.replicas, &li); while ((ln = listNext(&li))) { client *r = ln->value; - if (r->replica_version < 0x702ff /* 7.2.255 */) { - legacy_replica_found++; - break; + + /* We think that when the command comes in, the primary only needs to + * wait for the online replicas. The admin can easily check if there + * are replicas that are down for an extended period of time. If they + * decide to move forward anyways, we should not block it. If a replica + * failed right before the replication and was not included in the + * replication, it would also unlikely win the election. + * + * And 0x702ff is 7.2.255, we only support new versions in this case. */ + if (r->repl_state == REPLICA_STATE_ONLINE && r->replica_version > 0x702ff) { + num_eligible_replicas++; } } - if (!legacy_replica_found) { + if (num_eligible_replicas != 0) { forceCommandPropagation(c, PROPAGATE_REPL); /* We are a primary and this is the first time we see this `SETSLOT` * command. Force-replicate the command to all of our replicas @@ -6299,7 +6319,7 @@ void clusterCommandSetSlot(client *c) { * 2. The repl offset target is set to the primary's current repl offset + 1. * There is no concern of partial replication because replicas always * ack the repl offset at the command boundary. */ - blockClientForReplicaAck(c, timeout_ms, server.primary_repl_offset + 1, myself->num_replicas, 0); + blockClientForReplicaAck(c, timeout_ms, server.primary_repl_offset + 1, num_eligible_replicas, 0); /* Mark client as pending command for execution after replication to replicas. */ c->flag.pending_command = 1; replicationRequestAckFromReplicas(); @@ -6308,7 +6328,7 @@ void clusterCommandSetSlot(client *c) { } /* Slot states have been updated on the compatible replicas (if any). - * Now exuecte the command on the primary. */ + * Now execute the command on the primary. */ if (!strcasecmp(c->argv[3]->ptr, "migrating")) { serverLog(LL_NOTICE, "Migrating slot %d to node %.40s (%s)", slot, n->name, n->human_nodename); server.cluster->migrating_slots_to[slot] = n; @@ -6343,7 +6363,9 @@ void clusterCommandSetSlot(client *c) { "Lost my last slot during slot migration. Reconfiguring myself " "as a replica of %.40s (%s) in shard %.40s", n->name, n->human_nodename, n->shard_id); - clusterSetPrimary(n, 1); + /* We are migrating to a different shard that has a completely different + * replication history, so a full sync is required. */ + clusterSetPrimary(n, 1, 1); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG | CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_FSYNC_CONFIG); } @@ -6548,8 +6570,18 @@ int clusterCommandSpecial(client *c) { return 1; } - /* Set the primary. */ - clusterSetPrimary(n, 1); + /* If `n` is already my primary, there is no need to re-establish the + * replication connection. */ + if (myself->replicaof == n) { + addReply(c, shared.ok); + return 1; + } + + /* Set the primary. + * If the instance is a primary, it is an empty primary. + * If the instance is a replica, it had a totally different replication history. + * In these both cases, myself as a replica has to do a full sync. 
*/ + clusterSetPrimary(n, 1, 1); clusterBroadcastPong(CLUSTER_BROADCAST_ALL); clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE | CLUSTER_TODO_SAVE_CONFIG); addReply(c, shared.ok); diff --git a/src/commands.def b/src/commands.def index 4559c0aefe..cd9f8e2984 100644 --- a/src/commands.def +++ b/src/commands.def @@ -5605,6 +5605,28 @@ struct COMMAND_ARG SENTINEL_GET_MASTER_ADDR_BY_NAME_Args[] = { {MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; +/********** SENTINEL GET_PRIMARY_ADDR_BY_NAME ********************/ + +#ifndef SKIP_CMD_HISTORY_TABLE +/* SENTINEL GET_PRIMARY_ADDR_BY_NAME history */ +#define SENTINEL_GET_PRIMARY_ADDR_BY_NAME_History NULL +#endif + +#ifndef SKIP_CMD_TIPS_TABLE +/* SENTINEL GET_PRIMARY_ADDR_BY_NAME tips */ +#define SENTINEL_GET_PRIMARY_ADDR_BY_NAME_Tips NULL +#endif + +#ifndef SKIP_CMD_KEY_SPECS_TABLE +/* SENTINEL GET_PRIMARY_ADDR_BY_NAME key specs */ +#define SENTINEL_GET_PRIMARY_ADDR_BY_NAME_Keyspecs NULL +#endif + +/* SENTINEL GET_PRIMARY_ADDR_BY_NAME argument table */ +struct COMMAND_ARG SENTINEL_GET_PRIMARY_ADDR_BY_NAME_Args[] = { +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +}; + /********** SENTINEL HELP ********************/ #ifndef SKIP_CMD_HISTORY_TABLE @@ -5669,6 +5691,31 @@ struct COMMAND_ARG SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args[] = { {MAKE_ARG("runid",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, }; +/********** SENTINEL IS_PRIMARY_DOWN_BY_ADDR ********************/ + +#ifndef SKIP_CMD_HISTORY_TABLE +/* SENTINEL IS_PRIMARY_DOWN_BY_ADDR history */ +#define SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_History NULL +#endif + +#ifndef SKIP_CMD_TIPS_TABLE +/* SENTINEL IS_PRIMARY_DOWN_BY_ADDR tips */ +#define SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_Tips NULL +#endif + +#ifndef SKIP_CMD_KEY_SPECS_TABLE +/* SENTINEL IS_PRIMARY_DOWN_BY_ADDR key specs */ +#define SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_Keyspecs NULL +#endif + +/* SENTINEL IS_PRIMARY_DOWN_BY_ADDR argument table */ +struct COMMAND_ARG SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_Args[] = { +{MAKE_ARG("ip",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("port",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("current-epoch",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +{MAKE_ARG("runid",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +}; + /********** SENTINEL MASTER ********************/ #ifndef SKIP_CMD_HISTORY_TABLE @@ -5767,6 +5814,45 @@ struct COMMAND_ARG SENTINEL_MONITOR_Args[] = { #define SENTINEL_PENDING_SCRIPTS_Keyspecs NULL #endif +/********** SENTINEL PRIMARIES ********************/ + +#ifndef SKIP_CMD_HISTORY_TABLE +/* SENTINEL PRIMARIES history */ +#define SENTINEL_PRIMARIES_History NULL +#endif + +#ifndef SKIP_CMD_TIPS_TABLE +/* SENTINEL PRIMARIES tips */ +#define SENTINEL_PRIMARIES_Tips NULL +#endif + +#ifndef SKIP_CMD_KEY_SPECS_TABLE +/* SENTINEL PRIMARIES key specs */ +#define SENTINEL_PRIMARIES_Keyspecs NULL +#endif + +/********** SENTINEL PRIMARY ********************/ + +#ifndef SKIP_CMD_HISTORY_TABLE +/* SENTINEL PRIMARY history */ +#define SENTINEL_PRIMARY_History NULL +#endif + +#ifndef SKIP_CMD_TIPS_TABLE +/* SENTINEL PRIMARY tips */ +#define SENTINEL_PRIMARY_Tips NULL +#endif + +#ifndef SKIP_CMD_KEY_SPECS_TABLE +/* SENTINEL PRIMARY key specs */ +#define SENTINEL_PRIMARY_Keyspecs NULL +#endif + +/* SENTINEL PRIMARY argument table */ +struct COMMAND_ARG SENTINEL_PRIMARY_Args[] = { +{MAKE_ARG("primary-name",ARG_TYPE_STRING,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)}, +}; + 
/********** SENTINEL REMOVE ********************/ #ifndef SKIP_CMD_HISTORY_TABLE @@ -5942,15 +6028,19 @@ struct COMMAND_STRUCT SENTINEL_Subcommands[] = { {MAKE_CMD("debug","Lists or updates the current configurable parameters of Sentinel.","O(N) where N is the number of configurable parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,0,SENTINEL_DEBUG_Tips,0,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_DEBUG_Keyspecs,0,NULL,1),.args=SENTINEL_DEBUG_Args}, {MAKE_CMD("failover","Forces a Sentinel failover.",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_FAILOVER_History,0,SENTINEL_FAILOVER_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_FAILOVER_Keyspecs,0,NULL,1),.args=SENTINEL_FAILOVER_Args}, {MAKE_CMD("flushconfig","Rewrites the Sentinel configuration file.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_FLUSHCONFIG_History,0,SENTINEL_FLUSHCONFIG_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_FLUSHCONFIG_Keyspecs,0,NULL,0)}, -{MAKE_CMD("get-master-addr-by-name","Returns the port and address of a primary instance.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Keyspecs,0,NULL,1),.args=SENTINEL_GET_MASTER_ADDR_BY_NAME_Args}, +{MAKE_CMD("get-master-addr-by-name","Returns the port and address of a primary instance.","O(1)","2.8.4",CMD_DOC_DEPRECATED,"`SENTINEL GET-PRIMARY-ADDR-BY-NAME`","8.0.0","sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_GET_MASTER_ADDR_BY_NAME_History,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_GET_MASTER_ADDR_BY_NAME_Keyspecs,0,NULL,1),.args=SENTINEL_GET_MASTER_ADDR_BY_NAME_Args}, +{MAKE_CMD("get-primary-addr-by-name","Returns the port and address of a primary instance.","O(1)","8.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_GET_PRIMARY_ADDR_BY_NAME_History,0,SENTINEL_GET_PRIMARY_ADDR_BY_NAME_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_GET_PRIMARY_ADDR_BY_NAME_Keyspecs,0,NULL,1),.args=SENTINEL_GET_PRIMARY_ADDR_BY_NAME_Args}, {MAKE_CMD("help","Returns helpful text about the different subcommands.","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_HELP_History,0,SENTINEL_HELP_Tips,0,sentinelCommand,2,CMD_LOADING|CMD_STALE|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_HELP_Keyspecs,0,NULL,0)}, {MAKE_CMD("info-cache","Returns the cached `INFO` replies from the deployment's instances.","O(N) where N is the number of instances","3.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_INFO_CACHE_History,0,SENTINEL_INFO_CACHE_Tips,0,sentinelCommand,-3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_INFO_CACHE_Keyspecs,0,NULL,1),.args=SENTINEL_INFO_CACHE_Args}, -{MAKE_CMD("is-master-down-by-addr","Determines whether a primary instance is down.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Tips,0,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Keyspecs,0,NULL,4),.args=SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args}, -{MAKE_CMD("master","Returns the state of a primary 
instance.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,0,SENTINEL_MASTER_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTER_Keyspecs,0,NULL,1),.args=SENTINEL_MASTER_Args}, -{MAKE_CMD("masters","Returns a list of monitored primaries.","O(N) where N is the number of primaries","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTERS_History,0,SENTINEL_MASTERS_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTERS_Keyspecs,0,NULL,0)}, +{MAKE_CMD("is-master-down-by-addr","Determines whether a primary instance is down.","O(1)","2.8.4",CMD_DOC_DEPRECATED,"`SENTINEL IS-PRIMARY-DOWN-BY-ADDR`","8.0.0","sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_IS_MASTER_DOWN_BY_ADDR_History,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Tips,0,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_IS_MASTER_DOWN_BY_ADDR_Keyspecs,0,NULL,4),.args=SENTINEL_IS_MASTER_DOWN_BY_ADDR_Args}, +{MAKE_CMD("is-primary-down-by-addr","Determines whether a primary instance is down.","O(1)","8.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_History,0,SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_Tips,0,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_Keyspecs,0,NULL,4),.args=SENTINEL_IS_PRIMARY_DOWN_BY_ADDR_Args}, +{MAKE_CMD("master","Returns the state of a primary instance.","O(1)","2.8.4",CMD_DOC_DEPRECATED,"`SENTINEL PRIMARY`","8.0.0","sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTER_History,0,SENTINEL_MASTER_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTER_Keyspecs,0,NULL,1),.args=SENTINEL_MASTER_Args}, +{MAKE_CMD("masters","Returns a list of monitored primaries.","O(N) where N is the number of primaries","2.8.4",CMD_DOC_DEPRECATED,"`SENTINEL PRIMARIES`","8.0.0","sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MASTERS_History,0,SENTINEL_MASTERS_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MASTERS_Keyspecs,0,NULL,0)}, {MAKE_CMD("monitor","Starts monitoring.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MONITOR_History,0,SENTINEL_MONITOR_Tips,0,sentinelCommand,6,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MONITOR_Keyspecs,0,NULL,4),.args=SENTINEL_MONITOR_Args}, {MAKE_CMD("myid","Returns the Sentinel instance ID.","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_MYID_History,0,SENTINEL_MYID_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_MYID_Keyspecs,0,NULL,0)}, {MAKE_CMD("pending-scripts","Returns information about pending scripts for Sentinel.",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_PENDING_SCRIPTS_History,0,SENTINEL_PENDING_SCRIPTS_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_PENDING_SCRIPTS_Keyspecs,0,NULL,0)}, +{MAKE_CMD("primaries","Returns a list of monitored primaries.","O(N) where N is the number of primaries","8.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_PRIMARIES_History,0,SENTINEL_PRIMARIES_Tips,0,sentinelCommand,2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_PRIMARIES_Keyspecs,0,NULL,0)}, +{MAKE_CMD("primary","Returns the state of a primary 
instance.","O(1)","8.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_PRIMARY_History,0,SENTINEL_PRIMARY_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_PRIMARY_Keyspecs,0,NULL,1),.args=SENTINEL_PRIMARY_Args}, {MAKE_CMD("remove","Stops monitoring.","O(1)","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_REMOVE_History,0,SENTINEL_REMOVE_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_REMOVE_Keyspecs,0,NULL,1),.args=SENTINEL_REMOVE_Args}, {MAKE_CMD("replicas","Returns a list of the monitored replicas.","O(N) where N is the number of replicas","5.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_REPLICAS_History,0,SENTINEL_REPLICAS_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_REPLICAS_Keyspecs,0,NULL,1),.args=SENTINEL_REPLICAS_Args}, {MAKE_CMD("reset","Resets primaries by name matching a pattern.","O(N) where N is the number of monitored primaries","2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_RESET_History,0,SENTINEL_RESET_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_RESET_Keyspecs,0,NULL,1),.args=SENTINEL_RESET_Args}, diff --git a/src/commands/sentinel-get-master-addr-by-name.json b/src/commands/sentinel-get-master-addr-by-name.json index 2d7fc50eda..9f91fef764 100644 --- a/src/commands/sentinel-get-master-addr-by-name.json +++ b/src/commands/sentinel-get-master-addr-by-name.json @@ -7,6 +7,12 @@ "arity": 3, "container": "SENTINEL", "function": "sentinelCommand", + "deprecated_since": "8.0.0", + "replaced_by": "`SENTINEL GET-PRIMARY-ADDR-BY-NAME`", + "doc_flags": [ + "DEPRECATED" + ], + "command_flags": [ "ADMIN", "SENTINEL", diff --git a/src/commands/sentinel-get-primary-addr-by-name.json b/src/commands/sentinel-get-primary-addr-by-name.json new file mode 100644 index 0000000000..f2d6261931 --- /dev/null +++ b/src/commands/sentinel-get-primary-addr-by-name.json @@ -0,0 +1,38 @@ +{ + "GET-PRIMARY-ADDR-BY-NAME": { + "summary": "Returns the port and address of a primary instance.", + "complexity": "O(1)", + "group": "sentinel", + "since": "8.0.0", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "reply_schema": { + "type": "array", + "minItems": 2, + "maxItems": 2, + "items": [ + { + "type": "string", + "description": "IP addr or hostname." 
+ }, + { + "type": "string", + "description": "Port.", + "pattern": "[0-9]+" + } + ] + }, + "arguments": [ + { + "name": "primary-name", + "type": "string" + } + ] + } +} diff --git a/src/commands/sentinel-is-master-down-by-addr.json b/src/commands/sentinel-is-master-down-by-addr.json index 3ecf8723fb..6f4b305ae0 100644 --- a/src/commands/sentinel-is-master-down-by-addr.json +++ b/src/commands/sentinel-is-master-down-by-addr.json @@ -7,6 +7,11 @@ "arity": 6, "container": "SENTINEL", "function": "sentinelCommand", + "deprecated_since": "8.0.0", + "replaced_by": "`SENTINEL IS-PRIMARY-DOWN-BY-ADDR`", + "doc_flags": [ + "DEPRECATED" + ], "command_flags": [ "ADMIN", "SENTINEL", diff --git a/src/commands/sentinel-is-primary-down-by-addr.json b/src/commands/sentinel-is-primary-down-by-addr.json new file mode 100644 index 0000000000..c3c00f1ec0 --- /dev/null +++ b/src/commands/sentinel-is-primary-down-by-addr.json @@ -0,0 +1,61 @@ +{ + "IS-PRIMARY-DOWN-BY-ADDR": { + "summary": "Determines whether a primary instance is down.", + "complexity": "O(1)", + "group": "sentinel", + "since": "8.0.0", + "arity": 6, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "reply_schema": { + "type": "array", + "minItems": 3, + "maxItems": 3, + "items": [ + { + "oneOf": [ + { + "const": 0, + "description": "Primary is up." + }, + { + "const": 1, + "description": "Primary is down." + } + ] + }, + { + "type": "string", + "description": "Sentinel address." + }, + { + "type": "integer", + "description": "Port." + } + ] + }, + "arguments": [ + { + "name": "ip", + "type": "string" + }, + { + "name": "port", + "type": "integer" + }, + { + "name": "current-epoch", + "type": "integer" + }, + { + "name": "runid", + "type": "string" + } + ] + } +} diff --git a/src/commands/sentinel-master.json b/src/commands/sentinel-master.json index 3af3227394..6c2d035122 100644 --- a/src/commands/sentinel-master.json +++ b/src/commands/sentinel-master.json @@ -7,6 +7,11 @@ "arity": 3, "container": "SENTINEL", "function": "sentinelCommand", + "deprecated_since": "8.0.0", + "replaced_by": "`SENTINEL PRIMARY`", + "doc_flags": [ + "DEPRECATED" + ], "command_flags": [ "ADMIN", "SENTINEL", diff --git a/src/commands/sentinel-masters.json b/src/commands/sentinel-masters.json index b6aa86d02a..2a5a09bfd2 100644 --- a/src/commands/sentinel-masters.json +++ b/src/commands/sentinel-masters.json @@ -7,6 +7,11 @@ "arity": 2, "container": "SENTINEL", "function": "sentinelCommand", + "deprecated_since": "8.0.0", + "replaced_by": "`SENTINEL PRIMARIES`", + "doc_flags": [ + "DEPRECATED" + ], "command_flags": [ "ADMIN", "SENTINEL", @@ -14,7 +19,7 @@ ], "reply_schema": { "type": "array", - "description": "List of monitored primaries, and their state.", + "description": "List of monitored primaries, and their states.", "items": { "type": "object", "additionalProperties": { diff --git a/src/commands/sentinel-primaries.json b/src/commands/sentinel-primaries.json new file mode 100644 index 0000000000..0885bbf597 --- /dev/null +++ b/src/commands/sentinel-primaries.json @@ -0,0 +1,26 @@ +{ + "PRIMARIES": { + "summary": "Returns a list of monitored primaries.", + "complexity": "O(N) where N is the number of primaries", + "group": "sentinel", + "since": "8.0.0", + "arity": 2, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "reply_schema": { + "type": "array", + "description": "List of monitored primaries, 
and their states.", + "items": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } +} diff --git a/src/commands/sentinel-primary.json b/src/commands/sentinel-primary.json new file mode 100644 index 0000000000..03cf5c9102 --- /dev/null +++ b/src/commands/sentinel-primary.json @@ -0,0 +1,29 @@ +{ + "PRIMARY": { + "summary": "Returns the state of a primary instance.", + "complexity": "O(1)", + "group": "sentinel", + "since": "8.0.0", + "arity": 3, + "container": "SENTINEL", + "function": "sentinelCommand", + "command_flags": [ + "ADMIN", + "SENTINEL", + "ONLY_SENTINEL" + ], + "reply_schema": { + "type": "object", + "description": "The state and info of the specified primary.", + "additionalProperties": { + "type": "string" + } + }, + "arguments": [ + { + "name": "primary-name", + "type": "string" + } + ] + } +} diff --git a/src/config.c b/src/config.c index 0bacf06901..c11057c202 100644 --- a/src/config.c +++ b/src/config.c @@ -1,6 +1,6 @@ /* Configuration file parsing and CONFIG GET/SET commands implementation. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -3118,6 +3118,7 @@ standardConfig static_configs[] = { /* String Configs */ createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL), createStringConfig("unixsocket", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocket, NULL, NULL, NULL), + createStringConfig("unixsocketgroup", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.unixsocketgroup, NULL, NULL, NULL), createStringConfig("pidfile", NULL, IMMUTABLE_CONFIG, EMPTY_STRING_IS_NULL, server.pidfile, NULL, NULL, NULL), createStringConfig("replica-announce-ip", "slave-announce-ip", MODIFIABLE_CONFIG, EMPTY_STRING_IS_NULL, server.replica_announce_ip, NULL, NULL, NULL), createStringConfig("primaryuser", "masteruser", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_user, NULL, NULL, NULL), @@ -3143,6 +3144,7 @@ standardConfig static_configs[] = { createStringConfig("req-res-logfile", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, EMPTY_STRING_IS_NULL, server.req_res_logfile, NULL, NULL, NULL), #endif createStringConfig("locale-collate", NULL, MODIFIABLE_CONFIG, ALLOW_EMPTY_STRING, server.locale_collate, "", NULL, updateLocaleCollate), + createStringConfig("debug-context", NULL, MODIFIABLE_CONFIG | DEBUG_CONFIG | HIDDEN_CONFIG, ALLOW_EMPTY_STRING, server.debug_context, "", NULL, NULL), /* SDS Configs */ createSDSConfig("primaryauth", "masterauth", MODIFIABLE_CONFIG | SENSITIVE_CONFIG, EMPTY_STRING_IS_NULL, server.primary_auth, NULL, NULL, NULL), @@ -3231,7 +3233,7 @@ standardConfig static_configs[] = { createLongLongConfig("latency-monitor-threshold", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.latency_monitor_threshold, 0, INTEGER_CONFIG, NULL, NULL), createLongLongConfig("proto-max-bulk-len", NULL, DEBUG_CONFIG | MODIFIABLE_CONFIG, 1024 * 1024, LONG_MAX, server.proto_max_bulk_len, 512ll * 1024 * 1024, MEMORY_CONFIG, NULL, NULL), /* Bulk request max size */ createLongLongConfig("stream-node-max-entries", NULL, MODIFIABLE_CONFIG, 0, LLONG_MAX, server.stream_node_max_entries, 100, INTEGER_CONFIG, NULL, NULL), - createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 1024 * 1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 1mb */ + 
createLongLongConfig("repl-backlog-size", NULL, MODIFIABLE_CONFIG, 1, LLONG_MAX, server.repl_backlog_size, 10 * 1024 * 1024, MEMORY_CONFIG, NULL, updateReplBacklogSize), /* Default: 10mb */ /* Unsigned Long Long configs */ createULongLongConfig("maxmemory", NULL, MODIFIABLE_CONFIG, 0, ULLONG_MAX, server.maxmemory, 0, MEMORY_CONFIG, NULL, updateMaxmemory), diff --git a/src/config.h b/src/config.h index 201e421976..8d19fa9f7f 100644 --- a/src/config.h +++ b/src/config.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/connection.h b/src/connection.h index d59f7bc7fc..97d79e5655 100644 --- a/src/connection.h +++ b/src/connection.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2019, Redis Labs + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -144,7 +144,8 @@ struct connListener { int bindaddr_count; int port; ConnectionType *ct; - void *priv; /* used by connection type specified data */ + void *priv1; /* used by connection type specified data */ + void *priv2; /* used by connection type specified data */ }; /* The connection module does not deal with listening and accepting sockets, diff --git a/src/connhelpers.h b/src/connhelpers.h index b543f2ed2d..ae3fb1508e 100644 --- a/src/connhelpers.h +++ b/src/connhelpers.h @@ -1,6 +1,6 @@ /* - * Copyright (c) 2019, Redis Labs + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/crc16.c b/src/crc16.c index 7b8c1dad0a..2153177578 100644 --- a/src/crc16.c +++ b/src/crc16.c @@ -2,7 +2,7 @@ /* * Copyright 2001-2010 Georges Menie (www.menie.org) - * Copyright 2010-2012 Salvatore Sanfilippo (adapted to Redis coding style) + * Copyright 2010-2012 Redis Ltd. (adapted to Redis coding style) * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/db.c b/src/db.c index d0a6640f57..4a2c0a495e 100644 --- a/src/db.c +++ b/src/db.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/debug.c b/src/debug.c index 27bc481767..ce322ded8f 100644 --- a/src/debug.c +++ b/src/debug.c @@ -1,6 +1,5 @@ /* - * Copyright (c) 2009-2020, Salvatore Sanfilippo - * Copyright (c) 2020, Redis Labs, Inc + * Copyright (c) 2009-2020, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -426,8 +425,10 @@ void debugCommand(client *c) { "MALLCTL-STR []", " Get or set a malloc tuning string.", #endif - "OBJECT ", + "OBJECT [fast]", " Show low level info about `key` and associated value.", + " Some fields of the default behavior may be time consuming to fetch,", + " and `fast` can be passed to avoid fetching them.", "DROP-CLUSTER-PACKET-FILTER ", " Drop all packets that match the filtered type. 
Set to -1 allow all packets.", "CLOSE-CLUSTER-LINK-ON-PACKET-DROP <0|1>", @@ -604,11 +605,14 @@ void debugCommand(client *c) { } else if (!strcasecmp(c->argv[1]->ptr, "close-cluster-link-on-packet-drop") && c->argc == 3) { server.debug_cluster_close_link_on_packet_drop = atoi(c->argv[2]->ptr); addReply(c, shared.ok); - } else if (!strcasecmp(c->argv[1]->ptr, "object") && c->argc == 3) { + } else if (!strcasecmp(c->argv[1]->ptr, "object") && (c->argc == 3 || c->argc == 4)) { dictEntry *de; robj *val; char *strenc; + int fast = 0; + if (c->argc == 4 && !strcasecmp(c->argv[3]->ptr, "fast")) fast = 1; + if ((de = dbFind(c->db, c->argv[2]->ptr)) == NULL) { addReplyErrorObject(c, shared.nokeyerr); return; @@ -639,22 +643,27 @@ void debugCommand(client *c) { used = snprintf(nextra, remaining, " ql_compressed:%d", compressed); nextra += used; remaining -= used; - /* Add total uncompressed size */ - unsigned long sz = 0; - for (quicklistNode *node = ql->head; node; node = node->next) { - sz += node->sz; + if (!fast) { + /* Add total uncompressed size */ + unsigned long sz = 0; + for (quicklistNode *node = ql->head; node; node = node->next) { + sz += node->sz; + } + used = snprintf(nextra, remaining, " ql_uncompressed_size:%lu", sz); + nextra += used; + remaining -= used; } - used = snprintf(nextra, remaining, " ql_uncompressed_size:%lu", sz); - nextra += used; - remaining -= used; } - addReplyStatusFormat(c, - "Value at:%p refcount:%d " - "encoding:%s serializedlength:%zu " - "lru:%d lru_seconds_idle:%llu%s", - (void *)val, val->refcount, strenc, rdbSavedObjectLen(val, c->argv[2], c->db->id), - val->lru, estimateObjectIdleTime(val) / 1000, extra); + sds s = sdsempty(); + s = sdscatprintf(s, "Value at:%p refcount:%d encoding:%s", (void *)val, val->refcount, strenc); + if (!fast) s = sdscatprintf(s, " serializedlength:%zu", rdbSavedObjectLen(val, c->argv[2], c->db->id)); + /* The lru field stores either LRU idle time or LFU data, depending on server.maxmemory_policy, so only one of the two interpretations below is meaningful. */ + s = sdscatprintf(s, " lru:%d lru_seconds_idle:%llu", val->lru, estimateObjectIdleTime(val) / 1000); + s = sdscatprintf(s, " lfu_freq:%lu lfu_access_time_minutes:%u", LFUDecrAndReturn(val), val->lru >> 8); + s = sdscatprintf(s, "%s", extra); + addReplyStatusLength(c, s, sdslen(s)); + sdsfree(s); } else if (!strcasecmp(c->argv[1]->ptr, "sdslen") && c->argc == 3) { dictEntry *de; robj *val; diff --git a/src/debugmacro.h b/src/debugmacro.h index 429a230226..7113367cef 100644 --- a/src/debugmacro.h +++ b/src/debugmacro.h @@ -2,7 +2,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/defrag.c b/src/defrag.c index 5a54875864..4d34009f8b 100644 --- a/src/defrag.c +++ b/src/defrag.c @@ -5,7 +5,7 @@ * We do that by scanning the keyspace and for each pointer we have, we can try to * ask the allocator if moving it to a new address will help reduce fragmentation. * - * Copyright (c) 2020, Redis Labs, Inc + * Copyright (c) 2020, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/dict.c b/src/dict.c index 2eb3dd386f..1df03f6546 100644 --- a/src/dict.c +++ b/src/dict.c @@ -5,7 +5,7 @@ * tables of power of two in size are used, collisions are handled by * chaining. See the source code for more information...
:) * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/dict.h b/src/dict.h index 97a79910cb..5fd15004b8 100644 --- a/src/dict.h +++ b/src/dict.h @@ -5,7 +5,7 @@ * tables of power of two in size are used, collisions are handled by * chaining. See the source code for more information... :) * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/endianconv.c b/src/endianconv.c index de91671565..15b6bb322a 100644 --- a/src/endianconv.c +++ b/src/endianconv.c @@ -13,7 +13,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2011-2012, Salvatore Sanfilippo + * Copyright (c) 2011-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/endianconv.h b/src/endianconv.h index 062c3f2f53..3faa82b38f 100644 --- a/src/endianconv.h +++ b/src/endianconv.h @@ -2,7 +2,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2011-2012, Salvatore Sanfilippo + * Copyright (c) 2011-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/eval.c b/src/eval.c index 2afbf445f5..f99bff6552 100644 --- a/src/eval.c +++ b/src/eval.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/evict.c b/src/evict.c index fb04616871..4b9f70eaa5 100644 --- a/src/evict.c +++ b/src/evict.c @@ -2,7 +2,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2009-2016, Salvatore Sanfilippo + * Copyright (c) 2009-2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/expire.c b/src/expire.c index 05abb9580a..a9842ae123 100644 --- a/src/expire.c +++ b/src/expire.c @@ -2,7 +2,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2009-2016, Salvatore Sanfilippo + * Copyright (c) 2009-2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/fmacros.h b/src/fmacros.h index d54bdf2d94..78efcdb71f 100644 --- a/src/fmacros.h +++ b/src/fmacros.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/geo.c b/src/geo.c index 64eaa3bd63..9e43a6e93b 100644 --- a/src/geo.c +++ b/src/geo.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015-2016, Salvatore Sanfilippo . + * Copyright (c) 2015-2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/geohash.c b/src/geohash.c index d75e21e4e5..138fd2759a 100644 --- a/src/geohash.c +++ b/src/geohash.c @@ -1,7 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015-2016, Salvatore Sanfilippo . + * Copyright (c) 2015-2016, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/geohash.h b/src/geohash.h index b7d185b724..5ae9d78f74 100644 --- a/src/geohash.h +++ b/src/geohash.h @@ -1,7 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015, Salvatore Sanfilippo . + * Copyright (c) 2015, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/geohash_helper.c b/src/geohash_helper.c index eb7ab9a8ec..aa4b4743a6 100644 --- a/src/geohash_helper.c +++ b/src/geohash_helper.c @@ -1,7 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015-2016, Salvatore Sanfilippo . + * Copyright (c) 2015-2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/geohash_helper.h b/src/geohash_helper.h index fbca4dda9b..fa95df85e9 100644 --- a/src/geohash_helper.h +++ b/src/geohash_helper.h @@ -1,7 +1,7 @@ /* * Copyright (c) 2013-2014, yinqiwen * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015, Salvatore Sanfilippo . + * Copyright (c) 2015, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/hyperloglog.c b/src/hyperloglog.c index 08df8f6a74..9769533d5e 100644 --- a/src/hyperloglog.c +++ b/src/hyperloglog.c @@ -1,7 +1,7 @@ /* hyperloglog.c - HyperLogLog probabilistic cardinality approximation. * This file implements the algorithm and the exported commands. * - * Copyright (c) 2014, Salvatore Sanfilippo + * Copyright (c) 2014, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/intset.c b/src/intset.c index ae3b796fd1..ddddae221d 100644 --- a/src/intset.c +++ b/src/intset.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2009-2012, Pieter Noordhuis - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/intset.h b/src/intset.h index 52d215ab0f..c727ee3f83 100644 --- a/src/intset.h +++ b/src/intset.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2009-2012, Pieter Noordhuis - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/latency.c b/src/latency.c index eacb5fbbc4..49203ce768 100644 --- a/src/latency.c +++ b/src/latency.c @@ -5,7 +5,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2014, Salvatore Sanfilippo + * Copyright (c) 2014, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/latency.h b/src/latency.h index 4b1e291895..ed8beee7af 100644 --- a/src/latency.h +++ b/src/latency.h @@ -3,7 +3,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2014, Salvatore Sanfilippo + * Copyright (c) 2014, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/listpack.c b/src/listpack.c index be970e1e64..3bb6686f61 100644 --- a/src/listpack.c +++ b/src/listpack.c @@ -4,8 +4,7 @@ * * https://github.com/antirez/listpack * - * Copyright (c) 2017, Salvatore Sanfilippo - * Copyright (c) 2020, Redis Labs, Inc + * Copyright (c) 2017,2020, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -167,7 +166,7 @@ int lpSafeToAdd(unsigned char *lp, size_t add) { * "utils.c", function string2ll(), and is copyright: * * Copyright(C) 2011, Pieter Noordhuis - * Copyright(C) 2011, Salvatore Sanfilippo + * Copyright(C) 2011, Redis Ltd. * * The function is released under the BSD 3-clause license. */ diff --git a/src/listpack.h b/src/listpack.h index 7934905c6e..1f99b59457 100644 --- a/src/listpack.h +++ b/src/listpack.h @@ -4,7 +4,7 @@ * * https://github.com/antirez/listpack * - * Copyright (c) 2017, Salvatore Sanfilippo + * Copyright (c) 2017, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/listpack_malloc.h b/src/listpack_malloc.h index 0d36bb19d5..a75bd31817 100644 --- a/src/listpack_malloc.h +++ b/src/listpack_malloc.h @@ -1,7 +1,7 @@ /* Listpack -- A lists of strings serialization format * https://github.com/antirez/listpack * - * Copyright (c) 2017, Salvatore Sanfilippo + * Copyright (c) 2017, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/localtime.c b/src/localtime.c index 55037de103..797b13a543 100644 --- a/src/localtime.c +++ b/src/localtime.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Salvatore Sanfilippo + * Copyright (c) 2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/lolwut.c b/src/lolwut.c index 7862e5c9fd..d4b7031873 100644 --- a/src/lolwut.c +++ b/src/lolwut.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Salvatore Sanfilippo + * Copyright (c) 2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/lolwut.h b/src/lolwut.h index 682d00531f..77bf05dafb 100644 --- a/src/lolwut.h +++ b/src/lolwut.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019, Salvatore Sanfilippo + * Copyright (c) 2018-2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/lolwut5.c b/src/lolwut5.c index d1405d88e5..0dfe35642b 100644 --- a/src/lolwut5.c +++ b/src/lolwut5.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Salvatore Sanfilippo + * Copyright (c) 2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/lolwut6.c b/src/lolwut6.c index a743fb20c3..1ed3683cf7 100644 --- a/src/lolwut6.c +++ b/src/lolwut6.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Salvatore Sanfilippo + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/memtest.c b/src/memtest.c index 4439baae26..c071cfdfdf 100644 --- a/src/memtest.c +++ b/src/memtest.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/module.c b/src/module.c index 5eb5c3ac84..e45b8f4181 100644 --- a/src/module.c +++ b/src/module.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/modules/helloblock.c b/src/modules/helloblock.c index 65e9bb71a2..61c848b413 100644 --- a/src/modules/helloblock.c +++ b/src/modules/helloblock.c @@ -3,7 +3,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/modules/hellocluster.c b/src/modules/hellocluster.c index cfc0d4f0f4..b5c96ed256 100644 --- a/src/modules/hellocluster.c +++ b/src/modules/hellocluster.c @@ -2,7 +2,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2018, Salvatore Sanfilippo + * Copyright (c) 2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/modules/hellodict.c b/src/modules/hellodict.c index 38081919f3..e0af06ba2f 100644 --- a/src/modules/hellodict.c +++ b/src/modules/hellodict.c @@ -5,7 +5,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2018, Salvatore Sanfilippo + * Copyright (c) 2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/modules/hellohook.c b/src/modules/hellohook.c index 35a1ed0a1a..c655800aa9 100644 --- a/src/modules/hellohook.c +++ b/src/modules/hellohook.c @@ -2,7 +2,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2019, Salvatore Sanfilippo + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/modules/hellotimer.c b/src/modules/hellotimer.c index 40ba323e58..3d6258c76a 100644 --- a/src/modules/hellotimer.c +++ b/src/modules/hellotimer.c @@ -2,7 +2,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2018, Salvatore Sanfilippo + * Copyright (c) 2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/modules/hellotype.c b/src/modules/hellotype.c index 7e2dc60c68..53dc75c864 100644 --- a/src/modules/hellotype.c +++ b/src/modules/hellotype.c @@ -7,7 +7,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/modules/helloworld.c b/src/modules/helloworld.c index f74e4e9b66..83f36615ad 100644 --- a/src/modules/helloworld.c +++ b/src/modules/helloworld.c @@ -6,7 +6,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/multi.c b/src/multi.c index 074060269c..14688f80b4 100644 --- a/src/multi.c +++ b/src/multi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/networking.c b/src/networking.c index 915a0b016f..27d81da493 100644 --- a/src/networking.c +++ b/src/networking.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -212,7 +212,6 @@ client *createClient(connection *conn) { c->peerid = NULL; c->sockname = NULL; c->client_list_node = NULL; - c->postponed_list_node = NULL; c->io_read_state = CLIENT_IDLE; c->io_write_state = CLIENT_IDLE; c->nwritten = 0; @@ -1518,7 +1517,11 @@ void unlinkClient(client *c) { } } /* Only use shutdown when the fork is active and we are the parent. */ - if (server.child_type) connShutdown(c->conn); + if (server.child_type && !c->flag.repl_rdb_channel) { + connShutdown(c->conn); + } else if (c->flag.repl_rdb_channel) { + shutdown(c->conn->fd, SHUT_RDWR); + } connClose(c->conn); c->conn = NULL; } @@ -1775,6 +1778,7 @@ void freeClient(client *c) { void freeClientAsync(client *c) { if (c->flag.close_asap || c->flag.script) return; c->flag.close_asap = 1; + debugServerAssertWithInfo(c, NULL, listSearchKey(server.clients_to_close, c) == NULL); listAddNodeTail(server.clients_to_close, c); } diff --git a/src/notify.c b/src/notify.c index 1cbf9c74ed..5305f24664 100644 --- a/src/notify.c +++ b/src/notify.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Salvatore Sanfilippo + * Copyright (c) 2013, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/object.c b/src/object.c index 6e5d1f460b..73382ffe8b 100644 --- a/src/object.c +++ b/src/object.c @@ -1,6 +1,6 @@ /* Object implementation. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/pqsort.c b/src/pqsort.c index ca4f99359d..31729cfbec 100644 --- a/src/pqsort.c +++ b/src/pqsort.c @@ -1,7 +1,7 @@ /* The following is the NetBSD libc qsort implementation modified in order to * support partial sorting of ranges. * - * Copyright(C) 2009-2012 Salvatore Sanfilippo. All rights reserved. + * Copyright(C) 2009-2012 Redis Ltd. * * The original copyright notice follows. */ diff --git a/src/pqsort.h b/src/pqsort.h index 5f1c5c1cd3..84f98f8626 100644 --- a/src/pqsort.h +++ b/src/pqsort.h @@ -1,7 +1,7 @@ /* The following is the NetBSD libc qsort implementation modified in order to * support partial sorting of ranges. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/pubsub.c b/src/pubsub.c index eacadfc185..047d408621 100644 --- a/src/pubsub.c +++ b/src/pubsub.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/rand.c b/src/rand.c index 6f3d0c49d7..cceb644cee 100644 --- a/src/rand.c +++ b/src/rand.c @@ -13,7 +13,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2010-2012, Salvatore Sanfilippo + * Copyright (c) 2010-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rand.h b/src/rand.h index 2726c92b7f..dec7465084 100644 --- a/src/rand.h +++ b/src/rand.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rax.c b/src/rax.c index cee9b714e5..909d5a2375 100644 --- a/src/rax.c +++ b/src/rax.c @@ -2,7 +2,7 @@ * * Version 1.2 -- 7 February 2019 * - * Copyright (c) 2017-2019, Salvatore Sanfilippo + * Copyright (c) 2017-2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rax.h b/src/rax.h index c03e0303a0..3e3a85e783 100644 --- a/src/rax.h +++ b/src/rax.h @@ -1,6 +1,6 @@ /* Rax -- A radix tree implementation. * - * Copyright (c) 2017-2018, Salvatore Sanfilippo + * Copyright (c) 2017-2018, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rax_malloc.h b/src/rax_malloc.h index 9295985c65..03c952e1a4 100644 --- a/src/rax_malloc.h +++ b/src/rax_malloc.h @@ -1,6 +1,6 @@ /* Rax -- A radix tree implementation. * - * Copyright (c) 2017, Salvatore Sanfilippo + * Copyright (c) 2017, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rdb.c b/src/rdb.c index 41d58e4e47..bc2d03e86c 100644 --- a/src/rdb.c +++ b/src/rdb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -3450,10 +3450,9 @@ static void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { if (!bysignal && exitcode == 0) { serverLog(LL_NOTICE, "Background RDB transfer terminated with success"); } else if (!bysignal && exitcode != 0) { - serverLog(LL_WARNING, "Background transfer error"); - server.lastbgsave_status = C_ERR; + serverLog(LL_WARNING, "Background RDB transfer error"); } else { - serverLog(LL_WARNING, "Background transfer terminated by signal %d", bysignal); + serverLog(LL_WARNING, "Background RDB transfer terminated by signal %d", bysignal); } if (server.rdb_child_exit_pipe != -1) close(server.rdb_child_exit_pipe); if (server.rdb_pipe_read > 0) { @@ -3558,14 +3557,14 @@ int rdbSaveToReplicasSockets(int req, rdbSaveInfo *rsi) { conns[connsnum++] = replica->conn; if (dual_channel) { - /* Put the socket in blocking mode to simplify RDB transfer. */ - connBlock(replica->conn); connSendTimeout(replica->conn, server.repl_timeout * 1000); /* This replica uses diskless dual channel sync, hence we need * to inform it with the save end offset.*/ sendCurrentOffsetToReplica(replica); /* Make sure repl traffic is appended to the replication backlog */ addRdbReplicaToPsyncWait(replica); + /* Put the socket in blocking mode to simplify RDB transfer. 
*/ + connBlock(replica->conn); } else { server.rdb_pipe_numconns++; } diff --git a/src/rdb.h b/src/rdb.h index 3b17cbe9de..8f05a0ea1e 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rdma.c b/src/rdma.c index f95b5b1229..bca01b9839 100644 --- a/src/rdma.c +++ b/src/rdma.c @@ -74,6 +74,7 @@ typedef enum ValkeyRdmaOpcode { #define VALKEY_RDMA_MAX_RX_SIZE (16 * 1024 * 1024) #define VALKEY_RDMA_SYNCIO_RES 10 #define VALKEY_RDMA_INVALID_OPCODE 0xffff +#define VALKEY_RDMA_KEEPALIVE_MS 3000 typedef struct rdma_connection { connection c; @@ -94,6 +95,7 @@ typedef struct RdmaContext { connection *conn; char *ip; int port; + long long keepalive_te; /* RDMA has no transport layer keepalive */ struct ibv_pd *pd; struct rdma_event_channel *cm_channel; struct ibv_comp_channel *comp_channel; @@ -405,7 +407,7 @@ static int rdmaSendCommand(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdm } static int connRdmaRegisterRx(RdmaContext *ctx, struct rdma_cm_id *cm_id) { - ValkeyRdmaCmd cmd; + ValkeyRdmaCmd cmd = {0}; cmd.memory.opcode = htons(RegisterXferMemory); cmd.memory.addr = htonu64((uint64_t)ctx->rx.addr); @@ -419,7 +421,7 @@ static int connRdmaRegisterRx(RdmaContext *ctx, struct rdma_cm_id *cm_id) { } static int connRdmaGetFeature(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdmaCmd *cmd) { - ValkeyRdmaCmd _cmd; + ValkeyRdmaCmd _cmd = {0}; _cmd.feature.opcode = htons(GetServerFeature); _cmd.feature.select = cmd->feature.select; @@ -447,12 +449,13 @@ static int rdmaHandleEstablished(struct rdma_cm_event *ev) { return C_OK; } -static int rdmaHandleDisconnect(struct rdma_cm_event *ev) { +static int rdmaHandleDisconnect(aeEventLoop *el, struct rdma_cm_event *ev) { struct rdma_cm_id *cm_id = ev->id; RdmaContext *ctx = cm_id->context; connection *conn = ctx->conn; rdma_connection *rdma_conn = (rdma_connection *)conn; + aeDeleteTimeEvent(el, ctx->keepalive_te); conn->state = CONN_STATE_CLOSED; /* we can't close connection now, let's mark this connection as closed state */ @@ -669,7 +672,27 @@ static void connRdmaEventHandler(struct aeEventLoop *el, int fd, void *clientDat } } -static int rdmaHandleConnect(char *err, struct rdma_cm_event *ev, char *ip, size_t ip_len, int *port) { +static int rdmaKeepaliveTimeProc(struct aeEventLoop *el, long long id, void *clientData) { + struct rdma_cm_id *cm_id = clientData; + RdmaContext *ctx = cm_id->context; + connection *conn = ctx->conn; + ValkeyRdmaCmd cmd = {0}; + + UNUSED(el); + UNUSED(id); + if (conn->state != CONN_STATE_CONNECTED) { + return AE_NOMORE; + } + + cmd.keepalive.opcode = htons(Keepalive); + if (rdmaSendCommand(ctx, cm_id, &cmd) != C_OK) { + return AE_NOMORE; + } + + return VALKEY_RDMA_KEEPALIVE_MS; +} + +static int rdmaHandleConnect(aeEventLoop *el, char *err, struct rdma_cm_event *ev, char *ip, size_t ip_len, int *port) { int ret = C_OK; struct rdma_cm_id *cm_id = ev->id; struct sockaddr_storage caddr; @@ -694,6 +717,11 @@ static int rdmaHandleConnect(char *err, struct rdma_cm_event *ev, char *ip, size ctx = zcalloc(sizeof(RdmaContext)); ctx->ip = zstrdup(ip); ctx->port = *port; + ctx->keepalive_te = aeCreateTimeEvent(el, VALKEY_RDMA_KEEPALIVE_MS, rdmaKeepaliveTimeProc, cm_id, NULL); + if (ctx->keepalive_te == AE_ERR) { + return C_ERR; + } + cm_id->context = ctx; if (rdmaCreateResource(ctx, cm_id) == C_ERR) { goto reject; @@ -720,7 +748,7 
@@ static rdma_listener *rdmaFdToListener(connListener *listener, int fd) { for (int i = 0; i < listener->count; i++) { if (listener->fd[i] != fd) continue; - return (rdma_listener *)listener->priv + i; + return (rdma_listener *)listener->priv1 + i; } return NULL; @@ -732,7 +760,8 @@ static rdma_listener *rdmaFdToListener(connListener *listener, int fd) { * 1, handle RDMA_CM_EVENT_CONNECT_REQUEST and return CM fd on success * 2, handle RDMA_CM_EVENT_ESTABLISHED and return C_OK on success */ -static int rdmaAccept(connListener *listener, char *err, int fd, char *ip, size_t ip_len, int *port, void **priv) { +static int +rdmaAccept(aeEventLoop *el, connListener *listener, char *err, int fd, char *ip, size_t ip_len, int *port, void **priv) { struct rdma_cm_event *ev; enum rdma_cm_event_type ev_type; int ret = C_OK; @@ -755,7 +784,7 @@ static int rdmaAccept(connListener *listener, char *err, int fd, char *ip, size_ ev_type = ev->event; switch (ev_type) { case RDMA_CM_EVENT_CONNECT_REQUEST: - ret = rdmaHandleConnect(err, ev, ip, ip_len, port); + ret = rdmaHandleConnect(el, err, ev, ip, ip_len, port); if (ret == C_OK) { RdmaContext *ctx = (RdmaContext *)ev->id->context; *priv = ev->id; @@ -773,7 +802,7 @@ static int rdmaAccept(connListener *listener, char *err, int fd, char *ip, size_ case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: - rdmaHandleDisconnect(ev); + rdmaHandleDisconnect(el, ev); ret = C_OK; break; @@ -804,7 +833,7 @@ static void connRdmaAcceptHandler(aeEventLoop *el, int fd, void *privdata, int m UNUSED(mask); while (max--) { - cfd = rdmaAccept(listener, server.neterr, fd, cip, sizeof(cip), &cport, &connpriv); + cfd = rdmaAccept(el, listener, server.neterr, fd, cip, sizeof(cip), &cport, &connpriv); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) serverLog(LL_WARNING, "RDMA Accepting client connection: %s", server.neterr); return; @@ -951,7 +980,7 @@ static void rdmaCMeventHandler(struct aeEventLoop *el, int fd, void *clientData, case RDMA_CM_EVENT_TIMEWAIT_EXIT: case RDMA_CM_EVENT_CONNECT_REQUEST: case RDMA_CM_EVENT_ADDR_CHANGE: - case RDMA_CM_EVENT_DISCONNECTED: rdmaHandleDisconnect(ev); break; + case RDMA_CM_EVENT_DISCONNECTED: rdmaHandleDisconnect(el, ev); break; case RDMA_CM_EVENT_MULTICAST_JOIN: case RDMA_CM_EVENT_MULTICAST_ERROR: @@ -1488,7 +1517,7 @@ int connRdmaListen(connListener *listener) { bindaddr = default_bindaddr; } - listener->priv = rdma_listener = zcalloc_num(bindaddr_count, sizeof(*rdma_listener)); + listener->priv1 = rdma_listener = zcalloc_num(bindaddr_count, sizeof(*rdma_listener)); for (j = 0; j < bindaddr_count; j++) { char *addr = bindaddr[j]; int optional = *addr == '-'; @@ -1707,13 +1736,13 @@ static int rdmaChangeListener(void) { aeDeleteFileEvent(server.el, listener->fd[i], AE_READABLE); listener->fd[i] = -1; - struct rdma_listener *rdma_listener = (struct rdma_listener *)listener->priv + i; + struct rdma_listener *rdma_listener = (struct rdma_listener *)listener->priv1 + i; rdma_destroy_id(rdma_listener->cm_id); rdma_destroy_event_channel(rdma_listener->cm_channel); } listener->count = 0; - zfree(listener->priv); + zfree(listener->priv1); closeListener(listener); diff --git a/src/release.c b/src/release.c index a5a16f24f4..0afa1144dc 100644 --- a/src/release.c +++ b/src/release.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/replication.c b/src/replication.c index 6be8d3f9d5..560153b8d2 100644 --- a/src/replication.c +++ b/src/replication.c @@ -1,6 +1,6 @@ /* Asynchronous replication implementation. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -391,7 +391,7 @@ void freeReplicaReferencedReplBuffer(client *replica) { uint64_t rdb_cid = htonu64(replica->id); if (raxRemove(server.replicas_waiting_psync, (unsigned char *)&rdb_cid, sizeof(rdb_cid), NULL)) { serverLog(LL_DEBUG, "Remove psync waiting replica %s with cid %llu from replicas rax.", - replicationGetReplicaName(replica), (long long unsigned int)replica->associated_rdb_client_id); + replicationGetReplicaName(replica), (long long unsigned int)replica->id); } } if (replica->ref_repl_buf_node != NULL) { @@ -956,7 +956,9 @@ int startBgsaveForReplication(int mincapa, int req) { /* `SYNC` should have failed with error if we don't support socket and require a filter, assert this here */ serverAssert(socket_target || !(req & REPLICA_REQ_RDB_MASK)); - serverLog(LL_NOTICE, "Starting BGSAVE for SYNC with target: %s", socket_target ? "replicas sockets" : "disk"); + serverLog(LL_NOTICE, "Starting BGSAVE for SYNC with target: %s using: %s", + socket_target ? "replicas sockets" : "disk", + (req & REPLICA_REQ_RDB_CHANNEL) ? "dual-channel" : "normal sync"); rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); @@ -1992,8 +1994,8 @@ void readSyncBulkPayload(connection *conn) { /* Static vars used to hold the EOF mark, and the last bytes received * from the server: when they match, we reached the end of the transfer. */ - static char eofmark[CONFIG_RUN_ID_SIZE]; - static char lastbytes[CONFIG_RUN_ID_SIZE]; + static char eofmark[RDB_EOF_MARK_SIZE]; + static char lastbytes[RDB_EOF_MARK_SIZE]; static int usemark = 0; /* If repl_transfer_size == -1 we still have to read the bulk length @@ -2036,10 +2038,10 @@ void readSyncBulkPayload(connection *conn) { * At the end of the file the announced delimiter is transmitted. The * delimiter is long and random enough that the probability of a * collision with the actual file content can be ignored. */ - if (strncmp(buf + 1, "EOF:", 4) == 0 && strlen(buf + 5) >= CONFIG_RUN_ID_SIZE) { + if (strncmp(buf + 1, "EOF:", 4) == 0 && strlen(buf + 5) >= RDB_EOF_MARK_SIZE) { usemark = 1; - memcpy(eofmark, buf + 5, CONFIG_RUN_ID_SIZE); - memset(lastbytes, 0, CONFIG_RUN_ID_SIZE); + memcpy(eofmark, buf + 5, RDB_EOF_MARK_SIZE); + memset(lastbytes, 0, RDB_EOF_MARK_SIZE); /* Set any repl_transfer_size to avoid entering this code path * at the next call. */ server.repl_transfer_size = 0; @@ -2084,14 +2086,14 @@ void readSyncBulkPayload(connection *conn) { if (usemark) { /* Update the last bytes array, and check if it matches our * delimiter. 
*/ - if (nread >= CONFIG_RUN_ID_SIZE) { - memcpy(lastbytes, buf + nread - CONFIG_RUN_ID_SIZE, CONFIG_RUN_ID_SIZE); + if (nread >= RDB_EOF_MARK_SIZE) { + memcpy(lastbytes, buf + nread - RDB_EOF_MARK_SIZE, RDB_EOF_MARK_SIZE); } else { - int rem = CONFIG_RUN_ID_SIZE - nread; + int rem = RDB_EOF_MARK_SIZE - nread; memmove(lastbytes, lastbytes + nread, rem); memcpy(lastbytes + rem, buf, nread); } - if (memcmp(lastbytes, eofmark, CONFIG_RUN_ID_SIZE) == 0) eof_reached = 1; + if (memcmp(lastbytes, eofmark, RDB_EOF_MARK_SIZE) == 0) eof_reached = 1; } /* Update the last I/O time for the replication transfer (used in @@ -2109,7 +2111,7 @@ void readSyncBulkPayload(connection *conn) { /* Delete the last 40 bytes from the file if we reached EOF. */ if (usemark && eof_reached) { - if (ftruncate(server.repl_transfer_fd, server.repl_transfer_read - CONFIG_RUN_ID_SIZE) == -1) { + if (ftruncate(server.repl_transfer_fd, server.repl_transfer_read - RDB_EOF_MARK_SIZE) == -1) { serverLog(LL_WARNING, "Error truncating the RDB file received from the primary " "for SYNC: %s", @@ -2226,7 +2228,7 @@ void readSyncBulkPayload(connection *conn) { loadingFailed = 1; } else if (usemark) { /* Verify the end mark is correct. */ - if (!rioRead(&rdb, buf, CONFIG_RUN_ID_SIZE) || memcmp(buf, eofmark, CONFIG_RUN_ID_SIZE) != 0) { + if (!rioRead(&rdb, buf, RDB_EOF_MARK_SIZE) || memcmp(buf, eofmark, RDB_EOF_MARK_SIZE) != 0) { serverLog(LL_WARNING, "Replication stream EOF marker is broken"); loadingFailed = 1; } @@ -2677,6 +2679,7 @@ static void fullSyncWithPrimary(connection *conn) { /* Parse end offset response */ char *endoff_format = "$ENDOFF:%lld %40s %d %llu"; if (sscanf(err, endoff_format, &reploffset, primary_replid, &dbid, &rdb_client_id) != 4) { + serverLog(LL_WARNING, "Received unexpected $ENDOFF response: %s", err); goto error; } sdsfree(err); @@ -3723,7 +3726,7 @@ int cancelReplicationHandshake(int reconnect) { } /* Set replication to the specified primary address and port. */ -void replicationSetPrimary(char *ip, int port) { +void replicationSetPrimary(char *ip, int port, int full_sync_required) { int was_primary = server.primary_host == NULL; sdsfree(server.primary_host); @@ -3749,13 +3752,22 @@ void replicationSetPrimary(char *ip, int port) { * sync with new primary. */ cancelReplicationHandshake(0); + /* Before destroying our primary state, create a cached primary using * our own parameters, to later PSYNC with the new primary. */ - if (was_primary) { + if (was_primary && !full_sync_required) { replicationDiscardCachedPrimary(); replicationCachePrimaryUsingMyself(); } + /* If full sync is required, drop the cached primary. Doing so increases + * this replica node's election rank (delay) and reduces its chance of + * winning the election. If a replica requiring a full sync wins the + * election, it will flush valid data in the shard, causing data loss. */ + if (full_sync_required) { + replicationDiscardCachedPrimary(); + } + /* Fire the role change modules event. */ moduleFireServerEvent(VALKEYMODULE_EVENT_REPLICATION_ROLE_CHANGED, VALKEYMODULE_EVENT_REPLROLECHANGED_NOW_REPLICA, NULL); @@ -3893,7 +3905,7 @@ void replicaofCommand(client *c) { } /* There was no previous primary or the user specified a different one, * we can continue. 
*/ - replicationSetPrimary(c->argv[1]->ptr, port); + replicationSetPrimary(c->argv[1]->ptr, port, 0); sds client = catClientInfoString(sdsempty(), c); serverLog(LL_NOTICE, "REPLICAOF %s:%d enabled (user request from '%s')", server.primary_host, server.primary_port, client); @@ -4159,6 +4171,8 @@ void replicationResurrectProvisionalPrimary(void) { memcpy(server.primary->replid, server.repl_provisional_primary.replid, CONFIG_RUN_ID_SIZE); server.primary->reploff = server.repl_provisional_primary.reploff; server.primary->read_reploff = server.repl_provisional_primary.read_reploff; + server.primary_repl_offset = server.primary->reploff; + memcpy(server.replid, server.primary->replid, sizeof(server.primary->replid)); establishPrimaryConnection(); } @@ -4353,9 +4367,9 @@ void waitaofCommand(client *c) { * waiting for replica acks. Never call it directly, call unblockClient() * instead. */ void unblockClientWaitingReplicas(client *c) { - listNode *ln = listSearchKey(server.clients_waiting_acks, c); - serverAssert(ln != NULL); - listDelNode(server.clients_waiting_acks, ln); + serverAssert(c->bstate.client_waiting_acks_list_node); + listDelNode(server.clients_waiting_acks, c->bstate.client_waiting_acks_list_node); + c->bstate.client_waiting_acks_list_node = NULL; updateStatsOnUnblock(c, 0, 0, 0); } @@ -4902,7 +4916,7 @@ void updateFailoverStatus(void) { server.target_replica_port); server.failover_state = FAILOVER_IN_PROGRESS; /* If timeout has expired force a failover if requested. */ - replicationSetPrimary(server.target_replica_host, server.target_replica_port); + replicationSetPrimary(server.target_replica_host, server.target_replica_port, 0); return; } else { /* Force was not requested, so timeout. */ @@ -4945,6 +4959,6 @@ void updateFailoverStatus(void) { serverLog(LL_NOTICE, "Failover target %s:%d is synced, failing over.", server.target_replica_host, server.target_replica_port); /* Designated replica is caught up, failover to it. */ - replicationSetPrimary(server.target_replica_host, server.target_replica_port); + replicationSetPrimary(server.target_replica_host, server.target_replica_port, 0); } } diff --git a/src/resp_parser.c b/src/resp_parser.c index 326766fc22..950d2227b7 100644 --- a/src/resp_parser.c +++ b/src/resp_parser.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2021, Redis Labs Ltd. + * Copyright (c) 2009-2021, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/resp_parser.h b/src/resp_parser.h index a099fa8a7d..1ff08f2734 100644 --- a/src/resp_parser.h +++ b/src/resp_parser.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Redis Labs Ltd. + * Copyright (c) 2021, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rio.c b/src/rio.c index 5f8ae61a4e..b47f4c2c13 100644 --- a/src/rio.c +++ b/src/rio.c @@ -16,7 +16,7 @@ * ---------------------------------------------------------------------------- * * Copyright (c) 2009-2012, Pieter Noordhuis - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/rio.h b/src/rio.h index 698dcb66d1..ee0f27aa7e 100644 --- a/src/rio.h +++ b/src/rio.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2009-2012, Pieter Noordhuis - * Copyright (c) 2009-2019, Salvatore Sanfilippo + * Copyright (c) 2009-2019, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/sds.c b/src/sds.c index ba3362e88a..9878a6bf85 100644 --- a/src/sds.c +++ b/src/sds.c @@ -1,8 +1,7 @@ /* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Redis Ltd. * Copyright (c) 2015, Oran Agra - * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/sds.h b/src/sds.h index a12b8dd89e..e9c4a95f9a 100644 --- a/src/sds.h +++ b/src/sds.h @@ -1,8 +1,7 @@ /* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2006-2015, Redis Ltd. * Copyright (c) 2015, Oran Agra - * Copyright (c) 2015, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/sdsalloc.h b/src/sdsalloc.h index dfa8257ebd..19236af722 100644 --- a/src/sdsalloc.h +++ b/src/sdsalloc.h @@ -1,7 +1,6 @@ /* SDSLib 2.0 -- A C dynamic strings library * - * Copyright (c) 2006-2015, Salvatore Sanfilippo - * Copyright (c) 2015, Redis Labs, Inc + * Copyright (c) 2006-2015, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/sentinel.c b/src/sentinel.c index 7eee2562cc..84b75db716 100644 --- a/src/sentinel.c +++ b/src/sentinel.c @@ -1,6 +1,6 @@ /* Sentinel implementation * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -2087,7 +2087,7 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) { ri = dictGetVal(de); replica_addr = ri->addr; - /* If primary_addr (obtained using sentinelGetCurrentMasterAddress() + /* If primary_addr (obtained using sentinelGetCurrentPrimaryAddress() * so it may be the address of the promoted replica) is equal to this * replica's address, a failover is in progress and the replica was * already successfully promoted. 
So as the address of this replica @@ -3688,7 +3688,7 @@ void sentinelCommand(client *c) { "DEBUG [ ...]", " Show a list of configurable time parameters and their values (milliseconds).", " Or update current configurable parameters values (one or more).", -"GET-MASTER-ADDR-BY-NAME ", +"GET-PRIMARY-ADDR-BY-NAME ", " Return the ip and port number of the primary with that name.", "FAILOVER ", " Manually failover a primary node without asking for agreement from other", @@ -3698,12 +3698,12 @@ void sentinelCommand(client *c) { " Sentinel state.", "INFO-CACHE ", " Return last cached INFO output from primaries and all its replicas.", -"IS-MASTER-DOWN-BY-ADDR ", +"IS-PRIMARY-DOWN-BY-ADDR ", " Check if the primary specified by ip:port is down from current Sentinel's", " point of view.", -"MASTER ", +"PRIMARY ", " Show the state and info of the specified primary.", -"MASTERS", +"PRIMARIES", " Show a list of monitored primaries and their state.", "MONITOR ", " Start monitoring a new primary with the specified name, ip, port and quorum.", @@ -3727,11 +3727,11 @@ NULL }; /* clang-format on */ addReplyHelp(c, help); - } else if (!strcasecmp(c->argv[1]->ptr, "masters")) { + } else if (!strcasecmp(c->argv[1]->ptr, "primaries") || !strcasecmp(c->argv[1]->ptr, "masters")) { /* SENTINEL PRIMARIES */ if (c->argc != 2) goto numargserr; addReplyDictOfValkeyInstances(c, sentinel.primaries); - } else if (!strcasecmp(c->argv[1]->ptr, "master")) { + } else if (!strcasecmp(c->argv[1]->ptr, "primary") || !strcasecmp(c->argv[1]->ptr, "master")) { /* SENTINEL PRIMARY */ sentinelValkeyInstance *ri; @@ -3755,7 +3755,8 @@ NULL } else if (!strcasecmp(c->argv[1]->ptr, "myid") && c->argc == 2) { /* SENTINEL MYID */ addReplyBulkCBuffer(c, sentinel.myid, CONFIG_RUN_ID_SIZE); - } else if (!strcasecmp(c->argv[1]->ptr, "is-master-down-by-addr")) { + } else if (!strcasecmp(c->argv[1]->ptr, "is-primary-down-by-addr") || + !strcasecmp(c->argv[1]->ptr, "is-master-down-by-addr")) { /* SENTINEL IS-PRIMARY-DOWN-BY-ADDR * * Arguments: @@ -3807,7 +3808,8 @@ NULL /* SENTINEL RESET */ if (c->argc != 3) goto numargserr; addReplyLongLong(c, sentinelResetPrimariesByPattern(c->argv[2]->ptr, SENTINEL_GENERATE_EVENT)); - } else if (!strcasecmp(c->argv[1]->ptr, "get-master-addr-by-name")) { + } else if (!strcasecmp(c->argv[1]->ptr, "get-primary-addr-by-name") || + !strcasecmp(c->argv[1]->ptr, "get-master-addr-by-name")) { /* SENTINEL GET-PRIMARY-ADDR-BY-NAME */ sentinelValkeyInstance *ri; diff --git a/src/server.c b/src/server.c index 0699a82e50..aae570623b 100644 --- a/src/server.c +++ b/src/server.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2016, Salvatore Sanfilippo + * Copyright (c) 2009-2016, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -131,10 +131,11 @@ void serverLogRaw(int level, const char *msg) { struct timeval tv; int role_char; pid_t pid = getpid(); + int daylight_active = atomic_load_explicit(&server.daylight_active, memory_order_relaxed); gettimeofday(&tv, NULL); struct tm tm; - nolocks_localtime(&tm, tv.tv_sec, server.timezone, server.daylight_active); + nolocks_localtime(&tm, tv.tv_sec, server.timezone, daylight_active); off = strftime(buf, sizeof(buf), "%d %b %Y %H:%M:%S.", &tm); snprintf(buf + off, sizeof(buf) - off, "%03d", (int)tv.tv_usec / 1000); if (server.sentinel_mode) { @@ -1091,7 +1092,7 @@ static inline void updateCachedTimeWithUs(int update_daylight_info, const long l struct tm tm; time_t ut = server.unixtime; localtime_r(&ut, &tm); - server.daylight_active = tm.tm_isdst; + atomic_store_explicit(&server.daylight_active, tm.tm_isdst, memory_order_relaxed); } } @@ -1330,9 +1331,13 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* Show information about connected clients */ if (!server.sentinel_mode) { run_with_period(5000) { - serverLog(LL_DEBUG, "%lu clients connected (%lu replicas), %zu bytes in use", + char hmem[64]; + size_t zmalloc_used = zmalloc_used_memory(); + bytesToHuman(hmem, sizeof(hmem), zmalloc_used); + + serverLog(LL_DEBUG, "Total: %lu clients connected (%lu replicas), %zu (%s) bytes in use", listLength(server.clients) - listLength(server.replicas), listLength(server.replicas), - zmalloc_used_memory()); + zmalloc_used, hmem); } } @@ -2786,7 +2791,8 @@ void initListeners(void) { listener->bindaddr = &server.unixsocket; listener->bindaddr_count = 1; listener->ct = connectionByType(CONN_TYPE_UNIX); - listener->priv = &server.unixsocketperm; /* Unix socket specified */ + listener->priv1 = &server.unixsocketperm; /* Unix socket specified */ + listener->priv2 = server.unixsocketgroup; /* Unix socket group specified */ } /* create all the configured listener, and add handler to start to accept */ @@ -3275,6 +3281,13 @@ void slowlogPushCurrentCommand(client *c, struct serverCommand *cmd, ustime_t du * arguments. */ robj **argv = c->original_argv ? c->original_argv : c->argv; int argc = c->original_argv ? c->original_argc : c->argc; + + /* If a script is currently running, the client passed in is a + * fake client. Or the client passed in is the original client + * if this is a EVAL or alike, doesn't matter. In this case, + * use the original client to get the client information. */ + c = scriptIsRunning() ? scriptGetCaller() : c; + slowlogPushEntryIfNeeded(c, argv, argc, duration); } @@ -3904,7 +3917,30 @@ int processCommand(client *c) { if (!server.cluster_enabled && c->capa & CLIENT_CAPA_REDIRECT && server.primary_host && !mustObeyClient(c) && (is_write_command || (is_read_command && !c->flag.readonly))) { - addReplyErrorSds(c, sdscatprintf(sdsempty(), "-REDIRECT %s:%d", server.primary_host, server.primary_port)); + if (server.failover_state == FAILOVER_IN_PROGRESS) { + /* During the FAILOVER process, when conditions are met (such as + * when the force time is reached or the primary and replica offsets + * are consistent), the primary actively becomes the replica and + * transitions to the FAILOVER_IN_PROGRESS state. + * + * After the primary becomes the replica, and after handshaking + * and other operations, it will eventually send the PSYNC FAILOVER + * command to the replica, then the replica will become the primary. 
+ * This means that the upgrade of the replica to the primary is an + * asynchronous operation, which implies that during the + * FAILOVER_IN_PROGRESS state, there may be a period of time where + * both nodes are replicas. + * + * In this scenario, if a -REDIRECT is returned, the request will be + * redirected to the replica and then redirected back, causing back + * and forth redirection. To avoid this situation, during the + * FAILOVER_IN_PROGRESS state, we temporarily suspend the clients + * that need to be redirected until the replica truly becomes the primary, + * and then resume the execution. */ + blockPostponeClient(c); + } else { + addReplyErrorSds(c, sdscatprintf(sdsempty(), "-REDIRECT %s:%d", server.primary_host, server.primary_port)); + } return C_OK; } @@ -4006,6 +4042,12 @@ int processCommand(client *c) { return C_OK; } + /* Don't allow any of the UNSUBSCRIBE commands to be executed in non-pubsub mode */ + if (!c->flag.pubsub && (c->cmd->proc == unsubscribeCommand || c->cmd->proc == sunsubscribeCommand || + c->cmd->proc == punsubscribeCommand)) { + rejectCommandFormat(c, "-NOSUB '%s' command executed not in subscribed mode", c->cmd->fullname); + return C_OK; + } /* Only allow commands with flag "t", such as INFO, REPLICAOF and so on, * when replica-serve-stale-data is no and we are a replica with a broken * link with primary. */ diff --git a/src/server.h b/src/server.h index ccdece20dd..3530f42718 100644 --- a/src/server.h +++ b/src/server.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1002,7 +1002,7 @@ typedef struct multiState { } multiState; /* This structure holds the blocking operation state for a client. - * The fields used depend on client->btype. */ + * The fields used depend on client->bstate.btype. */ typedef struct blockingState { /* Generic fields. */ blocking_type btype; /* Type of blocking op if CLIENT_BLOCKED. */ @@ -1010,6 +1010,15 @@ typedef struct blockingState { * is > timeout then the operation timed out. */ int unblock_on_nokey; /* Whether to unblock the client when at least one of the keys is deleted or does not exist anymore */ + union { + listNode *client_waiting_acks_list_node; /* list node in server.clients_waiting_acks list. */ + listNode *postponed_list_node; /* list node in server.postponed_clients */ + listNode *generic_blocked_list_node; /* generic placeholder for blocked clients utility lists. + Since a client cannot be blocked multiple times, we can assume + it will be held in only one extra utility list, so it is ok to maintain + a union of these listNode references. */ + }; + /* BLOCKED_LIST, BLOCKED_ZSET and BLOCKED_STREAM or any other Keys related blocking */ dict *keys; /* The keys we are blocked on */ @@ -1319,7 +1328,6 @@ typedef struct client { sds peerid; /* Cached peer ID. */ sds sockname; /* Cached connection target address. */ listNode *client_list_node; /* list node in client list */ - listNode *postponed_list_node; /* list node within the postponed list */ void *module_blocked_client; /* Pointer to the ValkeyModuleBlockedClient associated with this * client. This is set in case of module authentication before the * unblocked client is reprocessed to handle reply callbacks.
*/ @@ -1704,6 +1712,7 @@ struct valkeyServer { int bindaddr_count; /* Number of addresses in server.bindaddr[] */ char *bind_source_addr; /* Source address to bind on for outgoing connections */ char *unixsocket; /* UNIX socket path */ + char *unixsocketgroup; /* UNIX socket group */ unsigned int unixsocketperm; /* UNIX socket permission (see mode_t) */ connListener listeners[CONN_TYPE_MAX]; /* TCP/Unix/TLS even more types */ uint32_t socket_mark_id; /* ID for listen socket marking */ @@ -2114,7 +2123,7 @@ struct valkeyServer { /* time cache */ time_t unixtime; /* Unix time sampled every cron cycle. */ time_t timezone; /* Cached timezone. As set by tzset(). */ - int daylight_active; /* Currently in daylight saving time. */ + _Atomic int daylight_active; /* Currently in daylight saving time. */ mstime_t mstime; /* 'unixtime' in milliseconds. */ ustime_t ustime; /* 'unixtime' in microseconds. */ mstime_t cmd_time_snapshot; /* Time snapshot of the root execution nesting. */ @@ -2219,6 +2228,7 @@ struct valkeyServer { sds availability_zone; /* When run in a cloud environment we can configure the availability zone it is running in */ /* Local environment */ char *locale_collate; + char *debug_context; /* A free-form string that has no impact on server except being included in a crash report. */ }; #define MAX_KEYS_BUFFER 256 @@ -3020,7 +3030,7 @@ void replicationStartPendingFork(void); void replicationHandlePrimaryDisconnection(void); void replicationCachePrimary(client *c); void resizeReplicationBacklog(void); -void replicationSetPrimary(char *ip, int port); +void replicationSetPrimary(char *ip, int port, int full_sync_required); void replicationUnsetPrimary(void); void refreshGoodReplicasCount(void); int checkGoodReplicasStatus(void); @@ -3293,6 +3303,7 @@ void adjustOpenFilesLimit(void); void incrementErrorCount(const char *fullerr, size_t namelen); void closeListeningSockets(int unlink_unix_socket); void updateCachedTime(int update_daylight_info); +void bytesToHuman(char *s, size_t size, unsigned long long n); void enterExecutionUnit(int update_cached_time, long long us); void exitExecutionUnit(void); void resetServerStats(void); diff --git a/src/serverassert.c b/src/serverassert.c index 74a2b21fb2..1364cf700f 100644 --- a/src/serverassert.c +++ b/src/serverassert.c @@ -6,7 +6,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2021, Andy Pan and Redis Labs + * Copyright (c) 2021, Andy Pan and Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/serverassert.h b/src/serverassert.h index e257c854fb..02fbcd85b7 100644 --- a/src/serverassert.h +++ b/src/serverassert.h @@ -7,7 +7,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2006-2012, Salvatore Sanfilippo + * Copyright (c) 2006-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/setproctitle.c b/src/setproctitle.c index 019402348b..d65274625a 100644 --- a/src/setproctitle.c +++ b/src/setproctitle.c @@ -2,7 +2,7 @@ * setproctitle.c - Linux/Darwin setproctitle. * -------------------------------------------------------------------------- * Copyright (C) 2010 William Ahern - * Copyright (C) 2013 Salvatore Sanfilippo + * Copyright (C) 2013 Redis Ltd. 
* Copyright (C) 2013 Stam He * * Permission is hereby granted, free of charge, to any person obtaining a diff --git a/src/siphash.c b/src/siphash.c index a62d5c0616..7527176218 100644 --- a/src/siphash.c +++ b/src/siphash.c @@ -4,7 +4,7 @@ Copyright (c) 2012-2016 Jean-Philippe Aumasson Copyright (c) 2012-2014 Daniel J. Bernstein - Copyright (c) 2017 Salvatore Sanfilippo + Copyright (c) 2017 Redis Ltd. To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain @@ -16,7 +16,7 @@ ---------------------------------------------------------------------------- - This version was modified by Salvatore Sanfilippo + This version was modified by Redis Ltd. in the following ways: 1. We use SipHash 1-2. This is not believed to be as strong as the diff --git a/src/slowlog.c b/src/slowlog.c index 2bf8e0185e..68abaf8058 100644 --- a/src/slowlog.c +++ b/src/slowlog.c @@ -10,7 +10,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/slowlog.h b/src/slowlog.h index 2372b9f6ea..12d9097ffa 100644 --- a/src/slowlog.h +++ b/src/slowlog.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/socket.c b/src/socket.c index b2f8f1aaec..295569da00 100644 --- a/src/socket.c +++ b/src/socket.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Redis Labs + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/solarisfixes.h b/src/solarisfixes.h index b68e34b968..a1473a9423 100644 --- a/src/solarisfixes.h +++ b/src/solarisfixes.h @@ -1,6 +1,6 @@ /* Solaris specific fixes. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/sort.c b/src/sort.c index bad86add3b..f027b0c321 100644 --- a/src/sort.c +++ b/src/sort.c @@ -1,6 +1,6 @@ /* SORT command and helper functions. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/sparkline.c b/src/sparkline.c index f51eeb2a86..c5ff695d45 100644 --- a/src/sparkline.c +++ b/src/sparkline.c @@ -5,7 +5,7 @@ * * --------------------------------------------------------------------------- * - * Copyright(C) 2011-2014 Salvatore Sanfilippo + * Copyright(C) 2011-2014 Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/sparkline.h b/src/sparkline.h index 6025d2b98b..1db94ba47a 100644 --- a/src/sparkline.h +++ b/src/sparkline.h @@ -2,7 +2,7 @@ * * --------------------------------------------------------------------------- * - * Copyright(C) 2011-2014 Salvatore Sanfilippo + * Copyright(C) 2011-2014 Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/syncio.c b/src/syncio.c index 427d279d4c..63e9444ff8 100644 --- a/src/syncio.c +++ b/src/syncio.c @@ -1,6 +1,6 @@ /* Synchronous socket and file I/O operations useful across the core. * - * Copyright (c) 2009-2010, Salvatore Sanfilippo + * Copyright (c) 2009-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/syscheck.c b/src/syscheck.c index 3c6252d846..d66e7f0e19 100644 --- a/src/syscheck.c +++ b/src/syscheck.c @@ -1,6 +1,5 @@ /* - * Copyright (c) 2022, Redis Ltd. - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, 2022, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/t_hash.c b/src/t_hash.c index ae4c499f9d..9398dc3e3f 100644 --- a/src/t_hash.c +++ b/src/t_hash.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/t_list.c b/src/t_list.c index 845666b13c..f5df563e40 100644 --- a/src/t_list.c +++ b/src/t_list.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/t_set.c b/src/t_set.c index 53ab37be03..b2aeec52e7 100644 --- a/src/t_set.c +++ b/src/t_set.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/t_stream.c b/src/t_stream.c index d2a45dde06..12eb78067d 100644 --- a/src/t_stream.c +++ b/src/t_stream.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Salvatore Sanfilippo + * Copyright (c) 2017, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/t_string.c b/src/t_string.c index 19e19cf5d5..6e233e0959 100644 --- a/src/t_string.c +++ b/src/t_string.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/t_zset.c b/src/t_zset.c index 6cc93ea4d1..029acde4fa 100644 --- a/src/t_zset.c +++ b/src/t_zset.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * Copyright (c) 2009-2012, Pieter Noordhuis * All rights reserved. * @@ -2529,15 +2529,6 @@ static void zdiff(zsetopsrc *src, long setnum, zset *dstzset, size_t *maxelelen, } } -dictType setAccumulatorDictType = { - dictSdsHash, /* hash function */ - NULL, /* key dup */ - dictSdsKeyCompare, /* key compare */ - NULL, /* key destructor */ - NULL, /* val destructor */ - NULL /* allow to expand */ -}; - /* The zunionInterDiffGenericCommand() function is called in order to implement the * following commands: ZUNION, ZINTER, ZDIFF, ZUNIONSTORE, ZINTERSTORE, ZDIFFSTORE, * ZINTERCARD. 
@@ -2724,7 +2715,6 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in zuiClearIterator(&src[0]); } } else if (op == SET_OP_UNION) { - dict *accumulator = dictCreate(&setAccumulatorDictType); dictIterator *di; dictEntry *de, *existing; double score; @@ -2732,7 +2722,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in if (setnum) { /* Our union is at least as large as the largest set. * Resize the dictionary ASAP to avoid useless rehashing. */ - dictExpand(accumulator, zuiLength(&src[setnum - 1])); + dictExpand(dstzset->dict, zuiLength(&src[setnum - 1])); } /* Step 1: Create a dictionary of elements -> aggregated-scores @@ -2747,7 +2737,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in if (isnan(score)) score = 0; /* Search for this element in the accumulating dictionary. */ - de = dictAddRaw(accumulator, zuiSdsFromValue(&zval), &existing); + de = dictAddRaw(dstzset->dict, zuiSdsFromValue(&zval), &existing); /* If we don't have it, we need to create a new entry. */ if (!existing) { tmp = zuiNewSdsFromValue(&zval); @@ -2757,7 +2747,7 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in totelelen += sdslen(tmp); if (sdslen(tmp) > maxelelen) maxelelen = sdslen(tmp); /* Update the element with its initial score. */ - dictSetKey(accumulator, de, tmp); + dictSetKey(dstzset->dict, de, tmp); dictSetDoubleVal(de, score); } else { /* Update the score with the score of the new instance @@ -2774,21 +2764,15 @@ void zunionInterDiffGenericCommand(client *c, robj *dstkey, int numkeysIndex, in } /* Step 2: convert the dictionary into the final sorted set. */ - di = dictGetIterator(accumulator); - - /* We now are aware of the final size of the resulting sorted set, - * let's resize the dictionary embedded inside the sorted set to the - * right size, in order to save rehashing time. */ - dictExpand(dstzset->dict, dictSize(accumulator)); + di = dictGetIterator(dstzset->dict); while ((de = dictNext(di)) != NULL) { sds ele = dictGetKey(de); score = dictGetDoubleVal(de); znode = zslInsert(dstzset->zsl, score, ele); - dictAdd(dstzset->dict, ele, &znode->score); + dictSetVal(dstzset->dict, de, &znode->score); } dictReleaseIterator(di); - dictRelease(accumulator); } else if (op == SET_OP_DIFF) { zdiff(src, setnum, dstzset, &maxelelen, &totelelen); } else { diff --git a/src/testhelp.h b/src/testhelp.h index 07c88cecd8..9acfb7b0e1 100644 --- a/src/testhelp.h +++ b/src/testhelp.h @@ -8,7 +8,7 @@ * * ---------------------------------------------------------------------------- * - * Copyright (c) 2010-2012, Salvatore Sanfilippo + * Copyright (c) 2010-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/timeout.c b/src/timeout.c index 3084edf7f3..f5534e7e71 100644 --- a/src/timeout.c +++ b/src/timeout.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2009-2020, Salvatore Sanfilippo +/* Copyright (c) 2009-2020, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/tls.c b/src/tls.c index 1913d876fa..7b57fce6f4 100644 --- a/src/tls.c +++ b/src/tls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Redis Labs + * Copyright (c) 2019, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/tracking.c b/src/tracking.c index 62d6121cfd..de1b34d850 100644 --- a/src/tracking.c +++ b/src/tracking.c @@ -1,6 +1,6 @@ /* tracking.c - Client side caching: keys tracking and invalidation * - * Copyright (c) 2019, Salvatore Sanfilippo + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/unix.c b/src/unix.c index 795b2db9f1..ddfd73465a 100644 --- a/src/unix.c +++ b/src/unix.c @@ -51,7 +51,8 @@ static int connUnixIsLocal(connection *conn) { static int connUnixListen(connListener *listener) { int fd; - mode_t *perm = (mode_t *)listener->priv; + mode_t *perm = (mode_t *)listener->priv1; + char *group = (char *)listener->priv2; if (listener->bindaddr_count == 0) return C_OK; @@ -61,7 +62,7 @@ static int connUnixListen(connListener *listener) { char *addr = listener->bindaddr[j]; unlink(addr); /* don't care if this fails */ - fd = anetUnixServer(server.neterr, addr, *perm, server.tcp_backlog); + fd = anetUnixServer(server.neterr, addr, *perm, server.tcp_backlog, group); if (fd == ANET_ERR) { serverLog(LL_WARNING, "Failed opening Unix socket: %s", server.neterr); exit(1); diff --git a/src/util.c b/src/util.c index 0d96cac6c3..66f62c9001 100644 --- a/src/util.c +++ b/src/util.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * Copyright (c) 2012, Twitter, Inc. * All rights reserved. * diff --git a/src/util.h b/src/util.h index d675f4c6cd..51eb38f0b4 100644 --- a/src/util.h +++ b/src/util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/valkey-benchmark.c b/src/valkey-benchmark.c index 5fe707510f..cccc872cd1 100644 --- a/src/valkey-benchmark.c +++ b/src/valkey-benchmark.c @@ -1,6 +1,6 @@ /* Server benchmark utility. * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/valkey-check-aof.c b/src/valkey-check-aof.c index d3ebfb348c..bc71d366d5 100644 --- a/src/valkey-check-aof.c +++ b/src/valkey-check-aof.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2009-2012, Pieter Noordhuis - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/valkey-check-rdb.c b/src/valkey-check-rdb.c index 0b2fdbb666..cab6b2a414 100644 --- a/src/valkey-check-rdb.c +++ b/src/valkey-check-rdb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/valkey-cli.c b/src/valkey-cli.c index 91a7bf1b0d..61c1e62558 100644 --- a/src/valkey-cli.c +++ b/src/valkey-cli.c @@ -1,6 +1,6 @@ /* Server CLI (command line interface) * - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -4655,10 +4655,19 @@ static int clusterManagerSetSlotOwner(clusterManagerNode *owner, int slot, int d /* Get the hash for the values of the specified keys in *keys_reply for the * specified nodes *n1 and *n2, by calling DEBUG DIGEST-VALUE command * on both nodes. Every key with same name on both nodes but having different - * values will be added to the *diffs list. Return 0 in case of reply - * error. */ -static int -clusterManagerCompareKeysValues(clusterManagerNode *n1, clusterManagerNode *n2, redisReply *keys_reply, list *diffs) { + * values will be added to the *diffs list. + * + * DEBUG DIGEST-VALUE currently will only return two errors: + * 1. Unknown subcommand. This happened in older server versions. + * 2. DEBUG command not allowed. This happened when we disable enable-debug-command. + * + * Return 0 and set the error message in case of reply error. */ +static int clusterManagerCompareKeysValues(clusterManagerNode *n1, + clusterManagerNode *n2, + redisReply *keys_reply, + list *diffs, + char **n1_err, + char **n2_err) { size_t i, argc = keys_reply->elements + 2; static const char *hash_zero = "0000000000000000000000000000000000000000"; char **argv = zcalloc(argc * sizeof(char *)); @@ -4678,18 +4687,32 @@ clusterManagerCompareKeysValues(clusterManagerNode *n1, clusterManagerNode *n2, redisReply *r1 = NULL, *r2 = NULL; redisAppendCommandArgv(n1->context, argc, (const char **)argv, argv_len); success = (redisGetReply(n1->context, &_reply1) == REDIS_OK); - if (!success) goto cleanup; + if (!success) { + fprintf(stderr, "Error getting DIGEST-VALUE from %s:%d, error: %s\n", n1->ip, n1->port, n1->context->errstr); + exit(1); + } r1 = (redisReply *)_reply1; redisAppendCommandArgv(n2->context, argc, (const char **)argv, argv_len); success = (redisGetReply(n2->context, &_reply2) == REDIS_OK); - if (!success) goto cleanup; + if (!success) { + fprintf(stderr, "Error getting DIGEST-VALUE from %s:%d, error: %s\n", n2->ip, n2->port, n2->context->errstr); + exit(1); + } r2 = (redisReply *)_reply2; success = (r1->type != REDIS_REPLY_ERROR && r2->type != REDIS_REPLY_ERROR); if (r1->type == REDIS_REPLY_ERROR) { + if (n1_err != NULL) { + *n1_err = zmalloc((r1->len + 1) * sizeof(char)); + valkey_strlcpy(*n1_err, r1->str, r1->len + 1); + } CLUSTER_MANAGER_PRINT_REPLY_ERROR(n1, r1->str); success = 0; } if (r2->type == REDIS_REPLY_ERROR) { + if (n2_err != NULL) { + *n2_err = zmalloc((r2->len + 1) * sizeof(char)); + valkey_strlcpy(*n2_err, r2->str, r2->len + 1); + } CLUSTER_MANAGER_PRINT_REPLY_ERROR(n2, r2->str); success = 0; } @@ -4875,10 +4898,27 @@ static int clusterManagerMigrateKeysInSlot(clusterManagerNode *source, if (!do_replace) { clusterManagerLogWarn("*** Checking key values on " "both nodes...\n"); + char *source_err = NULL; + char *target_err = NULL; list *diffs = listCreate(); - success = clusterManagerCompareKeysValues(source, target, reply, diffs); + success = + clusterManagerCompareKeysValues(source, target, reply, diffs, &source_err, &target_err); if (!success) { clusterManagerLogErr("*** Value check failed!\n"); + const char *debug_not_allowed = "ERR DEBUG command not allowed."; + if ((source_err && !strncmp(source_err, debug_not_allowed, 30)) || + (target_err && !strncmp(target_err, debug_not_allowed, 30))) { + clusterManagerLogErr("DEBUG command is not allowed.\n" + "You can turn on the enable-debug-command option.\n" + "Or you can relaunch the command with --cluster-replace " + "option to force key 
overriding.\n"); + } else if (source_err || target_err) { + clusterManagerLogErr("DEBUG DIGEST-VALUE command is not supported.\n" + "You can relaunch the command with --cluster-replace " + "option to force key overriding.\n"); + } + if (source_err) zfree(source_err); + if (target_err) zfree(target_err); listRelease(diffs); goto next; } @@ -4990,11 +5030,18 @@ clusterManagerMoveSlot(clusterManagerNode *source, clusterManagerNode *target, i * the face of primary failures. However, while our client is blocked on * the primary awaiting replication, the primary might become a replica * for the same reason as mentioned above, resulting in the client being - * unblocked with the role change error. */ + * unblocked with the role change error. + * + * Another acceptable error can arise now that the primary pre-replicates + * `cluster setslot` commands to replicas while blocking the client on the + * primary. And during the block, the replicas might automatically migrate + * to another primary, resulting in the client being unblocked with the + * NOREPLICAS error. In this case, since the configuration will eventually + * propagate itself, we can safely ignore this error on the source node. */ success = clusterManagerSetSlot(source, target, slot, "node", err); if (!success && err) { const char *acceptable[] = {"ERR Please use SETSLOT only with masters.", - "ERR Please use SETSLOT only with primaries.", "UNBLOCKED"}; + "ERR Please use SETSLOT only with primaries.", "UNBLOCKED", "NOREPLICAS"}; for (size_t i = 0; i < sizeof(acceptable) / sizeof(acceptable[0]); i++) { if (!strncmp(*err, acceptable[i], strlen(acceptable[i]))) { zfree(*err); @@ -6361,10 +6408,7 @@ static int clusterManagerCheckCluster(int quiet) { clusterManagerOnError(err); result = 0; if (do_fix /* && result*/) { - dictType dtype = clusterManagerDictType; - dtype.keyDestructor = dictSdsDestructor; - dtype.valDestructor = dictListDestructor; - clusterManagerUncoveredSlots = dictCreate(&dtype); + clusterManagerUncoveredSlots = dictCreate(&clusterManagerLinkDictType); int fixed = clusterManagerFixSlotsCoverage(slots); if (fixed > 0) result = 1; } diff --git a/src/ziplist.c b/src/ziplist.c index 30efdc6573..820d5e499f 100644 --- a/src/ziplist.c +++ b/src/ziplist.c @@ -151,8 +151,7 @@ * ---------------------------------------------------------------------------- * * Copyright (c) 2009-2012, Pieter Noordhuis - * Copyright (c) 2009-2017, Salvatore Sanfilippo - * Copyright (c) 2020, Redis Labs, Inc + * Copyright (c) 2009-2017, 2020, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/ziplist.h b/src/ziplist.h index d6f41af338..a839c66c60 100644 --- a/src/ziplist.h +++ b/src/ziplist.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2009-2012, Pieter Noordhuis - * Copyright (c) 2009-2012, Salvatore Sanfilippo + * Copyright (c) 2009-2012, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/zipmap.c b/src/zipmap.c index 4498a34528..25d8ab8465 100644 --- a/src/zipmap.c +++ b/src/zipmap.c @@ -12,7 +12,7 @@ * * -------------------------------------------------------------------------- * - * Copyright (c) 2009-2010, Salvatore Sanfilippo + * Copyright (c) 2009-2010, Redis Ltd. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/src/zipmap.h b/src/zipmap.h index 3fd1ee0778..0112ac2b1f 100644 --- a/src/zipmap.h +++ b/src/zipmap.h @@ -4,7 +4,7 @@ * * -------------------------------------------------------------------------- * - * Copyright (c) 2009-2010, Salvatore Sanfilippo + * Copyright (c) 2009-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/zmalloc.c b/src/zmalloc.c index afee8e07a0..7b19107b66 100644 --- a/src/zmalloc.c +++ b/src/zmalloc.c @@ -1,6 +1,6 @@ /* zmalloc - total amount of allocated memory aware version of malloc() * - * Copyright (c) 2009-2010, Salvatore Sanfilippo + * Copyright (c) 2009-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/src/zmalloc.h b/src/zmalloc.h index 421cb9bcaa..f389b905c0 100644 --- a/src/zmalloc.h +++ b/src/zmalloc.h @@ -1,6 +1,6 @@ /* zmalloc - total amount of allocated memory aware version of malloc() * - * Copyright (c) 2009-2010, Salvatore Sanfilippo + * Copyright (c) 2009-2010, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tests/cluster/cluster.tcl b/tests/cluster/cluster.tcl index 0080501bf4..72a187e784 100644 --- a/tests/cluster/cluster.tcl +++ b/tests/cluster/cluster.tcl @@ -1,6 +1,6 @@ # Cluster-specific test functions. # -# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com +# Copyright (C) 2014 Redis Ltd. # This software is released under the BSD License. See the COPYING file for # more information. diff --git a/tests/cluster/run.tcl b/tests/cluster/run.tcl index 710b123fac..d29f17db7d 100644 --- a/tests/cluster/run.tcl +++ b/tests/cluster/run.tcl @@ -1,4 +1,4 @@ -# Cluster test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com +# Cluster test suite. Copyright (C) 2014 Redis Ltd. # This software is released under the BSD License. See the COPYING file for # more information. diff --git a/tests/instances.tcl b/tests/instances.tcl index 782804ddae..5cc96b0edb 100644 --- a/tests/instances.tcl +++ b/tests/instances.tcl @@ -3,7 +3,7 @@ # basic capabilities for spawning and handling N parallel Server / Sentinel # instances. # -# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com +# Copyright (C) 2014 Redis Ltd. # This software is released under the BSD License. See the COPYING file for # more information. @@ -20,6 +20,7 @@ set ::verbose 0 set ::valgrind 0 set ::tls 0 set ::tls_module 0 +set ::io_threads 0 set ::pause_on_error 0 set ::dont_clean 0 set ::simulate_error 0 @@ -107,6 +108,11 @@ proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} { puts $cfg "port $port" } + if {$::io_threads} { + puts $cfg "io-threads 2" + puts $cfg "events-per-io-thread 0" + } + if {$::log_req_res} { puts $cfg "req-res-logfile stdout.reqres" } @@ -297,6 +303,8 @@ proc parse_options {} { if {$opt eq {--tls-module}} { set ::tls_module 1 } + } elseif {$opt eq {--io-threads}} { + set ::io_threads 1 } elseif {$opt eq {--config}} { set val2 [lindex $::argv [expr $j+2]] dict set ::global_config $val $val2 @@ -319,6 +327,7 @@ proc parse_options {} { puts "--valgrind Run with valgrind." puts "--tls Run tests in TLS mode." puts "--tls-module Run tests in TLS mode with Valkey module." + puts "--io-threads Run tests with IO threads." puts "--host Use hostname instead of 127.0.0.1." puts "--config Extra config argument(s)." 
puts "--fast-fail Exit immediately once the first test fails." diff --git a/tests/integration/dual-channel-replication.tcl b/tests/integration/dual-channel-replication.tcl index f72fd59de7..da5e521dd2 100644 --- a/tests/integration/dual-channel-replication.tcl +++ b/tests/integration/dual-channel-replication.tcl @@ -23,9 +23,14 @@ proc get_client_id_by_last_cmd {r cmd} { return $client_id } +# Wait until the process enters a paused state, then resume the process. proc wait_and_resume_process idx { set pid [srv $idx pid] - wait_for_log_messages $idx {"*Process is about to stop.*"} 0 2000 1 + wait_for_condition 50 1000 { + [string match "T*" [exec ps -o state= -p $pid]] + } else { + fail "Process $pid didn't stop, current state is [exec ps -o state= -p $pid]" + } resume_process $pid } @@ -315,13 +320,12 @@ start_server {tags {"dual-channel-replication external:skip"}} { } $replica1 replicaof no one - $primary set key3 val3 - + test "Test replica's buffer limit reached" { $primary config set repl-diskless-sync-delay 0 - $primary config set rdb-key-save-delay 500 + $primary config set rdb-key-save-delay 10000 # At this point we have about 10k keys in the db, - # We expect that the next full sync will take 5 seconds (10k*500)ms + # We expect that the next full sync will take 100 seconds (10k*10000)ms # It will give us enough time to fill the replica buffer. $replica1 config set dual-channel-replication-enabled yes $replica1 config set client-output-buffer-limit "replica 16383 16383 0" @@ -343,19 +347,25 @@ start_server {tags {"dual-channel-replication external:skip"}} { } assert {[s -2 replicas_replication_buffer_size] <= 16385*2} - # Wait for sync to succeed + # Primary replication buffer should grow wait_for_condition 50 1000 { - [status $replica1 master_link_status] == "up" + [status $primary mem_total_replication_buffers] >= 81915 } else { - fail "Replica is not synced" + fail "Primary should take the load" } - wait_for_value_to_propegate_to_replica $primary $replica1 "key3" } $replica1 replicaof no one $replica1 config set client-output-buffer-limit "replica 256mb 256mb 0"; # remove repl buffer limitation + $primary config set rdb-key-save-delay 0 - $primary set key4 val4 + wait_for_condition 500 1000 { + [s 0 rdb_bgsave_in_progress] eq 0 + } else { + fail "can't kill rdb child" + } + + $primary set key3 val3 test "dual-channel-replication fails when primary diskless disabled" { set cur_psync [status $primary sync_partial_ok] @@ -370,7 +380,7 @@ start_server {tags {"dual-channel-replication external:skip"}} { } else { fail "Replica is not synced" } - wait_for_value_to_propegate_to_replica $primary $replica1 "key4" + wait_for_value_to_propegate_to_replica $primary $replica1 "key3" # Verify that we did not use dual-channel-replication sync assert {[status $primary sync_partial_ok] == $cur_psync} @@ -687,19 +697,20 @@ start_server {tags {"dual-channel-replication external:skip"}} { set replica_log [srv 0 stdout] set replica_pid [srv 0 pid] - set load_handle0 [start_write_load $primary_host $primary_port 20] - set load_handle1 [start_write_load $primary_host $primary_port 20] - set load_handle2 [start_write_load $primary_host $primary_port 20] + set load_handle0 [start_write_load $primary_host $primary_port 60] + set load_handle1 [start_write_load $primary_host $primary_port 60] + set load_handle2 [start_write_load $primary_host $primary_port 60] $replica config set dual-channel-replication-enabled yes $replica config set loglevel debug $replica config set repl-timeout 10 + $primary config set 
repl-backlog-size 1mb test "Test dual-channel-replication primary gets cob overrun before established psync" { # Pause primary main process after fork $primary debug pause-after-fork 1 $replica replicaof $primary_host $primary_port - wait_for_log_messages 0 {"*Done loading RDB*"} 0 2000 1 + wait_for_log_messages 0 {"*Done loading RDB*"} 0 1000 10 # At this point rdb is loaded but psync hasn't been established yet. # Pause the replica so the primary main process will wake up while the @@ -707,14 +718,14 @@ start_server {tags {"dual-channel-replication external:skip"}} { pause_process $replica_pid wait_and_resume_process -1 $primary debug pause-after-fork 0 - wait_for_log_messages -1 {"*Client * closed * for overcoming of output buffer limits.*"} $loglines 2000 1 + wait_for_log_messages -1 {"*Client * closed * for overcoming of output buffer limits.*"} $loglines 1000 10 wait_for_condition 50 100 { [string match {*replicas_waiting_psync:0*} [$primary info replication]] } else { fail "Primary did not free repl buf block after sync failure" } resume_process $replica_pid - set res [wait_for_log_messages -1 {"*Unable to partial resync with replica * for lack of backlog*"} $loglines 20000 1] + set res [wait_for_log_messages -1 {"*Unable to partial resync with replica * for lack of backlog*"} $loglines 2000 10] set loglines [lindex $res 1] } $replica replicaof no one @@ -890,6 +901,7 @@ start_server {tags {"dual-channel-replication external:skip"}} { $primary debug log "killing replica main connection" set replica_main_conn_id [get_client_id_by_last_cmd $primary "psync"] assert {$replica_main_conn_id != ""} + set loglines [count_log_lines -1] $primary client kill id $replica_main_conn_id # Wait for primary to abort the sync wait_for_condition 50 1000 { @@ -897,11 +909,7 @@ start_server {tags {"dual-channel-replication external:skip"}} { } else { fail "Primary did not free repl buf block after sync failure" } - wait_for_condition 1000 10 { - [s -1 rdb_last_bgsave_status] eq "err" - } else { - fail "bgsave did not stop in time" - } + wait_for_log_messages -1 {"*Background RDB transfer error*"} $loglines 1000 10 } test "Test dual channel replication slave of no one after main conn kill" { @@ -924,17 +932,13 @@ start_server {tags {"dual-channel-replication external:skip"}} { fail "replica didn't start sync session in time" } - $primary debug log "killing replica rdb connection" set replica_rdb_channel_id [get_client_id_by_last_cmd $primary "sync"] + $primary debug log "killing replica rdb connection $replica_rdb_channel_id" assert {$replica_rdb_channel_id != ""} + set loglines [count_log_lines -1] $primary client kill id $replica_rdb_channel_id # Wait for primary to abort the sync - wait_for_condition 1000 10 { - [s -1 rdb_bgsave_in_progress] eq 0 && - [s -1 rdb_last_bgsave_status] eq "err" - } else { - fail "Primary should abort sync" - } + wait_for_log_messages -1 {"*Background RDB transfer error*"} $loglines 1000 10 } test "Test dual channel replication slave of no one after rdb conn kill" { @@ -963,6 +967,7 @@ start_server {tags {"dual-channel-replication external:skip"}} { $primary debug log "killing replica rdb connection $replica_rdb_channel_id" $primary client kill id $replica_rdb_channel_id # Wait for primary to abort the sync + wait_and_resume_process 0 wait_for_condition 10000000 10 { [s -1 rdb_bgsave_in_progress] eq 0 && [string match {*replicas_waiting_psync:0*} [$primary info replication]] @@ -972,7 +977,6 @@ start_server {tags {"dual-channel-replication external:skip"}} { # Verify 
primary reject replconf set-rdb-client-id set res [catch {$primary replconf set-rdb-client-id $replica_rdb_channel_id} err] assert [string match *ERR* $err] - wait_and_resume_process 0 } stop_write_load $load_handle } @@ -989,9 +993,9 @@ start_server {tags {"dual-channel-replication external:skip"}} { $primary config set loglevel debug $primary config set repl-diskless-sync-delay 0; # don't wait for other replicas - # Generating RDB will cost 5s(10000 * 0.0001s) + # Generating RDB will cost 100s $primary debug populate 10000 primary 1 - $primary config set rdb-key-save-delay 100 + $primary config set rdb-key-save-delay 10000 start_server {} { set replica_1 [srv 0 client] @@ -1023,11 +1027,6 @@ start_server {tags {"dual-channel-replication external:skip"}} { } $replica_2 replicaof $primary_host $primary_port wait_for_log_messages -2 {"*Current BGSAVE has socket target. Waiting for next BGSAVE for SYNC*"} $loglines 100 1000 - $primary config set rdb-key-save-delay 0 - # Verify second replica needed new session - wait_for_sync $replica_2 - assert {[s -2 sync_partial_ok] eq 2} - assert {[s -2 sync_full] eq 2} } stop_write_load $load_handle } @@ -1045,9 +1044,9 @@ start_server {tags {"dual-channel-replication external:skip"}} { $primary config set loglevel debug $primary config set repl-diskless-sync-delay 5; # allow catch failed sync before retry - # Generating RDB will cost 5s(10000 * 0.0001s) + # Generating RDB will cost 100 sec to generate $primary debug populate 10000 primary 1 - $primary config set rdb-key-save-delay 100 + $primary config set rdb-key-save-delay 10000 start_server {} { set replica [srv 0 client] @@ -1058,8 +1057,8 @@ start_server {tags {"dual-channel-replication external:skip"}} { $replica config set dual-channel-replication-enabled yes $replica config set loglevel debug $replica config set repl-timeout 10 + set load_handle [start_one_key_write_load $primary_host $primary_port 100 "mykey"] test "Replica recover rdb-connection killed" { - set load_handle [start_one_key_write_load $primary_host $primary_port 100 "mykey"] $replica replicaof $primary_host $primary_port # Wait for sync session to start wait_for_condition 500 1000 { @@ -1073,6 +1072,7 @@ start_server {tags {"dual-channel-replication external:skip"}} { $primary debug log "killing replica rdb connection" set replica_rdb_channel_id [get_client_id_by_last_cmd $primary "sync"] assert {$replica_rdb_channel_id != ""} + set loglines [count_log_lines -1] $primary client kill id $replica_rdb_channel_id # Wait for primary to abort the sync wait_for_condition 50 1000 { @@ -1080,24 +1080,23 @@ start_server {tags {"dual-channel-replication external:skip"}} { } else { fail "Primary did not free repl buf block after sync failure" } - wait_for_condition 1000 10 { - [s -1 rdb_last_bgsave_status] eq "err" - } else { - fail "bgsave did not stop in time" - } + wait_for_log_messages -1 {"*Background RDB transfer error*"} $loglines 1000 10 # Replica should retry - verify_replica_online $primary 0 500 - stop_write_load $load_handle - wait_for_condition 1000 100 { - [s -1 master_repl_offset] eq [s master_repl_offset] + wait_for_condition 500 1000 { + [string match "*slave*,state=wait_bgsave*,type=rdb-channel*" [$primary info replication]] && + [string match "*slave*,state=bg_transfer*,type=main-channel*" [$primary info replication]] && + [s -1 rdb_bgsave_in_progress] eq 1 } else { - fail "Replica offset didn't catch up with the primary after too long time" + fail "replica didn't retry after connection close" } } $replica replicaof no 
one - + wait_for_condition 500 1000 { + [s -1 rdb_bgsave_in_progress] eq 0 + } else { + fail "Primary should abort sync" + } test "Replica recover main-connection killed" { - set load_handle [start_one_key_write_load $primary_host $primary_port 100 "mykey"] $replica replicaof $primary_host $primary_port # Wait for sync session to start wait_for_condition 500 1000 { @@ -1111,6 +1110,7 @@ start_server {tags {"dual-channel-replication external:skip"}} { $primary debug log "killing replica main connection" set replica_main_conn_id [get_client_id_by_last_cmd $primary "sync"] assert {$replica_main_conn_id != ""} + set loglines [count_log_lines -1] $primary client kill id $replica_main_conn_id # Wait for primary to abort the sync wait_for_condition 50 1000 { @@ -1118,19 +1118,16 @@ start_server {tags {"dual-channel-replication external:skip"}} { } else { fail "Primary did not free repl buf block after sync failure" } - wait_for_condition 1000 10 { - [s -1 rdb_last_bgsave_status] eq "err" - } else { - fail "bgsave did not stop in time" - } + wait_for_log_messages -1 {"*Background RDB transfer error*"} $loglines 1000 10 # Replica should retry - verify_replica_online $primary 0 500 - stop_write_load $load_handle - wait_for_condition 1000 100 { - [s -1 master_repl_offset] eq [s master_repl_offset] + wait_for_condition 500 1000 { + [string match "*slave*,state=wait_bgsave*,type=rdb-channel*" [$primary info replication]] && + [string match "*slave*,state=bg_transfer*,type=main-channel*" [$primary info replication]] && + [s -1 rdb_bgsave_in_progress] eq 1 } else { - fail "Replica offset didn't catch up with the primary after too long time" - } + fail "replica didn't retry after connection close" + } } + stop_write_load $load_handle } } diff --git a/tests/integration/replica-redirect.tcl b/tests/integration/replica-redirect.tcl index 0db51dd3ff..b62ce4ca3b 100644 --- a/tests/integration/replica-redirect.tcl +++ b/tests/integration/replica-redirect.tcl @@ -1,14 +1,16 @@ start_server {tags {needs:repl external:skip}} { start_server {} { + set primary [srv -1 client] set primary_host [srv -1 host] set primary_port [srv -1 port] + set primary_pid [srv -1 pid] + + set replica_host [srv 0 host] + set replica_port [srv 0 port] + set replica_pid [srv 0 pid] r replicaof $primary_host $primary_port - wait_for_condition 50 100 { - [s 0 master_link_status] eq {up} - } else { - fail "Replicas not replicating from primary" - } + wait_replica_online $primary test {replica allow read command by default} { r get foo @@ -32,5 +34,42 @@ start_server {tags {needs:repl external:skip}} { r readonly r get foo } {} + + test {client paused during failover-in-progress} { + pause_process $replica_pid + # replica will never acknowledge this write + r -1 set foo bar + r -1 failover to $replica_host $replica_port TIMEOUT 100 FORCE + + # Wait for primary to give up on sync attempt and start failover + wait_for_condition 50 100 { + [s -1 master_failover_state] == "failover-in-progress" + } else { + fail "Failover from primary to replica did not timeout" + } + + set rd [valkey_deferring_client -1] + $rd client capa redirect + assert_match "OK" [$rd read] + $rd set foo bar + + # Client paused during failover-in-progress, see more details in PR #871 + wait_for_blocked_clients_count 1 100 10 -1 + + resume_process $replica_pid + + # Wait for failover to end + wait_for_condition 50 100 { + [s -1 master_failover_state] == "no-failover" + } else { + fail "Failover from primary to replica did not finish" + } + + assert_match *master* [r role] + 
assert_match *slave* [r -1 role] + + assert_error "REDIRECT $replica_host:$replica_port" {$rd read} + $rd close + } } } diff --git a/tests/integration/shutdown.tcl b/tests/integration/shutdown.tcl index 9949afe27c..5cfd51262e 100644 --- a/tests/integration/shutdown.tcl +++ b/tests/integration/shutdown.tcl @@ -19,8 +19,8 @@ proc fill_up_os_socket_send_buffer_for_repl {idx} { foreach how {sigterm shutdown} { test "Shutting down master waits for replica to catch up ($how)" { - start_server {overrides {save ""}} { - start_server {overrides {save ""}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { set master [srv -1 client] set master_host [srv -1 host] set master_port [srv -1 port] @@ -85,8 +85,8 @@ foreach how {sigterm shutdown} { } test {Shutting down master waits for replica timeout} { - start_server {overrides {save ""}} { - start_server {overrides {save ""}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { set master [srv -1 client] set master_host [srv -1 host] set master_port [srv -1 port] @@ -134,8 +134,8 @@ test {Shutting down master waits for replica timeout} { } {} {repl external:skip} test "Shutting down master waits for replica then fails" { - start_server {overrides {save ""}} { - start_server {overrides {save ""}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { set master [srv -1 client] set master_host [srv -1 host] set master_port [srv -1 port] @@ -193,8 +193,8 @@ test "Shutting down master waits for replica then fails" { } {} {repl external:skip} test "Shutting down master waits for replica then aborted" { - start_server {overrides {save ""}} { - start_server {overrides {save ""}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { + start_server {overrides {save "" repl-backlog-size 1MB}} { set master [srv -1 client] set master_host [srv -1 host] set master_port [srv -1 port] diff --git a/tests/modules/basics.c b/tests/modules/basics.c index eda0fdb899..36f88becbe 100644 --- a/tests/modules/basics.c +++ b/tests/modules/basics.c @@ -2,7 +2,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2016, Salvatore Sanfilippo + * Copyright (c) 2016, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tests/modules/hooks.c b/tests/modules/hooks.c index 72e2f40ff0..ef63846669 100644 --- a/tests/modules/hooks.c +++ b/tests/modules/hooks.c @@ -2,7 +2,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2019, Salvatore Sanfilippo + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tests/modules/propagate.c b/tests/modules/propagate.c index bd7f1987ba..b3cd279e5a 100644 --- a/tests/modules/propagate.c +++ b/tests/modules/propagate.c @@ -9,7 +9,7 @@ * * ----------------------------------------------------------------------------- * - * Copyright (c) 2019, Salvatore Sanfilippo + * Copyright (c) 2019, Redis Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/tests/sentinel/run.tcl b/tests/sentinel/run.tcl index d5471e13e0..9cbb189bed 100644 --- a/tests/sentinel/run.tcl +++ b/tests/sentinel/run.tcl @@ -1,4 +1,4 @@ -# Sentinel test suite. 
Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com +# Sentinel test suite. Copyright (C) 2014 Redis Ltd. # This software is released under the BSD License. See the COPYING file for # more information. diff --git a/tests/sentinel/tests/00-base.tcl b/tests/sentinel/tests/00-base.tcl index c9f23a5c34..33e590ab52 100644 --- a/tests/sentinel/tests/00-base.tcl +++ b/tests/sentinel/tests/00-base.tcl @@ -57,8 +57,8 @@ test "SENTINEL PENDING-SCRIPTS returns the information about pending scripts" { } test "SENTINEL PRIMARIES returns a list of monitored primaries" { - assert_match "*mymaster*" [S 0 SENTINEL MASTERS] - assert_morethan_equal [llength [S 0 SENTINEL MASTERS]] 1 + assert_match "*mymaster*" [S 0 SENTINEL PRIMARIES] + assert_morethan_equal [llength [S 0 SENTINEL PRIMARIES]] 1 } test "SENTINEL SENTINELS returns a list of sentinel instances" { @@ -66,7 +66,7 @@ test "SENTINEL SENTINELS returns a list of sentinel instances" { } test "SENTINEL SLAVES returns a list of the monitored replicas" { - assert_morethan_equal [llength [S 0 SENTINEL SLAVES mymaster]] 1 + assert_morethan_equal [llength [S 0 SENTINEL REPLICAS mymaster]] 1 } test "SENTINEL SIMULATE-FAILURE HELP list supported flags" { @@ -77,20 +77,20 @@ test "SENTINEL SIMULATE-FAILURE HELP list supported flags" { test "Basic failover works if the primary is down" { set old_port [RPort $master_id] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} kill_instance valkey $master_id foreach_sentinel_id id { S $id sentinel debug ping-period 500 S $id sentinel debug ask-period 500 wait_for_condition 1000 100 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port } else { fail "At least one Sentinel did not receive failover info" } } restart_instance valkey $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] set master_id [get_instance_id_by_port valkey [lindex $addr 1]] } @@ -123,12 +123,12 @@ test "ODOWN is not possible without N (quorum) Sentinels reports" { S $id SENTINEL SET mymaster quorum [expr $sentinels+1] } set old_port [RPort $master_id] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} kill_instance valkey $master_id # Make sure failover did not happened. - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} restart_instance valkey $master_id } @@ -147,7 +147,7 @@ test "Failover is not possible without majority agreement" { kill_instance valkey $master_id # Make sure failover did not happened. 
- set addr [S $quorum SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S $quorum SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} restart_instance valkey $master_id @@ -165,9 +165,9 @@ test "Failover works if we configure for absolute agreement" { # Wait for Sentinels to monitor the master again foreach_sentinel_id id { wait_for_condition 1000 100 { - [dict get [S $id SENTINEL MASTER mymaster] info-refresh] < 100000 + [dict get [S $id SENTINEL PRIMARY mymaster] info-refresh] < 100000 } else { - fail "At least one Sentinel is not monitoring the master" + fail "At least one Sentinel is not monitoring the primary" } } @@ -175,13 +175,13 @@ test "Failover works if we configure for absolute agreement" { foreach_sentinel_id id { wait_for_condition 1000 100 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port } else { fail "At least one Sentinel did not receive failover info" } } restart_instance valkey $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] set master_id [get_instance_id_by_port valkey [lindex $addr 1]] # Set the min ODOWN agreement back to strict majority. @@ -208,3 +208,13 @@ test "SENTINEL RESET can resets the primary" { assert_equal 0 $res2 assert_equal 0 $res3 } + +test "SENTINEL IS-PRIMARY-DOWN-BY-ADDR checks if the primary is down" { + set sentinel_id [S 0 SENTINEL MYID] + set master_ip_port [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] + set master_ip [lindex $master_ip_port 0] + set master_port [lindex $master_ip_port 1] + set result [S 0 SENTINEL IS-PRIMARY-DOWN-BY-ADDR $master_ip $master_port 50 $sentinel_id] + assert_equal $sentinel_id [lindex $result 1] + assert_equal {50} [lindex $result 2] +} diff --git a/tests/sentinel/tests/01-conf-update.tcl b/tests/sentinel/tests/01-conf-update.tcl index e8550e9e33..a9a2e9e062 100644 --- a/tests/sentinel/tests/01-conf-update.tcl +++ b/tests/sentinel/tests/01-conf-update.tcl @@ -4,7 +4,7 @@ source "../tests/includes/init-tests.tcl" test "We can failover with Sentinel 1 crashed" { set old_port [RPort $master_id] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} # Crash Sentinel 1 @@ -14,21 +14,21 @@ test "We can failover with Sentinel 1 crashed" { foreach_sentinel_id id { if {$id != 1} { wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port } else { fail "Sentinel $id did not receive failover info" } } } restart_instance valkey $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] set master_id [get_instance_id_by_port valkey [lindex $addr 1]] } test "After Sentinel 1 is restarted, its config gets updated" { restart_instance sentinel 1 wait_for_condition 1000 50 { - [lindex [S 1 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + [lindex [S 1 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port } else { fail "Restarted Sentinel did not receive failover info" } diff --git a/tests/sentinel/tests/02-slaves-reconf.tcl b/tests/sentinel/tests/02-replicas-reconf.tcl similarity index 88% rename from tests/sentinel/tests/02-slaves-reconf.tcl rename to tests/sentinel/tests/02-replicas-reconf.tcl index 
7fb2e615a9..4bf2a35d26 100644 --- a/tests/sentinel/tests/02-slaves-reconf.tcl +++ b/tests/sentinel/tests/02-replicas-reconf.tcl @@ -29,18 +29,18 @@ proc 02_crash_and_failover {} { uplevel 1 { test "Crash the primary and force a failover" { set old_port [RPort $master_id] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} kill_instance valkey $master_id foreach_sentinel_id id { wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port } else { fail "At least one Sentinel did not receive failover info" } } restart_instance valkey $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] set master_id [get_instance_id_by_port valkey [lindex $addr 1]] } } @@ -74,7 +74,7 @@ test "Wait for failover to end" { while {$inprogress} { set inprogress 0 foreach_sentinel_id id { - if {[dict exists [S $id SENTINEL MASTER mymaster] failover-state]} { + if {[dict exists [S $id SENTINEL PRIMARY mymaster] failover-state]} { incr inprogress } } diff --git a/tests/sentinel/tests/03-runtime-reconf.tcl b/tests/sentinel/tests/03-runtime-reconf.tcl index 46043adf1f..ac51aeecb3 100644 --- a/tests/sentinel/tests/03-runtime-reconf.tcl +++ b/tests/sentinel/tests/03-runtime-reconf.tcl @@ -50,7 +50,7 @@ proc verify_sentinel_connect_replicas {id} { proc wait_for_sentinels_connect_servers { {is_connect 1} } { foreach_sentinel_id id { wait_for_condition 1000 50 { - [string match "*disconnected*" [dict get [S $id SENTINEL MASTER mymaster] flags]] != $is_connect + [string match "*disconnected*" [dict get [S $id SENTINEL PRIMARY mymaster] flags]] != $is_connect } else { fail "At least some sentinel can't connect to master" } @@ -80,7 +80,7 @@ test "Sentinels (re)connection following SENTINEL SET myprimary auth-pass" { # Verify sentinel that restarted failed to connect master wait_for_condition 100 50 { - [string match "*disconnected*" [dict get [S $sent2re SENTINEL MASTER mymaster] flags]] != 0 + [string match "*disconnected*" [dict get [S $sent2re SENTINEL PRIMARY mymaster] flags]] != 0 } else { fail "Expected to be disconnected from master due to wrong password" } @@ -124,21 +124,21 @@ test "Sentinels (re)connection following primary ACL change" { # Verify sentinel that restarted failed to reconnect master wait_for_condition 100 50 { - [string match "*disconnected*" [dict get [S $sent2re SENTINEL MASTER mymaster] flags]] != 0 + [string match "*disconnected*" [dict get [S $sent2re SENTINEL PRIMARY mymaster] flags]] != 0 } else { fail "Expected: Restarted sentinel to be disconnected from master due to obsolete password" } # Verify sentinel with updated password managed to connect (wait for sentinelTimer to reconnect) wait_for_condition 100 50 { - [string match "*disconnected*" [dict get [S $sent2up SENTINEL MASTER mymaster] flags]] == 0 + [string match "*disconnected*" [dict get [S $sent2up SENTINEL PRIMARY mymaster] flags]] == 0 } else { fail "Expected: Sentinel to be connected to master" } # Verify sentinel untouched gets failed to connect master wait_for_condition 100 50 { - [string match "*disconnected*" [dict get [S $sent2un SENTINEL MASTER mymaster] flags]] != 0 + [string match "*disconnected*" [dict get [S $sent2un SENTINEL PRIMARY mymaster] flags]] != 0 } else { fail "Expected: Sentinel to be disconnected from master due 
to obsolete password" } @@ -164,7 +164,7 @@ test "Sentinels (re)connection following primary ACL change" { test "Set parameters in normal case" { - set info [S 0 SENTINEL master mymaster] + set info [S 0 SENTINEL primary mymaster] set origin_quorum [dict get $info quorum] set origin_down_after_milliseconds [dict get $info down-after-milliseconds] set update_quorum [expr $origin_quorum+1] @@ -173,7 +173,7 @@ test "Set parameters in normal case" { assert_equal [S 0 SENTINEL SET mymaster quorum $update_quorum] "OK" assert_equal [S 0 SENTINEL SET mymaster down-after-milliseconds $update_down_after_milliseconds] "OK" - set update_info [S 0 SENTINEL master mymaster] + set update_info [S 0 SENTINEL primary mymaster] assert {[dict get $update_info quorum] != $origin_quorum} assert {[dict get $update_info down-after-milliseconds] != $origin_down_after_milliseconds} @@ -184,13 +184,13 @@ test "Set parameters in normal case" { test "Set parameters in normal case with bad format" { - set info [S 0 SENTINEL master mymaster] + set info [S 0 SENTINEL primary mymaster] set origin_down_after_milliseconds [dict get $info down-after-milliseconds] assert_error "ERR Invalid argument '-20' for SENTINEL SET 'down-after-milliseconds'*" {S 0 SENTINEL SET mymaster down-after-milliseconds -20} assert_error "ERR Invalid argument 'abc' for SENTINEL SET 'down-after-milliseconds'*" {S 0 SENTINEL SET mymaster down-after-milliseconds "abc"} - set current_info [S 0 SENTINEL master mymaster] + set current_info [S 0 SENTINEL primary mymaster] assert {[dict get $current_info down-after-milliseconds] == $origin_down_after_milliseconds} } @@ -206,7 +206,7 @@ test "Sentinel Set with other error situations" { assert_error "ERR Unknown option or number of arguments for SENTINEL SET 'fakeoption'" {S 0 SENTINEL SET mymaster fakeoption fakevalue} # save new config to disk failed - set info [S 0 SENTINEL master mymaster] + set info [S 0 SENTINEL primary mymaster] set origin_quorum [dict get $info quorum] set update_quorum [expr $origin_quorum+1] set sentinel_id 0 diff --git a/tests/sentinel/tests/05-manual.tcl b/tests/sentinel/tests/05-manual.tcl index ba5572c1a9..7f5485c42c 100644 --- a/tests/sentinel/tests/05-manual.tcl +++ b/tests/sentinel/tests/05-manual.tcl @@ -10,7 +10,7 @@ foreach_sentinel_id id { test "Manual failover works" { set old_port [RPort $master_id] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} # Since we reduced the info-period (default 10000) above immediately, @@ -29,12 +29,12 @@ test "Manual failover works" { foreach_sentinel_id id { wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port } else { fail "At least one Sentinel did not receive failover info" } } - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] set master_id [get_instance_id_by_port valkey [lindex $addr 1]] } diff --git a/tests/sentinel/tests/07-down-conditions.tcl b/tests/sentinel/tests/07-down-conditions.tcl index 1068cd18fb..fbc1924816 100644 --- a/tests/sentinel/tests/07-down-conditions.tcl +++ b/tests/sentinel/tests/07-down-conditions.tcl @@ -18,7 +18,7 @@ proc ensure_master_up {} { S $::alive_sentinel sentinel debug ask-period 100 S $::alive_sentinel sentinel debug publish-period 100 wait_for_condition 1000 50 { - [dict get [S 
$::alive_sentinel sentinel master mymaster] flags] eq "master" + [dict get [S $::alive_sentinel sentinel primary mymaster] flags] eq "master" } else { fail "Master flags are not just 'master'" } @@ -31,7 +31,7 @@ proc ensure_master_down {} { S $::alive_sentinel sentinel debug publish-period 100 wait_for_condition 1000 50 { [string match *down* \ - [dict get [S $::alive_sentinel sentinel master mymaster] flags]] + [dict get [S $::alive_sentinel sentinel primary mymaster] flags]] } else { fail "Master is not flagged SDOWN" } @@ -45,7 +45,7 @@ test "Crash the majority of Sentinels to prevent failovers for this unit" { test "SDOWN is triggered by non-responding but not crashed instance" { ensure_master_up - set master_addr [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set master_addr [S $::alive_sentinel SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] set master_id [get_instance_id_by_port valkey [lindex $master_addr 1]] set pid [get_instance_attrib valkey $master_id pid] @@ -56,7 +56,7 @@ test "SDOWN is triggered by non-responding but not crashed instance" { } test "SDOWN is triggered by crashed instance" { - lassign [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port + lassign [S $::alive_sentinel SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] host port ensure_master_up kill_instance valkey 0 ensure_master_down diff --git a/tests/sentinel/tests/08-hostname-conf.tcl b/tests/sentinel/tests/08-hostname-conf.tcl index 4a551cf856..6362e81253 100644 --- a/tests/sentinel/tests/08-hostname-conf.tcl +++ b/tests/sentinel/tests/08-hostname-conf.tcl @@ -35,7 +35,7 @@ source "../tests/includes/init-tests.tcl" proc verify_hostname_announced {hostname} { foreach_sentinel_id id { # Master is reported with its hostname - if {![string equal [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 0] $hostname]} { + if {![string equal [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 0] $hostname]} { return 0 } @@ -66,4 +66,4 @@ test "(post-cleanup) Configure instances and sentinel for IPs" { set_redis_announce_ip $::host set_sentinel_config resolve-hostnames no set_sentinel_config announce-hostnames no -} \ No newline at end of file +} diff --git a/tests/sentinel/tests/11-port-0.tcl b/tests/sentinel/tests/11-port-0.tcl index a3e8bdba1c..a3a1fa1514 100644 --- a/tests/sentinel/tests/11-port-0.tcl +++ b/tests/sentinel/tests/11-port-0.tcl @@ -13,7 +13,7 @@ test "Start/Stop sentinel on same port with a different runID should not change delete_lines_with_pattern $orgfilename $tmpfilename "myid" # Get count of total sentinels - set a [S 0 SENTINEL master mymaster] + set a [S 0 SENTINEL primary mymaster] set original_count [lindex $a 33] # Restart sentinel with the modified config file @@ -23,7 +23,7 @@ test "Start/Stop sentinel on same port with a different runID should not change after 1000 # Get new count of total sentinel - set b [S 0 SENTINEL master mymaster] + set b [S 0 SENTINEL primary mymaster] set curr_count [lindex $b 33] # If the count is not the same then fail the test diff --git a/tests/sentinel/tests/12-master-reboot.tcl b/tests/sentinel/tests/12-primary-reboot.tcl similarity index 92% rename from tests/sentinel/tests/12-master-reboot.tcl rename to tests/sentinel/tests/12-primary-reboot.tcl index 3d7c7c6ecb..3fb993564c 100644 --- a/tests/sentinel/tests/12-master-reboot.tcl +++ b/tests/sentinel/tests/12-primary-reboot.tcl @@ -37,7 +37,7 @@ proc reboot_instance {type id} { test "Primary reboot in very short time" { set old_port [RPort $master_id] - set addr [S 
0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] assert {[lindex $addr 1] == $old_port} R $master_id debug populate 10000 @@ -59,13 +59,13 @@ test "Primary reboot in very short time" { foreach_sentinel_id id { wait_for_condition 1000 100 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port + [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port } else { fail "At least one Sentinel did not receive failover info" } } - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set addr [S 0 SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] set master_id [get_instance_id_by_port valkey [lindex $addr 1]] # Make sure the instance load all the dataset @@ -90,7 +90,7 @@ test "All the other slaves now point to the new primary" { wait_for_condition 1000 50 { [RI $id master_port] == [lindex $addr 1] } else { - fail "Valkey ID $id not configured to replicate with new master" + fail "Valkey ID $id not configured to replicate with new primary" } } } diff --git a/tests/sentinel/tests/15-sentinel-deprecated-commands.tcl b/tests/sentinel/tests/15-sentinel-deprecated-commands.tcl new file mode 100644 index 0000000000..38eadcdfdf --- /dev/null +++ b/tests/sentinel/tests/15-sentinel-deprecated-commands.tcl @@ -0,0 +1,27 @@ +# Test that the deprecated Sentinel commands (MASTERS, SLAVES, MASTER, IS-MASTER-DOWN-BY-ADDR) still work. +source "../tests/includes/init-tests.tcl" + +test "SENTINEL MASTERS returns a list of monitored masters (SENTINEL MASTERS as a deprecated command)" { + assert_match "*mymaster*" [S 0 SENTINEL MASTERS] + assert_morethan_equal [llength [S 0 SENTINEL MASTERS]] 1 +} + +test "SENTINEL SLAVES returns a list of the monitored slaves (SENTINEL SLAVES as a deprecated command)" { + assert_morethan_equal [llength [S 0 SENTINEL SLAVES mymaster]] 1 +} + +test "SENTINEL MASTER returns the information list of the monitored master (SENTINEL MASTER as a deprecated command)" { + set info [S 0 SENTINEL MASTER mymaster] + assert_equal mymaster [dict get $info name] +} + +test "SENTINEL IS-MASTER-DOWN-BY-ADDR checks if the primary is down (SENTINEL IS-MASTER-DOWN-BY-ADDR as a deprecated command)" { + set sentinel_id [S 0 SENTINEL MYID] + set master_ip_port [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] + set master_ip [lindex $master_ip_port 0] + set master_port [lindex $master_ip_port 1] + set result [S 0 SENTINEL IS-MASTER-DOWN-BY-ADDR $master_ip $master_port 99 $sentinel_id] + assert_equal $sentinel_id [lindex $result 1] + assert_equal {99} [lindex $result 2] +} + diff --git a/tests/sentinel/tests/15-config-set-config-get.tcl b/tests/sentinel/tests/16-config-set-config-get.tcl similarity index 100% rename from tests/sentinel/tests/15-config-set-config-get.tcl rename to tests/sentinel/tests/16-config-set-config-get.tcl diff --git a/tests/sentinel/tests/includes/init-tests.tcl b/tests/sentinel/tests/includes/init-tests.tcl index 8635e0e255..456a810c5d 100644 --- a/tests/sentinel/tests/includes/init-tests.tcl +++ b/tests/sentinel/tests/includes/init-tests.tcl @@ -12,7 +12,7 @@ test "(init) Remove old primary entry from sentinels" { } set redis_slaves [expr $::instances_count - 1] -test "(init) Create a primary-slaves cluster of [expr $redis_slaves+1] instances" { +test "(init) Create a primary-replicas cluster of [expr $redis_slaves+1] instances" { create_valkey_master_slave_cluster [expr {$redis_slaves+1}] } set master_id 0 @@ -26,7 +26,7 @@ test "(init) Sentinels can start monitoring a primary" { [get_instance_attrib valkey $master_id port] $quorum
} foreach_sentinel_id id { - assert {[S $id sentinel master mymaster] ne {}} + assert {[S $id sentinel primary mymaster] ne {}} S $id SENTINEL SET mymaster down-after-milliseconds 2000 S $id SENTINEL SET mymaster failover-timeout 10000 S $id SENTINEL debug tilt-period 5000 @@ -41,9 +41,9 @@ test "(init) Sentinels can start monitoring a primary" { test "(init) Sentinels can talk with the primary" { foreach_sentinel_id id { wait_for_condition 1000 50 { - [catch {S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster}] == 0 + [catch {S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster}] == 0 } else { - fail "Sentinel $id can't talk with the master." + fail "Sentinel $id can't talk with the primary." } } } @@ -52,12 +52,12 @@ test "(init) Sentinels are able to auto-discover other sentinels" { verify_sentinel_auto_discovery } -test "(init) Sentinels are able to auto-discover slaves" { +test "(init) Sentinels are able to auto-discover replicas" { foreach_sentinel_id id { wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-slaves] == $redis_slaves + [dict get [S $id SENTINEL PRIMARY mymaster] num-slaves] == $redis_slaves } else { - fail "At least some sentinel can't detect some slave" + fail "At least some sentinels can't detect some replicas" } } } diff --git a/tests/sentinel/tests/includes/utils.tcl b/tests/sentinel/tests/includes/utils.tcl index 5909ab0cd9..7d963de19d 100644 --- a/tests/sentinel/tests/includes/utils.tcl +++ b/tests/sentinel/tests/includes/utils.tcl @@ -14,7 +14,7 @@ proc verify_sentinel_auto_discovery {} { set sentinels [llength $::sentinel_instances] foreach_sentinel_id id { wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1) + [dict get [S $id SENTINEL PRIMARY mymaster] num-other-sentinels] == ($sentinels-1) } else { fail "At least some sentinel can't detect some other sentinel" } diff --git a/tests/support/cluster.tcl b/tests/support/cluster.tcl index 2b9e44f64f..e9a5395be5 100644 --- a/tests/support/cluster.tcl +++ b/tests/support/cluster.tcl @@ -1,5 +1,5 @@ # Tcl cluster client as a wrapper of redis.rb. -# Copyright (C) 2014 Salvatore Sanfilippo +# Copyright (c) 2014 Redis Ltd. # Released under the BSD license like Redis itself # # Example usage: diff --git a/tests/support/server.tcl b/tests/support/server.tcl index cc8a9ea64f..e8f9f8fb44 100644 --- a/tests/support/server.tcl +++ b/tests/support/server.tcl @@ -241,6 +241,16 @@ proc tags_acceptable {tags err_return} { return 0 } + if {$::io_threads && [lsearch $tags "io-threads:skip"] >= 0} { + set err "Not supported in io-threads mode" + return 0 + } + + if {$::tcl_version < 8.6 && [lsearch $tags "ipv6"] >= 0} { + set err "TCL version is too low and does not support this" + return 0 + } + return 1 } @@ -497,6 +507,12 @@ proc start_server {options {code undefined}} { dict set config "tls-ca-cert-file" [format "%s/tests/tls/ca.crt" [pwd]] dict set config "loglevel" "debug" } + + if {$::io_threads} { + dict set config "io-threads" 2 + dict set config "events-per-io-thread" 0 + } + foreach line $data { if {[string length $line] > 0 && [string index $line 0] ne "#"} { set elements [split $line " "] diff --git a/tests/support/valkey.tcl b/tests/support/valkey.tcl index 6f8727ace8..a5d1e2ca0a 100644 --- a/tests/support/valkey.tcl +++ b/tests/support/valkey.tcl @@ -1,5 +1,5 @@ # Tcl client library - used by the server test -# Copyright (C) 2009-2014 Salvatore Sanfilippo +# Copyright (c) 2009-2014 Redis Ltd. 
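The sentinel suites above now exercise the renamed SENTINEL PRIMARY and SENTINEL GET-PRIMARY-ADDR-BY-NAME subcommands, while the new 15-sentinel-deprecated-commands.tcl keeps the legacy MASTER/MASTERS/SLAVES spellings covered. A minimal sketch of a cross-check that could sit alongside those tests, assuming only the S, assert_equal and dict helpers this harness already uses (the proc name is illustrative):

    proc assert_primary_alias_consistent {sentinel_idx primary_name} {
        # The renamed and the deprecated subcommand should describe the same primary.
        set renamed [S $sentinel_idx SENTINEL PRIMARY $primary_name]
        set legacy [S $sentinel_idx SENTINEL MASTER $primary_name]
        assert_equal [dict get $renamed name] [dict get $legacy name]
        assert_equal [dict get $renamed quorum] [dict get $legacy quorum]
    }
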
# Released under the BSD license like Redis itself # # Example usage: diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index ae784271c9..feee94ef80 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -1,4 +1,4 @@ -# Server test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com +# Server test suite. Copyright (C) 2009 Redis Ltd. # This software is released under the BSD License. See the COPYING file for # more information. @@ -41,6 +41,7 @@ set ::traceleaks 0 set ::valgrind 0 set ::durable 0 set ::tls 0 +set ::io_threads 0 set ::tls_module 0 set ::stack_logging 0 set ::verbose 0 @@ -576,6 +577,7 @@ proc print_help_screen {} { "--loops Execute the specified set of tests several times." "--wait-server Wait after server is started (so that you can attach a debugger)." "--dump-logs Dump server log on test failure." + "--io-threads Run tests with IO threads." "--tls Run tests in TLS mode." "--tls-module Run tests in TLS mode with Valkey module." "--host Run tests against an external host." @@ -630,6 +632,8 @@ for {set j 0} {$j < [llength $argv]} {incr j} { } } elseif {$opt eq {--quiet}} { set ::quiet 1 + } elseif {$opt eq {--io-threads}} { + set ::io_threads 1 } elseif {$opt eq {--tls} || $opt eq {--tls-module}} { package require tls 1.6 set ::tls 1 diff --git a/tests/unit/cluster/replica-migration.tcl b/tests/unit/cluster/replica-migration.tcl new file mode 100644 index 0000000000..9145bbfb31 --- /dev/null +++ b/tests/unit/cluster/replica-migration.tcl @@ -0,0 +1,193 @@ +# Allocate slot 0 to the last primary and evenly distribute the remaining +# slots to the remaining primaries. +proc my_slot_allocation {masters replicas} { + set avg [expr double(16384) / [expr $masters-1]] + set slot_start 1 + for {set j 0} {$j < $masters-1} {incr j} { + set slot_end [expr int(ceil(($j + 1) * $avg) - 1)] + R $j cluster addslotsrange $slot_start $slot_end + set slot_start [expr $slot_end + 1] + } + R [expr $masters-1] cluster addslots 0 +} + +start_cluster 4 4 {tags {external:skip cluster} overrides {cluster-node-timeout 1000 cluster-migration-barrier 999}} { + test "Migrated replica reports zero repl offset and rank, and fails to win election" { + # Write some data to primary 0, slot 1, make a small repl_offset. + for {set i 0} {$i < 1024} {incr i} { + R 0 incr key_991803 + } + assert_equal {1024} [R 0 get key_991803] + + # Write some data to primary 3, slot 0, make a big repl_offset. + for {set i 0} {$i < 10240} {incr i} { + R 3 incr key_977613 + } + assert_equal {10240} [R 3 get key_977613] + + # 10s, make sure primary 0 will hang in the save. + R 0 config set rdb-key-save-delay 100000000 + + # Move the slot 0 from primary 3 to primary 0 + set addr "[srv 0 host]:[srv 0 port]" + set myid [R 3 CLUSTER MYID] + set code [catch { + exec src/valkey-cli {*}[valkeycli_tls_config "./tests"] --cluster rebalance $addr --cluster-weight $myid=0 + } result] + if {$code != 0} { + fail "valkey-cli --cluster rebalance returns non-zero exit code, output below:\n$result" + } + + # Validate that shard 3's primary and replica can convert to replicas after + # they lose the last slot. + R 3 config set cluster-replica-validity-factor 0 + R 7 config set cluster-replica-validity-factor 0 + R 3 config set cluster-allow-replica-migration yes + R 7 config set cluster-allow-replica-migration yes + + # Shutdown primary 0. + catch {R 0 shutdown nosave} + + # Wait for the replica to become a primary, and make sure + # the other primary become a replica. 
+ wait_for_condition 1000 50 { + [s -4 role] eq {master} && + [s -3 role] eq {slave} && + [s -7 role] eq {slave} + } else { + puts "s -4 role: [s -4 role]" + puts "s -3 role: [s -3 role]" + puts "s -7 role: [s -7 role]" + fail "Failover did not happen" + } + + # Make sure the offset of server 3 / 7 is 0. + verify_log_message -3 "*Start of election*offset 0*" 0 + verify_log_message -7 "*Start of election*offset 0*" 0 + + # Make sure the right replica gets the higher rank. + verify_log_message -4 "*Start of election*rank #0*" 0 + + # Wait for the cluster to be ok. + wait_for_condition 1000 50 { + [CI 3 cluster_state] eq "ok" && + [CI 4 cluster_state] eq "ok" && + [CI 7 cluster_state] eq "ok" + } else { + puts "R 3: [R 3 cluster info]" + puts "R 4: [R 4 cluster info]" + puts "R 7: [R 7 cluster info]" + fail "Cluster is down" + } + + # Make sure the key exists and is consistent. + R 3 readonly + R 7 readonly + wait_for_condition 1000 50 { + [R 3 get key_991803] == 1024 && [R 3 get key_977613] == 10240 && + [R 4 get key_991803] == 1024 && [R 4 get key_977613] == 10240 && + [R 7 get key_991803] == 1024 && [R 7 get key_977613] == 10240 + } else { + puts "R 3: [R 3 keys *]" + puts "R 4: [R 4 keys *]" + puts "R 7: [R 7 keys *]" + fail "Key not consistent" + } + } +} my_slot_allocation cluster_allocate_replicas ;# start_cluster + +start_cluster 4 4 {tags {external:skip cluster} overrides {cluster-node-timeout 1000 cluster-migration-barrier 999}} { + test "New non-empty replica reports zero repl offset and rank, and fails to win election" { + # Write some data to primary 0, slot 1, make a small repl_offset. + for {set i 0} {$i < 1024} {incr i} { + R 0 incr key_991803 + } + assert_equal {1024} [R 0 get key_991803] + + # Write some data to primary 3, slot 0, make a big repl_offset. + for {set i 0} {$i < 10240} {incr i} { + R 3 incr key_977613 + } + assert_equal {10240} [R 3 get key_977613] + + # 10s, make sure primary 0 will hang in the save. + R 0 config set rdb-key-save-delay 100000000 + + # Make server 7 a replica of server 0. + R 7 config set cluster-replica-validity-factor 0 + R 7 config set cluster-allow-replica-migration yes + R 7 cluster replicate [R 0 cluster myid] + + # Shutdown primary 0. + catch {R 0 shutdown nosave} + + # Wait for the replica to become a primary. + wait_for_condition 1000 50 { + [s -4 role] eq {master} && + [s -7 role] eq {slave} + } else { + puts "s -4 role: [s -4 role]" + puts "s -7 role: [s -7 role]" + fail "Failover did not happen" + } + + # Make sure server 7 gets the lower rank and its offset is 0. + verify_log_message -4 "*Start of election*rank #0*" 0 + verify_log_message -7 "*Start of election*offset 0*" 0 + + # Wait for the cluster to be ok. + wait_for_condition 1000 50 { + [CI 4 cluster_state] eq "ok" && + [CI 7 cluster_state] eq "ok" + } else { + puts "R 4: [R 4 cluster info]" + puts "R 7: [R 7 cluster info]" + fail "Cluster is down" + } + + # Make sure the key exists and is consistent. 
+ R 7 readonly + wait_for_condition 1000 50 { + [R 4 get key_991803] == 1024 && + [R 7 get key_991803] == 1024 + } else { + puts "R 4: [R 4 get key_991803]" + puts "R 7: [R 7 get key_991803]" + fail "Key not consistent" + } + } +} my_slot_allocation cluster_allocate_replicas ;# start_cluster + +start_cluster 4 4 {tags {external:skip cluster} overrides {cluster-node-timeout 1000 cluster-migration-barrier 999}} { + test "valkey-cli makes the source node ignore NOREPLICAS error when doing the last CLUSTER SETSLOT" { + R 3 config set cluster-allow-replica-migration no + R 7 config set cluster-allow-replica-migration yes + + # Record the current primary node; server 7 will be migrated later. + set old_primary_ip [lindex [R 7 role] 1] + set old_primary_port [lindex [R 7 role] 2] + + # Move slot 0 from primary 3 to primary 0. + set addr "[srv 0 host]:[srv 0 port]" + set myid [R 3 CLUSTER MYID] + set code [catch { + exec src/valkey-cli {*}[valkeycli_tls_config "./tests"] --cluster rebalance $addr --cluster-weight $myid=0 + } result] + if {$code != 0} { + fail "valkey-cli --cluster rebalance returns non-zero exit code, output below:\n$result" + } + + wait_for_cluster_propagation + wait_for_cluster_state "ok" + + # Make sure server 3 is still a primary and has no replicas. + assert_equal [s -3 role] {master} + assert_equal [lindex [R 3 role] 2] {} + + # And server 7 becomes a replica of another primary. + set new_primary_ip [lindex [R 7 role] 1] + set new_primary_port [lindex [R 7 role] 2] + assert_equal [s -7 role] {slave} + assert_not_equal "$old_primary_ip:$old_primary_port" "$new_primary_ip:$new_primary_port" + } +} my_slot_allocation cluster_allocate_replicas ;# start_cluster diff --git a/tests/unit/cluster/slot-migration.tcl b/tests/unit/cluster/slot-migration.tcl index 030404dfde..d798971968 100644 --- a/tests/unit/cluster/slot-migration.tcl +++ b/tests/unit/cluster/slot-migration.tcl @@ -435,3 +435,35 @@ start_cluster 2 0 {tags {tls:skip external:skip cluster regression} overrides {c R 0 MIGRATE 127.0.0.1 [lindex [R 1 CONFIG GET port] 1] $stream_name 0 5000 } } + +start_cluster 3 6 {tags {external:skip cluster} overrides {cluster-node-timeout 1000} } { + test "Slot migration is ok when the replicas are down" { + # Killing all replicas in primary 0. + assert_equal 2 [s 0 connected_slaves] + catch {R 3 shutdown nosave} + catch {R 6 shutdown nosave} + wait_for_condition 50 100 { + [s 0 connected_slaves] == 0 + } else { + fail "The replicas of primary 0 are still connected" + } + + # Killing one replica in primary 1. + assert_equal 2 [s -1 connected_slaves] + catch {R 4 shutdown nosave} + wait_for_condition 50 100 { + [s -1 connected_slaves] == 1 + } else { + fail "The replica of primary 1 is still connected" + } + + # Check slot migration is ok when the replicas are down. + migrate_slot 0 1 0 + migrate_slot 0 2 1 + assert_equal {OK} [R 0 CLUSTER SETSLOT 0 NODE [R 1 CLUSTER MYID]] + assert_equal {OK} [R 0 CLUSTER SETSLOT 1 NODE [R 2 CLUSTER MYID]] + wait_for_slot_state 0 "" + wait_for_slot_state 1 "" + wait_for_slot_state 2 "" + } +} diff --git a/tests/unit/info.tcl b/tests/unit/info.tcl index 9c54463482..61d1acd1f8 100644 --- a/tests/unit/info.tcl +++ b/tests/unit/info.tcl @@ -308,51 +308,49 @@ start_server {tags {"info" "external:skip"}} { assert_equal "count=2" [errorstat ERR] } - # skip the following 2 tests if we are running with io-threads as the eventloop metrics are different in that case. 
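For orientation, the my_slot_allocation helper at the top of replica-migration.tcl gives every primary except the last an equal share of the 16384 slots, so the last primary owns only slot 0 and can be drained by a single rebalance with its weight set to 0. A quick sketch that re-derives the resulting ranges for the 4-primary clusters used above (standalone tclsh code, not part of the test suite):

    proc show_my_slot_allocation {masters} {
        # Same arithmetic as my_slot_allocation: avg = 16384 / (masters - 1).
        set avg [expr double(16384) / [expr $masters-1]]
        set slot_start 1
        for {set j 0} {$j < $masters-1} {incr j} {
            set slot_end [expr int(ceil(($j + 1) * $avg) - 1)]
            puts "primary $j: slots $slot_start-$slot_end"
            set slot_start [expr $slot_end + 1]
        }
        puts "primary [expr $masters-1]: slot 0 only"
    }
    show_my_slot_allocation 4 ;# prints 1-5461, 5462-10922, 10923-16383, then slot 0
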
- if {[r config get io-threads] eq 0} { - test {stats: eventloop metrics} { - set info1 [r info stats] - set cycle1 [getInfoProperty $info1 eventloop_cycles] - set el_sum1 [getInfoProperty $info1 eventloop_duration_sum] - set cmd_sum1 [getInfoProperty $info1 eventloop_duration_cmd_sum] - assert_morethan $cycle1 0 - assert_morethan $el_sum1 0 - assert_morethan $cmd_sum1 0 - after 110 ;# default hz is 10, wait for a cron tick. - set info2 [r info stats] - set cycle2 [getInfoProperty $info2 eventloop_cycles] - set el_sum2 [getInfoProperty $info2 eventloop_duration_sum] - set cmd_sum2 [getInfoProperty $info2 eventloop_duration_cmd_sum] - if {$::verbose} { puts "eventloop metrics cycle1: $cycle1, cycle2: $cycle2" } - assert_morethan $cycle2 $cycle1 - assert_lessthan $cycle2 [expr $cycle1+10] ;# we expect 2 or 3 cycles here, but allow some tolerance - if {$::verbose} { puts "eventloop metrics el_sum1: $el_sum1, el_sum2: $el_sum2" } - assert_morethan $el_sum2 $el_sum1 - assert_lessthan $el_sum2 [expr $el_sum1+30000] ;# we expect roughly 100ms here, but allow some tolerance - if {$::verbose} { puts "eventloop metrics cmd_sum1: $cmd_sum1, cmd_sum2: $cmd_sum2" } - assert_morethan $cmd_sum2 $cmd_sum1 - assert_lessthan $cmd_sum2 [expr $cmd_sum1+15000] ;# we expect about tens of ms here, but allow some tolerance - } - - test {stats: instantaneous metrics} { - r config resetstat - set retries 0 - for {set retries 1} {$retries < 4} {incr retries} { - after 1600 ;# hz is 10, wait for 16 cron tick so that sample array is fulfilled - set value [s instantaneous_eventloop_cycles_per_sec] - if {$value > 0} break - } - - assert_lessthan $retries 4 - if {$::verbose} { puts "instantaneous metrics instantaneous_eventloop_cycles_per_sec: $value" } - assert_morethan $value 0 - assert_lessthan $value [expr $retries*15] ;# default hz is 10 - set value [s instantaneous_eventloop_duration_usec] - if {$::verbose} { puts "instantaneous metrics instantaneous_eventloop_duration_usec: $value" } - assert_morethan $value 0 - assert_lessthan $value [expr $retries*22000] ;# default hz is 10, so duration < 1000 / 10, allow some tolerance + test {stats: eventloop metrics} { + set info1 [r info stats] + set cycle1 [getInfoProperty $info1 eventloop_cycles] + set el_sum1 [getInfoProperty $info1 eventloop_duration_sum] + set cmd_sum1 [getInfoProperty $info1 eventloop_duration_cmd_sum] + assert_morethan $cycle1 0 + assert_morethan $el_sum1 0 + assert_morethan $cmd_sum1 0 + after 110 ;# default hz is 10, wait for a cron tick. + set info2 [r info stats] + set cycle2 [getInfoProperty $info2 eventloop_cycles] + set el_sum2 [getInfoProperty $info2 eventloop_duration_sum] + set cmd_sum2 [getInfoProperty $info2 eventloop_duration_cmd_sum] + if {$::verbose} { puts "eventloop metrics cycle1: $cycle1, cycle2: $cycle2" } + assert_morethan $cycle2 $cycle1 + assert_lessthan $cycle2 [expr $cycle1+10] ;# we expect 2 or 3 cycles here, but allow some tolerance + if {$::verbose} { puts "eventloop metrics el_sum1: $el_sum1, el_sum2: $el_sum2" } + assert_morethan $el_sum2 $el_sum1 + assert_lessthan $el_sum2 [expr $el_sum1+30000] ;# we expect roughly 100ms here, but allow some tolerance + if {$::verbose} { puts "eventloop metrics cmd_sum1: $cmd_sum1, cmd_sum2: $cmd_sum2" } + assert_morethan $cmd_sum2 $cmd_sum1 + assert_lessthan $cmd_sum2 [expr $cmd_sum1+15000] ;# we expect about tens of ms here, but allow some tolerance + } {} {io-threads:skip} ; # skip with io-threads as the eventloop metrics are different in that case. 
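The eventloop-metric tests in this hunk are the first users of the new io-threads:skip tag: test_helper.tcl now accepts an --io-threads switch, start_server injects io-threads 2 and events-per-io-thread 0 when it is set, and tags_acceptable silently skips any test carrying the tag. A hypothetical test opting out the same way (the tag and helpers are the ones added in this change; the test body is illustrative only):

    start_server {tags {"info" "external:skip" "io-threads:skip"}} {
        test {eventloop metrics are only sampled on the main thread} {
            # Skipped automatically when the suite is launched with --io-threads.
            assert_morethan [s eventloop_cycles] 0
        }
    }
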
+ + test {stats: instantaneous metrics} { + r config resetstat + set retries 0 + for {set retries 1} {$retries < 4} {incr retries} { + after 1600 ;# hz is 10, wait for 16 cron tick so that sample array is fulfilled + set value [s instantaneous_eventloop_cycles_per_sec] + if {$value > 0} break } - } + + assert_lessthan $retries 4 + if {$::verbose} { puts "instantaneous metrics instantaneous_eventloop_cycles_per_sec: $value" } + assert_morethan $value 0 + assert_lessthan $value [expr $retries*15] ;# default hz is 10 + set value [s instantaneous_eventloop_duration_usec] + if {$::verbose} { puts "instantaneous metrics instantaneous_eventloop_duration_usec: $value" } + assert_morethan $value 0 + assert_lessthan $value [expr $retries*22000] ;# default hz is 10, so duration < 1000 / 10, allow some tolerance + } {} {io-threads:skip} ; # skip with io-threads as the eventloop metrics are different in that case. + test {stats: debug metrics} { # make sure debug info is hidden @@ -426,7 +424,8 @@ start_server {tags {"info" "external:skip"}} { set info [r info clients] assert_equal [getInfoProperty $info pubsub_clients] {1} # non-pubsub clients should not be involved - assert_equal {0} [unsubscribe $rd2 {non-exist-chan}] + catch {unsubscribe $rd2 {non-exist-chan}} e + assert_match {*NOSUB*} $e set info [r info clients] assert_equal [getInfoProperty $info pubsub_clients] {1} # close all clients diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 5396cd2e56..6d0e48e39c 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -533,6 +533,7 @@ start_server {tags {"introspection"}} { io-threads logfile unixsocketperm + unixsocketgroup replicaof slaveof requirepass diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl index 66dae2546a..75ec1ce02d 100644 --- a/tests/unit/maxmemory.tcl +++ b/tests/unit/maxmemory.tcl @@ -456,13 +456,15 @@ start_server {tags {"maxmemory external:skip"}} { } {4098} } -start_server {tags {"maxmemory external:skip"}} { +# Skip the following test when running with IO threads +# With IO threads, we asynchronously write to tracking clients. +# This invalidates the assumption that their output buffers will be free within the same event loop. +start_server {tags {"maxmemory external:skip io-threads:skip"}} { test {client tracking don't cause eviction feedback loop} { r config set latency-tracking no r config set maxmemory 0 r config set maxmemory-policy allkeys-lru r config set maxmemory-eviction-tenacity 100 - # 10 clients listening on tracking messages set clients {} for {set j 0} {$j < 10} {incr j} { diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl index feb98d9cdd..8a58aea17d 100644 --- a/tests/unit/memefficiency.tcl +++ b/tests/unit/memefficiency.tcl @@ -404,8 +404,6 @@ run_solo {defrag} { r save ;# saving an rdb iterates over all the data / pointers } {OK} - # Skip the following two tests if we are running with IO threads, as the IO threads allocate the command arguments in a different arena. As a result, fragmentation is not as expected. - if {[r config get io-threads] eq 0} { test "Active defrag pubsub: $type" { r flushdb r config resetstat @@ -503,8 +501,7 @@ run_solo {defrag} { $rd_pubsub read } $rd_pubsub close - } - } ;# io-threads + } {0} {io-threads:skip} ; # skip with io-threads as the threads may allocate the command arguments in a different arena. As a result, fragmentation is not as expected. 
if {$type eq "standalone"} { ;# skip in cluster mode test "Active defrag big list: $type" { diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl index 503ee391a9..6e6230fc19 100644 --- a/tests/unit/other.tcl +++ b/tests/unit/other.tcl @@ -544,3 +544,26 @@ start_server {tags {"other external:skip"}} { } } } + +set tempFileName [file join [pwd] [pid]] +if {$::verbose} { + puts "Creating temp file $tempFileName" +} +set tempFileId [open $tempFileName w] +set group [dict get [file attributes $tempFileName] -group] +if {$group != ""} { + start_server [list tags {"repl external:skip"} overrides [list unixsocketgroup $group unixsocketperm 744]] { + test {test unixsocket options are set correctly} { + set socketpath [lindex [r config get unixsocket] 1] + set attributes [file attributes $socketpath] + set permissions [string range [dict get $attributes -permissions] end-2 end] + assert_equal [dict get $attributes -group] $group + assert_equal 744 $permissions + } + } +} +if {$::verbose} { + puts "Deleting temp file: $tempFileName" +} +close $tempFileId +file delete $tempFileName diff --git a/tests/unit/pubsub.tcl b/tests/unit/pubsub.tcl index 72d0498ce1..68dc79a4a4 100644 --- a/tests/unit/pubsub.tcl +++ b/tests/unit/pubsub.tcl @@ -109,10 +109,12 @@ start_server {tags {"pubsub network"}} { $rd1 close } - test "UNSUBSCRIBE from non-subscribed channels" { + test "UNSUBSCRIBE and PUNSUBSCRIBE from non-subscribed channels" { set rd1 [valkey_deferring_client] - assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}] - + foreach command {unsubscribe punsubscribe} { + catch {$command $rd1 {foo bar quux}} e + assert_match {*NOSUB*} $e + } # clean up clients $rd1 close } @@ -202,14 +204,6 @@ start_server {tags {"pubsub network"}} { $rd close } {0} {resp3} - test "PUNSUBSCRIBE from non-subscribed channels" { - set rd1 [valkey_deferring_client] - assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}] - - # clean up clients - $rd1 close - } - test "NUMSUB returns numbers, not strings (#1561)" { r pubsub numsub abc def } {abc 0 def 0} @@ -247,16 +241,6 @@ start_server {tags {"pubsub network"}} { $rd1 close } - test "PUNSUBSCRIBE and UNSUBSCRIBE should always reply" { - # Make sure we are not subscribed to any channel at all. - r punsubscribe - r unsubscribe - # Now check if the commands still reply correctly. 
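The pubsub changes above, together with the pubsubshard and info changes around them, encode one behavior change: UNSUBSCRIBE, PUNSUBSCRIBE and SUNSUBSCRIBE now reply with a NOSUB error when the client has no matching subscription, instead of returning a zero count, which is why the old "should always reply" test that follows is removed. A minimal sketch of the new expectation, reusing the valkey_deferring_client and unsubscribe helpers this suite already provides (the channel name is arbitrary):

    set rd [valkey_deferring_client]
    catch {unsubscribe $rd {never-subscribed-chan}} err
    assert_match {*NOSUB*} $err
    $rd close
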
- set reply1 [r punsubscribe] - set reply2 [r unsubscribe] - concat $reply1 $reply2 - } {punsubscribe {} 0 unsubscribe {} 0} - ### Keyspace events notification tests test "Keyspace notifications: we receive keyspace notifications" { diff --git a/tests/unit/pubsubshard.tcl b/tests/unit/pubsubshard.tcl index e19db211f7..d62a415705 100644 --- a/tests/unit/pubsubshard.tcl +++ b/tests/unit/pubsubshard.tcl @@ -74,9 +74,8 @@ start_server {tags {"pubsubshard external:skip"}} { test "SUNSUBSCRIBE from non-subscribed channels" { set rd1 [valkey_deferring_client] - assert_equal {0} [sunsubscribe $rd1 {foo}] - assert_equal {0} [sunsubscribe $rd1 {bar}] - assert_equal {0} [sunsubscribe $rd1 {quux}] + catch {sunsubscribe $rd1 {foo}} e + assert_match {*NOSUB*} $e # clean up clients $rd1 close @@ -169,4 +168,4 @@ start_server {tags {"pubsubshard external:skip"}} { assert_equal {smessage chan1 world} [$rd1 read] } } -} \ No newline at end of file +} diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl index f1acbaa0ff..1be530d37f 100644 --- a/tests/unit/slowlog.tcl +++ b/tests/unit/slowlog.tcl @@ -248,4 +248,42 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { $rd close } + + foreach is_eval {0 1} { + test "SLOWLOG - the commands in script are recorded normally - is_eval: $is_eval" { + if {$is_eval == 0} { + r function load replace "#!lua name=mylib \n redis.register_function('myfunc', function(KEYS, ARGS) server.call('ping') end)" + } + + r client setname test-client + r config set slowlog-log-slower-than 0 + r slowlog reset + + if {$is_eval} { + r eval "server.call('ping')" 0 + } else { + r fcall myfunc 0 + } + set slowlog_resp [r slowlog get 2] + assert_equal 2 [llength $slowlog_resp] + + # The first one is the script command, and the second one is the ping command executed in the script + # Each slowlog contains: id, timestamp, execution time, command array, ip:port, client name + set script_cmd [lindex $slowlog_resp 0] + set ping_cmd [lindex $slowlog_resp 1] + + # Make sure the commands are logged. + if {$is_eval} { + assert_equal {eval server.call('ping') 0} [lindex $script_cmd 3] + } else { + assert_equal {fcall myfunc 0} [lindex $script_cmd 3] + } + assert_equal {ping} [lindex $ping_cmd 3] + + # Make sure the client info is logged. + assert_equal [lindex $script_cmd 4] [lindex $ping_cmd 4] + assert_equal {test-client} [lindex $script_cmd 5] + assert_equal {test-client} [lindex $ping_cmd 5] + } + } } diff --git a/utils/build-static-symbols.tcl b/utils/build-static-symbols.tcl index 0aa8f72402..966c827405 100644 --- a/utils/build-static-symbols.tcl +++ b/utils/build-static-symbols.tcl @@ -2,7 +2,7 @@ # Useful to get stack traces on segfault without a debugger. See redis.c # for more information. # -# Copyright(C) 2009 Salvatore Sanfilippo, under the BSD license. +# Copyright(C) 2009 Redis Ltd. set fd [open redis.c] set symlist {} diff --git a/utils/corrupt_rdb.c b/utils/corrupt_rdb.c index df9c93ed88..5f35efb95c 100644 --- a/utils/corrupt_rdb.c +++ b/utils/corrupt_rdb.c @@ -1,7 +1,7 @@ /* Trivia program to corrupt an RDB file in order to check the RDB check * program behavior and effectiveness. * - * Copyright (C) 2016 Salvatore Sanfilippo. + * Copyright (C) 2016 Redis Ltd. * This software is released in the 3-clause BSD license. 
*/ #include diff --git a/utils/hyperloglog/hll-err.rb b/utils/hyperloglog/hll-err.rb index e04cf93800..8c5b59fdd1 100644 --- a/utils/hyperloglog/hll-err.rb +++ b/utils/hyperloglog/hll-err.rb @@ -1,4 +1,4 @@ -# hll-err.rb - Copyright (C) 2014 Salvatore Sanfilippo +# hll-err.rb - Copyright (C) 2014 Redis Ltd. # BSD license, See the COPYING file for more information. # # Check error of HyperLogLog implementation for different set sizes. diff --git a/utils/hyperloglog/hll-gnuplot-graph.rb b/utils/hyperloglog/hll-gnuplot-graph.rb index 61f0672637..8b2cbd48ed 100644 --- a/utils/hyperloglog/hll-gnuplot-graph.rb +++ b/utils/hyperloglog/hll-gnuplot-graph.rb @@ -1,4 +1,4 @@ -# hll-err.rb - Copyright (C) 2014 Salvatore Sanfilippo +# hll-err.rb - Copyright (C) 2014 Redis Ltd. # BSD license, See the COPYING file for more information. # # This program is suited to output average and maximum errors of diff --git a/utils/redis-copy.rb b/utils/redis-copy.rb index 9f8335c341..c5e2b59fe1 100644 --- a/utils/redis-copy.rb +++ b/utils/redis-copy.rb @@ -1,4 +1,4 @@ -# redis-copy.rb - Copyright (C) 2009-2010 Salvatore Sanfilippo +# redis-copy.rb - Copyright (C) 2009-2010 Redis Ltd. # BSD license, See the COPYING file for more information. # # Copy the whole dataset from one server instance to another one diff --git a/utils/redis-sha1.rb b/utils/redis-sha1.rb index 6a8b4f3586..43e8580bbd 100644 --- a/utils/redis-sha1.rb +++ b/utils/redis-sha1.rb @@ -1,4 +1,4 @@ -# redis-sha1.rb - Copyright (C) 2009 Salvatore Sanfilippo +# redis-sha1.rb - Copyright (C) 2009 Redis Ltd. # BSD license, See the COPYING file for more information. # # Performs the SHA1 sum of the whole dataset. diff --git a/utils/speed-regression.tcl b/utils/speed-regression.tcl index 2a4ecde452..b51d910869 100755 --- a/utils/speed-regression.tcl +++ b/utils/speed-regression.tcl @@ -1,5 +1,5 @@ #!/usr/bin/env tclsh8.5 -# Copyright (C) 2011 Salvatore Sanfilippo +# Copyright (C) 2011 Redis Ltd. # Released under the BSD license like Redis itself source ../tests/support/valkey.tcl diff --git a/utils/tracking_collisions.c b/utils/tracking_collisions.c index 4df3e84af5..b7c7b77bf1 100644 --- a/utils/tracking_collisions.c +++ b/utils/tracking_collisions.c @@ -17,7 +17,7 @@ * * -------------------------------------------------------------------------- * - * Copyright (C) 2019 Salvatore Sanfilippo + * Copyright (C) 2019 Redis Ltd. * This code is released under the BSD 2 clause license. */ diff --git a/valkey.conf b/valkey.conf index 68f4ad1f72..8465facb50 100644 --- a/valkey.conf +++ b/valkey.conf @@ -154,6 +154,7 @@ tcp-backlog 511 # on a unix socket when not specified. # # unixsocket /run/valkey.sock +# unixsocketgroup wheel # unixsocketperm 700 # Close the connection after a client is idle for N seconds (0 to disable) @@ -734,7 +735,7 @@ repl-disable-tcp-nodelay no # # The backlog is only allocated if there is at least one replica connected. # -# repl-backlog-size 1mb +# repl-backlog-size 10mb # After a primary has no connected replicas for some time, the backlog will be # freed. The following option configures the amount of seconds that need to
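Finally, the valkey.conf hunk documents the new unixsocketgroup directive alongside unixsocketperm, matching the new coverage in tests/unit/other.tcl and the CONFIG GET entry added in introspection.tcl. A hypothetical start_server override using it directly (the staff group is illustrative and must exist on the test host; the real test derives the group from a temporary file instead of hard-coding it):

    start_server [list overrides [list unixsocketgroup staff unixsocketperm 744]] {
        test {unix socket group and permissions are applied} {
            set sock [lindex [r config get unixsocket] 1]
            set attrs [file attributes $sock]
            assert_equal staff [dict get $attrs -group]
            assert_equal 744 [string range [dict get $attrs -permissions] end-2 end]
        }
    }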