Skip to content

Commit

Permalink
Merge branch 'master' into copy-of-branch-0.7.x
Browse files Browse the repository at this point in the history
  • Loading branch information
terry-xiaoyu committed Feb 6, 2025
2 parents 865b7a6 + 0c729d5 commit 1e306bb
Show file tree
Hide file tree
Showing 18 changed files with 391 additions and 394 deletions.
19 changes: 15 additions & 4 deletions .github/workflows/test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,21 +2,32 @@ name: Run test suites

on:
pull_request:
push:
branches:
- master

jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
otp:
- 25.1
erlang:
- otp: "24"
rebar3: "3.20"
- otp: "25"
rebar3: "3.22"
- otp: "26"
rebar3: "3.22"
- otp: "27"
rebar3: "3.24"

steps:
- uses: actions/checkout@v3
- uses: erlef/setup-beam@v1
with:
otp-version: ${{ matrix.otp }}
rebar3-version: 3
otp-version: ${{ matrix.erlang.otp }}
rebar3-version: ${{ matrix.erlang.rebar3 }}
- name: setup redis cluster
run: docker compose up -d --wait
- name: eunit
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,4 @@ doc/
mix.lock
/.idea/
.DS_Store
rebar.lock
12 changes: 0 additions & 12 deletions .travis.yml

This file was deleted.

22 changes: 0 additions & 22 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -109,8 +109,6 @@ help:
@echo " start starts a test redis cluster"
@echo " cleanup cleanup config files after redis cluster"
@echo " stop stops all redis servers"
@echo " travis-run starts the redis cluster and runs your tests"
@echo " travis-install install redis from 'unstable' branch"

start: cleanup
echo "$$NODE1_CONF" | redis-server -
Expand All @@ -133,23 +131,3 @@ stop:
kill `cat /tmp/redis_cluster_node5.pid` || true
kill `cat /tmp/redis_cluster_node6.pid` || true
make cleanup

travis-run:
# Start all cluster nodes
make start
sleep 5

# Join all nodes in the cluster
echo "yes" | ruby redis-git/src/redis-trib.rb create --replicas 1 127.0.0.1:30001 127.0.0.1:30002 127.0.0.1:30003 127.0.0.1:30004 127.0.0.1:30005 127.0.0.1:30006
sleep 5

make compile && make test

# Kill all redis nodes and do cleanup
make stop

travis-install:
[ ! -e redis-git ] && git clone -b 3.0.6 --single-branch https://github.com/antirez/redis.git redis-git || true
make -C redis-git -j4
gem install redis
sleep 3
10 changes: 5 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# eredis_cluster
[![Travis](https://img.shields.io/travis/adrienmo/eredis_cluster.svg?branch=master&style=flat-square)](https://travis-ci.org/adrienmo/eredis_cluster)
[![Hex.pm](https://img.shields.io/hexpm/v/eredis_cluster.svg?style=flat-square)](https://hex.pm/packages/eredis_cluster)

[![Build status](https://github.com/emqx/eredis_cluster/actions/workflows/test.yaml/badge.svg)](https://github.com/emqx/eredis_cluster/actions/workflows/test.yaml)

## Description

eredis_cluster is a wrapper for eredis to support cluster mode of redis 3.0.0+
`eredis_cluster` is a wrapper for eredis to support cluster mode of redis 3.0.0+

## TODO

Expand Down Expand Up @@ -104,7 +104,7 @@ eredis_cluster:update_hash_field("abc", "efg", Fun).

%% Eval script, both script and hash are necessary to execute the command,
%% the script hash should be precomputed at compile time otherwise, it will
%% execute it at each request. Could be solved by using a macro though.
%% execute it at each request. Could be solved by using a macro though.
Script = "return redis.call('set', KEYS[1], ARGV[1]);",
ScriptHash = "4bf5e0d8612687699341ea7db19218e83f77b7cf",
eredis_cluster:eval(Script, ScriptHash, ["abc"], ["123"]).
Expand Down Expand Up @@ -137,4 +137,4 @@ eredis_cluster:qk(["FLUSHDB"], "TEST").
]
}

```
```
34 changes: 33 additions & 1 deletion docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,31 +3,49 @@ version: '3.9'
services:
redis-node-0: &redis-node
image: docker.io/bitnami/redis-cluster:${REDIS_TAG}
volumes:
- ./test/config/users.acl:/usr/local/etc/redis/users.acl
- ./test/config/overrides.conf:/opt/bitnami/redis/mounted-etc/overrides.conf
ports:
- "30001:${REDIS_PORT_NUMBER}"
environment:
- 'REDIS_PASSWORD=${REDIS_PASSWORD}'
- 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5'
networks:
vpcbr:
ipv4_address: 10.5.0.11

redis-node-1:
<<: *redis-node
ports:
- "30002:${REDIS_PORT_NUMBER}"
networks:
vpcbr:
ipv4_address: 10.5.0.12

redis-node-2:
<<: *redis-node
ports:
- "30003:${REDIS_PORT_NUMBER}"
networks:
vpcbr:
ipv4_address: 10.5.0.13

redis-node-3:
<<: *redis-node
ports:
- "30004:${REDIS_PORT_NUMBER}"
networks:
vpcbr:
ipv4_address: 10.5.0.14

redis-node-4:
<<: *redis-node
ports:
- "30005:${REDIS_PORT_NUMBER}"
networks:
vpcbr:
ipv4_address: 10.5.0.15

redis-node-5:
image: docker.io/bitnami/redis-cluster:${REDIS_TAG}
Expand All @@ -43,11 +61,17 @@ services:
- 'REDIS_CLUSTER_REPLICAS=1'
- 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5'
- 'REDIS_CLUSTER_CREATOR=yes'
volumes:
- ./test/config/users.acl:/usr/local/etc/redis/users.acl
- ./test/config/overrides.conf:/opt/bitnami/redis/mounted-etc/overrides.conf
ports:
- "30006:${REDIS_PORT_NUMBER}"
networks:
vpcbr:
ipv4_address: 10.5.0.16
healthcheck:
test: |
# Convert `cluster info` output into shell variable assignments
# Convert `cluster info` output into shell variable assignments
eval "$(
redis-cli -a "${REDIS_PASSWORD}" --no-auth-warning cluster info | \
grep "^cluster" | \
Expand All @@ -58,3 +82,11 @@ services:
[ "$$cluster_known_nodes" -eq 6 ] || { echo "known nodes = $$cluster_known_nodes"; exit 3; }
interval: 5s
retries: 3

networks:
vpcbr:
driver: bridge
ipam:
config:
- subnet: 10.5.0.0/16
gateway: 10.5.0.1
32 changes: 0 additions & 32 deletions mix.exs

This file was deleted.

8 changes: 4 additions & 4 deletions rebar.config
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
%% -*- mode: erlang -*-
{deps, [
{eredis, {git, "https://github.com/emqx/eredis", {tag, "1.2.10"}}},
{poolboy, {git, "https://github.com/devinus/poolboy.git", {branch, "1.5.2"}}}
{eredis, {git, "https://github.com/emqx/eredis", {tag, "1.2.16"}}},
{ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.3"}}}
]}.
{erl_opts, [warnings_as_errors,
warn_export_all]}.

{erl_opts, [warn_export_all]}.

{xref_checks, [undefined_function_calls]}.
{cover_enabled, true}.
Expand Down
4 changes: 3 additions & 1 deletion src/eredis_cluster.app.src
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,9 @@
{registered, []},
{applications, [
kernel,
stdlib
stdlib,
eredis,
ecpool
]},
{mod, {eredis_cluster, []}},
{env, []}
Expand Down
95 changes: 1 addition & 94 deletions src/eredis_cluster.appup.src
Original file line number Diff line number Diff line change
@@ -1,103 +1,10 @@
%% -*- mode: erlang -*-
{"0.7.7",
{VSN,
[
{"0.5.11", [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{apply,{eredis_cluster_monitor,cache_states,[]}},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []}
]},
{"0.6.2", [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{apply,{eredis_cluster_monitor,cache_states,[]}}
]},
{<<"0\\.6\\.[3-6]">>, [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{apply,{eredis_cluster_monitor,cache_states,[]}}
]},
{"0.6.7", [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{apply,{eredis_cluster_monitor,cache_states,[]}},
{load_module, eredis_cluster, brutal_purge, soft_purge, []}
]},
{<<"0\\.7\\.[0-3]">>, [
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{apply,{eredis_cluster_monitor,cache_states,[]}},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []}
]},
{"0.7.4", [
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{apply,{eredis_cluster_monitor,cache_states,[]}},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []}
]},
{<<"0\\.7\\.[5-6]">>, [
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{apply,{eredis_cluster_monitor,cache_states,[]}}
]},
{<<".*">>, [
]}
],
[
{"0.5.11", [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []}
]},
{"0.6.2", [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []}
]},
{<<"0\\.6\\.[3-6]">>, [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []}
]},
{"0.6.7", [
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []}
]},
{<<"0\\.7\\.[0-3]">>, [
{load_module, eredis_cluster_pool, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_sup, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []}
]},
{"0.7.4", [
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []},
{load_module, eredis_cluster, brutal_purge, soft_purge, []},
{load_module, eredis_cluster_pool_worker, brutal_purge, soft_purge, []}
]},
{<<"0\\.7\\.[5-6]">>, [
{load_module, eredis_cluster_monitor, brutal_purge, soft_purge, []}
]},
{<<".*">>, [
]}
]
Expand Down
15 changes: 15 additions & 0 deletions src/eredis_cluster.erl
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
-export([update_hash_field/4]).
-export([optimistic_locking_transaction/4]).
-export([eval/5]).
-export([ping_all/1]).

start_pool(PoolName, Opts) ->
eredis_cluster_sup:start_child(PoolName, [PoolName, Opts]).
Expand Down Expand Up @@ -124,6 +125,20 @@ query(PoolName, Transaction, Slot, Counter) ->
Result -> Result
end.

%% @doc Health-check the cluster behind `Pool' by sending a PING through
%% a sample of slots (as reported by eredis_cluster_monitor:get_slot_samples/1).
%% Returns `true' only if at least one slot was sampled AND every PING
%% answered `{ok, <<"PONG">>}'; returns `false' otherwise (including when
%% no slots could be sampled, since nothing was verified in that case).
ping_all(Pool) ->
    Slots = eredis_cluster_monitor:get_slot_samples(Pool),
    Transaction = fun(Worker) -> qw(Worker, [<<"PING">>]) end,
    %% One query per sampled slot; Counter = 0 starts the retry budget.
    Pongs = [query(Pool, Transaction, Slot, 0) || Slot <- Slots],

    %% `Pongs =/= []' is an O(1) emptiness check, unlike `length/1' which
    %% traverses the whole list. An empty sample means we could not verify
    %% any node, so the overall result is false.
    Pongs =/= [] andalso
        lists:all(
          fun({ok, <<"PONG">>}) -> true;
             (_) -> false
          end,
          Pongs).

handle_transaction_result(PoolName, Result, Version) ->
case Result of
% If we detect a node went down, we should probably refresh the slot
Expand Down
Loading

0 comments on commit 1e306bb

Please sign in to comment.