Containerizing middleware with Docker Compose
Motivation
The old test environment ran these services from binary installs, which was a pain to maintain, so I took the opportunity to move them into containers. For now everything runs as a single node; if other deployment modes come up later, this post will be updated.
rocketmq
rocketmq single node
Create directories
mkdir -p /data/logs/rocketmq/{namesrv,broker-a,console}
mkdir -p /data/rocketmq/broker-a/{conf,store}
chown -R 3000:3000 /data/logs/rocketmq/{namesrv,broker-a}
chown -R 3000:3000 /data/rocketmq/broker-a
Create the config file /data/rocketmq/broker-a/conf/broker.conf
brokerClusterName = DefaultCluster
brokerName = broker-a
brokerId = 0
deleteWhen = 04
fileReservedTime = 48
brokerRole = ASYNC_MASTER
flushDiskType = ASYNC_FLUSH
storePathRootDir = /home/rocketmq/store
storePathCommitLog = /home/rocketmq/store/commitlog
storePathConsumeQueue = /home/rocketmq/store/consumequeue
autoCreateTopicEnable = true
autoCreateSubscriptionGroup = true
slaveReadEnable = true
Create docker-compose.yml
version: '3.5'
services:
  rocketmq-namesrv:
    image: apache/rocketmq:4.9.3
    container_name: namesrv
    restart: always
    ports:
      - "9876:9876"
    volumes:
      - /data/logs/rocketmq/namesrv:/home/rocketmq/logs/rocketmqlogs
    command: "sh mqnamesrv"
    networks:
      - rocketmq
  rocketmq-broker:
    image: apache/rocketmq:4.9.3
    container_name: rmqbroker
    restart: always
    depends_on:
      - rocketmq-namesrv
    ports:
      - 10909:10909
      - 10911:10911
    volumes:
      - /data/logs/rocketmq/broker-a:/home/rocketmq/logs/rocketmqlogs
      - /data/rocketmq/broker-a/store:/home/rocketmq/store
      - /data/rocketmq/broker-a/conf/broker.conf:/home/rocketmq/rocketmq-4.9.3/conf/broker.conf
    command: "sh mqbroker -c /home/rocketmq/rocketmq-4.9.3/conf/broker.conf"
    environment:
      NAMESRV_ADDR: "rocketmq-namesrv:9876"
    networks:
      - rocketmq
  rmqconsole:
    image: styletang/rocketmq-console-ng
    container_name: rocketmq-console
    restart: always
    ports:
      - 8080:8080
    depends_on:
      - rocketmq-namesrv
    # mount localtime for the timezone and keep console logs on the host
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /data/logs/rocketmq/console:/root/logs/consolelogs
    environment:
      JAVA_OPTS: "-Drocketmq.namesrv.addr=rocketmq-namesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
    networks:
      - rocketmq
networks:
  rocketmq:
    driver: bridge
Start it:
docker compose up -d
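To confirm that the broker registered with the name server, mqadmin can be run inside the namesrv container (a quick sanity check, assuming the container names above):
docker exec -it namesrv sh mqadmin clusterList -n localhost:9876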
rocketmq: two masters, two slaves
Create directories
# on host 1 (10.0.1.225): broker-a (master) and broker-b-s (slave of broker-b)
mkdir /data/servers/rocketmq -p
mkdir /data1/logs/rocketmq/{namesrv,broker-a,broker-b-s} -p
mkdir /data1/storage/rocketmq/{broker-a,broker-b-s}/{conf,store} -p
chown -R 3000:3000 /data1/storage/rocketmq/{broker-a,broker-b-s}
chown -R 3000:3000 /data1/logs/rocketmq/{broker-a,broker-b-s}
mkdir /data1/storage/rocketmq-dashboard/data/ -p
# edit the dashboard password file
vim users.properties
admin=yourpassword

# on host 2 (10.0.1.226): broker-b (master) and broker-a-s (slave of broker-a)
mkdir /data/servers/rocketmq -p
mkdir /data1/logs/rocketmq/{namesrv,broker-b,broker-a-s} -p
mkdir /data1/storage/rocketmq/{broker-b,broker-a-s}/{conf,store} -p
chown -R 3000:3000 /data1/storage/rocketmq/{broker-b,broker-a-s}
chown -R 3000:3000 /data1/logs/rocketmq/{broker-b,broker-a-s}
mkdir /data1/storage/rocketmq-dashboard/data/ -p
# edit the dashboard password file
vim users.properties
admin=yourpassword
Create docker-compose.yml
Host 1 (10.0.1.225): nameserver, broker-a, broker-b-s, dashboard
services:
  rocketmq-namesrv:
    image: apache/rocketmq:4.9.7
    container_name: namesrv
    restart: always
    environment:
      TZ: Asia/Shanghai
    ports:
      - "9876:9876"
    volumes:
      - /data1/logs/rocketmq/namesrv:/home/rocketmq/logs/rocketmqlogs
    command: "sh mqnamesrv"
  rocketmq-broker-a:
    image: apache/rocketmq:4.9.7
    container_name: rmqbroker-a
    restart: always
    depends_on:
      - rocketmq-namesrv
    ports:
      - 10909:10909
      - 10911:10911
      - 10912:10912
    volumes:
      - /data1/logs/rocketmq/broker-a:/home/rocketmq/logs/rocketmqlogs
      - /data1/storage/rocketmq/broker-a/store:/data/rocketmq/store
      - /data1/storage/rocketmq/broker-a/conf/broker.conf:/home/rocketmq/rocketmq-4.9.7/conf/broker.conf
    command: "sh mqbroker -c /home/rocketmq/rocketmq-4.9.7/conf/broker.conf"
    environment:
      TZ: Asia/Shanghai
      JAVA_OPT_EXT: "-server -Xms4096m -Xmx4096m"
  rocketmq-broker-b-s:
    image: apache/rocketmq:4.9.7
    container_name: rmqbroker-b-s
    restart: always
    depends_on:
      - rocketmq-namesrv
    ports:
      - 11909:11909
      - 11911:11911
      - 11912:11912
    volumes:
      - /data1/logs/rocketmq/broker-b-s:/home/rocketmq/logs/rocketmqlogs
      - /data1/storage/rocketmq/broker-b-s/store:/data/rocketmq/store
      - /data1/storage/rocketmq/broker-b-s/conf/broker.conf:/home/rocketmq/rocketmq-4.9.7/conf/broker.conf
    command: "sh mqbroker -c /home/rocketmq/rocketmq-4.9.7/conf/broker.conf"
    environment:
      TZ: Asia/Shanghai
      JAVA_OPT_EXT: "-server -Xms4096m -Xmx4096m"
  rmqconsole:
    image: apacherocketmq/rocketmq-dashboard:latest
    ports:
      - 8080:8080
    volumes:
      # data directory mapping; users.properties must be placed in the data directory
      - /data1/storage/rocketmq-dashboard/data:/tmp/rocketmq-console/data
    environment:
      - TZ=Asia/Shanghai
      - NAMESRV_ADDR=10.0.1.225:9876;10.0.1.226:9876
      # enable login authentication
      - ROCKETMQ_CONFIG_LOGIN_REQUIRED=true
broker-a
brokerClusterName = DefaultCluster
brokerName = broker-a
brokerId = 0
listenPort = 10911
namesrvAddr = 10.0.1.225:9876;10.0.1.226:9876
# Set the IPs of the server this broker runs on. This matters: in master-slave mode the
# slave pulls data from the master's brokerIP2, and replication fails without it.
# brokerIP1 should be an address clients can reach; on dual-NIC servers (e.g. Alibaba
# Cloud) it must be set explicitly. Masters need brokerIP1 and brokerIP2; slaves only
# need brokerIP1.
brokerIP1=10.0.1.225
brokerIP2=10.0.1.225
defaultTopicQueueNums = 16
autoCreateTopicEnable = false
autoCreateSubscriptionGroup = false
deleteWhen = 04
fileReservedTime = 48
mapedFileSizeCommitLog = 1073741824
mapedFileSizeConsumeQueue = 50000000
destroyMapedFileIntervalForcibly = 120000
redeleteHangedFileInterval = 120000
diskMaxUsedSpaceRatio = 88
storePathRootDir = /data/rocketmq/store
storePathCommitLog = /data/rocketmq/store/commitlog
storePathConsumeQueue = /data/rocketmq/store/consumequeue
storePathIndex = /data/rocketmq/store/index
storeCheckpoint = /data/rocketmq/store/checkpoint
abortFile = /data/rocketmq/store/abort
maxMessageSize = 65536
flushCommitLogLeastPages = 4
flushConsumeQueueLeastPages = 2
flushCommitLogThoroughInterval = 10000
flushConsumeQueueThoroughInterval = 60000
# broker role: ASYNC_MASTER for an asynchronously replicated master, SYNC_MASTER for a
# synchronously replicated master, SLAVE for a slave
brokerRole = ASYNC_MASTER
flushDiskType = ASYNC_FLUSH
# max number of in-memory messages transferred in a single pull
maxTransferCountOnMessageInMemory = 1000
# whether to enable the off-heap transient store pool
transientStorePoolEnable = false
# whether to warm up mapped files
warmMapedFileEnable = false
# send-message thread pool size
#sendMessageThreadPoolNums=128
# pull-message thread pool size
pullMessageThreadPoolNums = 128
# whether consumers are allowed to read from the slave
slaveReadEnable = true
# whether messages are transferred to consumers via heap memory
transferMsgByHeap = true
# how long a send request may wait in the queue before a timeout error is thrown
waitTimeMillsInSendQueue = 1000
broker-b-s
brokerClusterName = DefaultCluster
brokerName = broker-b
brokerId = 1
listenPort = 11911
namesrvAddr = 10.0.1.225:9876;10.0.1.226:9876
# Set the IPs of the server this broker runs on. This matters: in master-slave mode the
# slave pulls data from the master's brokerIP2, and replication fails without it.
# brokerIP1 should be an address clients can reach; on dual-NIC servers (e.g. Alibaba
# Cloud) it must be set explicitly. Masters need brokerIP1 and brokerIP2; slaves only
# need brokerIP1.
brokerIP1=10.0.1.225
defaultTopicQueueNums = 16
autoCreateTopicEnable = false
autoCreateSubscriptionGroup = false
deleteWhen = 04
fileReservedTime = 48
mapedFileSizeCommitLog = 1073741824
mapedFileSizeConsumeQueue = 50000000
destroyMapedFileIntervalForcibly = 120000
redeleteHangedFileInterval = 120000
diskMaxUsedSpaceRatio = 88
storePathRootDir = /data/rocketmq/store
storePathCommitLog = /data/rocketmq/store/commitlog
storePathConsumeQueue = /data/rocketmq/store/consumequeue
storePathIndex = /data/rocketmq/store/index
storeCheckpoint = /data/rocketmq/store/checkpoint
abortFile = /data/rocketmq/store/abort
maxMessageSize = 65536
flushCommitLogLeastPages = 4
flushConsumeQueueLeastPages = 2
flushCommitLogThoroughInterval = 10000
flushConsumeQueueThoroughInterval = 60000
# broker role: ASYNC_MASTER for an asynchronously replicated master, SYNC_MASTER for a
# synchronously replicated master, SLAVE for a slave
brokerRole = SLAVE
flushDiskType = ASYNC_FLUSH
# max number of in-memory messages transferred in a single pull
maxTransferCountOnMessageInMemory = 1000
# whether to enable the off-heap transient store pool
transientStorePoolEnable = false
# whether to warm up mapped files
warmMapedFileEnable = false
# send-message thread pool size
#sendMessageThreadPoolNums=128
# pull-message thread pool size
pullMessageThreadPoolNums = 128
# whether consumers are allowed to read from the slave
slaveReadEnable = true
# whether messages are transferred to consumers via heap memory
transferMsgByHeap = true
# how long a send request may wait in the queue before a timeout error is thrown
waitTimeMillsInSendQueue = 1000
Host 2 (10.0.1.226): nameserver, broker-b, broker-a-s, dashboard
docker-compose.yml
services:
  rocketmq-namesrv:
    image: apache/rocketmq:4.9.7
    container_name: namesrv
    restart: always
    environment:
      TZ: Asia/Shanghai
    ports:
      - "9876:9876"
    volumes:
      - /data1/logs/rocketmq/namesrv:/home/rocketmq/logs/rocketmqlogs
    command: "sh mqnamesrv"
  rocketmq-broker-b:
    image: apache/rocketmq:4.9.7
    container_name: rmqbroker-b
    restart: always
    depends_on:
      - rocketmq-namesrv
    ports:
      - 10909:10909
      - 10911:10911
      - 10912:10912
    volumes:
      - /data1/logs/rocketmq/broker-b:/home/rocketmq/logs/rocketmqlogs
      - /data1/storage/rocketmq/broker-b/store:/data/rocketmq/store
      - /data1/storage/rocketmq/broker-b/conf/broker.conf:/home/rocketmq/rocketmq-4.9.7/conf/broker.conf
    command: "sh mqbroker -c /home/rocketmq/rocketmq-4.9.7/conf/broker.conf"
    environment:
      TZ: Asia/Shanghai
      JAVA_OPT_EXT: "-server -Xms4096m -Xmx4096m"
  rocketmq-broker-a-s:
    image: apache/rocketmq:4.9.7
    container_name: rmqbroker-a-s
    restart: always
    depends_on:
      - rocketmq-namesrv
    ports:
      - 11909:11909
      - 11911:11911
      - 11912:11912
    volumes:
      - /data1/logs/rocketmq/broker-a-s:/home/rocketmq/logs/rocketmqlogs
      - /data1/storage/rocketmq/broker-a-s/store:/data/rocketmq/store
      - /data1/storage/rocketmq/broker-a-s/conf/broker.conf:/home/rocketmq/rocketmq-4.9.7/conf/broker.conf
    command: "sh mqbroker -c /home/rocketmq/rocketmq-4.9.7/conf/broker.conf"
    environment:
      TZ: Asia/Shanghai
      JAVA_OPT_EXT: "-server -Xms4096m -Xmx4096m"
  rmqconsole:
    image: apacherocketmq/rocketmq-dashboard:latest
    ports:
      - 8080:8080
    volumes:
      # data directory mapping; users.properties must be placed in the data directory
      - /data1/storage/rocketmq-dashboard/data:/tmp/rocketmq-console/data
    environment:
      - TZ=Asia/Shanghai
      - NAMESRV_ADDR=10.0.1.225:9876;10.0.1.226:9876
      # enable login authentication
      - ROCKETMQ_CONFIG_LOGIN_REQUIRED=true
broker-b
brokerClusterName = DefaultCluster
brokerName = broker-b
brokerId = 0
listenPort = 10911
namesrvAddr = 10.0.1.225:9876;10.0.1.226:9876
# Set the IPs of the server this broker runs on. This matters: in master-slave mode the
# slave pulls data from the master's brokerIP2, and replication fails without it.
# brokerIP1 should be an address clients can reach; on dual-NIC servers (e.g. Alibaba
# Cloud) it must be set explicitly. Masters need brokerIP1 and brokerIP2; slaves only
# need brokerIP1.
brokerIP1=10.0.1.226
brokerIP2=10.0.1.226
defaultTopicQueueNums = 16
autoCreateTopicEnable = false
autoCreateSubscriptionGroup = false
deleteWhen = 04
fileReservedTime = 48
mapedFileSizeCommitLog = 1073741824
mapedFileSizeConsumeQueue = 50000000
destroyMapedFileIntervalForcibly = 120000
redeleteHangedFileInterval = 120000
diskMaxUsedSpaceRatio = 88
storePathRootDir = /data/rocketmq/store
storePathCommitLog = /data/rocketmq/store/commitlog
storePathConsumeQueue = /data/rocketmq/store/consumequeue
storePathIndex = /data/rocketmq/store/index
storeCheckpoint = /data/rocketmq/store/checkpoint
abortFile = /data/rocketmq/store/abort
maxMessageSize = 65536
flushCommitLogLeastPages = 4
flushConsumeQueueLeastPages = 2
flushCommitLogThoroughInterval = 10000
flushConsumeQueueThoroughInterval = 60000
# broker role: ASYNC_MASTER for an asynchronously replicated master, SYNC_MASTER for a
# synchronously replicated master, SLAVE for a slave
brokerRole = ASYNC_MASTER
flushDiskType = ASYNC_FLUSH
# max number of in-memory messages transferred in a single pull
maxTransferCountOnMessageInMemory = 1000
# whether to enable the off-heap transient store pool
transientStorePoolEnable = false
# whether to warm up mapped files
warmMapedFileEnable = false
# send-message thread pool size
#sendMessageThreadPoolNums=128
# pull-message thread pool size
pullMessageThreadPoolNums = 128
# whether consumers are allowed to read from the slave
slaveReadEnable = true
# whether messages are transferred to consumers via heap memory
transferMsgByHeap = true
# how long a send request may wait in the queue before a timeout error is thrown
waitTimeMillsInSendQueue = 1000
broker-a-s
brokerClusterName = DefaultCluster
brokerName = broker-a
brokerId = 1
listenPort = 11911
namesrvAddr = 10.0.1.225:9876;10.0.1.226:9876
# Set the IPs of the server this broker runs on. This matters: in master-slave mode the
# slave pulls data from the master's brokerIP2, and replication fails without it.
# brokerIP1 should be an address clients can reach; on dual-NIC servers (e.g. Alibaba
# Cloud) it must be set explicitly. Masters need brokerIP1 and brokerIP2; slaves only
# need brokerIP1.
brokerIP1=10.0.1.226
defaultTopicQueueNums = 16
autoCreateTopicEnable = false
autoCreateSubscriptionGroup = false
deleteWhen = 04
fileReservedTime = 48
mapedFileSizeCommitLog = 1073741824
mapedFileSizeConsumeQueue = 50000000
destroyMapedFileIntervalForcibly = 120000
redeleteHangedFileInterval = 120000
diskMaxUsedSpaceRatio = 88
storePathRootDir = /data/rocketmq/store
storePathCommitLog = /data/rocketmq/store/commitlog
storePathConsumeQueue = /data/rocketmq/store/consumequeue
storePathIndex = /data/rocketmq/store/index
storeCheckpoint = /data/rocketmq/store/checkpoint
abortFile = /data/rocketmq/store/abort
maxMessageSize = 65536
flushCommitLogLeastPages = 4
flushConsumeQueueLeastPages = 2
flushCommitLogThoroughInterval = 10000
flushConsumeQueueThoroughInterval = 60000
# broker role: ASYNC_MASTER for an asynchronously replicated master, SYNC_MASTER for a
# synchronously replicated master, SLAVE for a slave
brokerRole = SLAVE
flushDiskType = ASYNC_FLUSH
# max number of in-memory messages transferred in a single pull
maxTransferCountOnMessageInMemory = 1000
# whether to enable the off-heap transient store pool
transientStorePoolEnable = false
# whether to warm up mapped files
warmMapedFileEnable = false
# send-message thread pool size
#sendMessageThreadPoolNums=128
# pull-message thread pool size
pullMessageThreadPoolNums = 128
# whether consumers are allowed to read from the slave
slaveReadEnable = true
# whether messages are transferred to consumers via heap memory
transferMsgByHeap = true
# how long a send request may wait in the queue before a timeout error is thrown
waitTimeMillsInSendQueue = 1000
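With both hosts up, the topology can be checked from either name server; broker-a and broker-b should each appear with BID 0 (master) and 1 (slave). A quick check, assuming the container names above:
docker exec -it namesrv sh mqadmin clusterList -n 10.0.1.225:9876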
redis
redis single node
Create directories
mkdir /data/apps/{data,logs} -p
Create config files
docker-compose.yml
version: '3.3'
services:
  redis:
    image: redis:latest
    container_name: redis
    restart: always
    ports:
      - '6379:6379'
    volumes:
      - ./data:/data
      - ./redis.conf:/usr/local/etc/redis/redis.conf
      - ./logs:/logs
    command: redis-server /usr/local/etc/redis/redis.conf
redis.conf
# Redis server port (default: 6379)
port 6379
# Bind address. 127.0.0.1 allows local access only; 0.0.0.0 listens on all interfaces (default: 127.0.0.1)
bind 0.0.0.0
# Require clients to authenticate with a password; leave commented out to disable (default: none)
# requirepass foobared
# Close a connection after a client has been idle for N seconds (default: 0, disabled)
# timeout 0
# Run as a daemon. Default "no"; keep "no" under Docker so the process stays in the foreground
daemonize no
# Log level (default: notice). One of debug, verbose, notice, warning
loglevel notice
# Log file path (default: empty string, i.e. log to stdout)
logfile "/logs/redis.log"
# Number of databases (default: 16), indexed 0 through 15
databases 16
# Enable AOF persistence (default: "no"). When "yes", every write is appended to the AOF file
appendonly no
# AOF file name (default: appendonly.aof)
# appendfilename "appendonly.aof"
# AOF fsync policy (default: everysec). One of always, everysec, no
# always: fsync after every write
# everysec: fsync once per second
# no: leave it to the OS; fastest, but may lose data
# appendfsync everysec
# Whether to skip fsync while an AOF rewrite or RDB save is in progress (default: no)
# no-appendfsync-on-rewrite no
# Growth ratio that triggers an automatic AOF rewrite (default: 100)
# auto-aof-rewrite-percentage 100
# RDB snapshot rules: save <seconds> <changes>; remove all "save" lines to disable RDB
save 900 1
save 300 10
save 60 10000
# RDB file name (default: dump.rdb)
# dbfilename dump.rdb
# Directory where RDB/AOF files are saved (default: current directory)
# dir ./
# Replicate from a master (replication is off by default)
# slaveof <masterip> <masterport>
# Diskless replication: stream the RDB to replicas without touching disk (default: no)
# repl-diskless-sync no
# Replication backlog buffer size (default: 1mb)
# repl-backlog-size 1mb
# Enable cluster mode (default: no)
# cluster-enabled no
# Cluster node timeout (default: 15000 ms)
# cluster-node-timeout 15000
# Port announced to other cluster nodes (default: 0, use the listening port)
# cluster-announce-port 0
# IP announced to other cluster nodes
# cluster-announce-ip 127.0.0.1
# Log queries slower than N microseconds (negative disables the log, 0 logs everything)
# slowlog-log-slower-than 10000
# Max length of the slow log (default: 128)
# slowlog-max-len 128
# Max number of connected clients (default: 10000)
# maxclients 10000
# Max memory Redis may use, in bytes (no limit by default)
# maxmemory <bytes>
# Eviction policy once maxmemory is reached (default: noeviction). One of volatile-lru,
# allkeys-lru, volatile-random, allkeys-random, volatile-ttl, noeviction
# maxmemory-policy noeviction
# Number of keys sampled when evicting (default: 5)
# maxmemory-samples 5
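A quick liveness check once the container is up:
docker exec -it redis redis-cli ping
# expected reply: PONG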
mysql
mysql single node
Create directories and files
# paths must match the compose mounts and the my.cnf below
mkdir /data/db/mysql8.0 -p
mkdir /data/logs/mysql8.0 -p
mkdir /etc/mysql
touch /data/logs/mysql8.0/mysql.error.log
touch /data/logs/mysql8.0/slowquery.log
Create config files
docker-compose.yml
version: '3.5'
services:
  mysql8.0:
    network_mode: "host"
    container_name: "mysql8.0"
    environment:
      TZ: "Asia/Shanghai"
      MYSQL_ROOT_PASSWORD: "root"
    image: mysql:8.0
    restart: always
    privileged: true
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/db/mysql8.0:/data/db/mysql8.0
      - /etc/mysql/mysql8.0.cnf:/etc/mysql/my.cnf
      - /data/logs/mysql8.0:/data/logs/mysql8.0
/etc/mysql/mysql8.0.cnf (mounted into the container as /etc/mysql/my.cnf)
[client]
port = 3306
socket = /var/run/mysqld/mysql8.0.sock

[mysqld_safe]
pid-file = /var/run/mysqld/mysql8.0.pid
socket = /var/run/mysqld/mysql8.0.sock
nice = 0

# The MySQL server
[mysqld]
default_authentication_plugin = mysql_native_password
default-time_zone = '+8:00'
server-id = 60
#read_only = 1
master-info-repository = file
relay-log-info_repository = file
binlog-format = ROW
gtid-mode = on
enforce-gtid-consistency = true
log_slave_updates = 1
#sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
sql_mode = ''
#replicate-wild-ignore-table = mysql.%  # skip during replication
#replicate-wild-ignore-table = test.%   # skip during replication
log_bin_trust_function_creators = 1     # allow replicating stored functions
#innodb_force_recovery = 1
replicate-ignore-db = mysql
replicate-ignore-db = test
replicate-ignore-db = information_schema
replicate-ignore-db = performance_schema
user = mysql
pid-file = /var/run/mysqld/mysql8.0.pid
socket = /var/run/mysqld/mysql8.0.sock
port = 3306
basedir = /usr
secure_file_priv = /data/db/mysql8.0
datadir = /data/db/mysql8.0
tmpdir = /tmp
explicit_defaults_for_timestamp
log-error = /data/logs/mysql8.0/mysql.error.log
slow_query_log = on
long_query_time = 10
log_queries_not_using_indexes = 1
slow_query_log_file = /data/logs/mysql8.0/slowquery.log
log-bin-index = mysql-bin.index
relay_log = relay-log
relay_log_index = relay-log.index
transaction-isolation = REPEATABLE-READ
log-bin = mysql-bin
expire_logs_days = 2
max_binlog_size = 100M
innodb_checksum_algorithm = innodb
#innodb_log_checksum_algorithm = innodb
innodb_data_file_path = ibdata1:200M:autoextend
innodb_log_files_in_group = 2
innodb_log_file_size = 1572864000
#innodb_fast_checksum = false
#innodb_page_size = 16384
#innodb_log_block_size = 512
innodb_undo_directory = .
innodb_undo_tablespaces = 0
key_buffer_size = 32M          # MyISAM key buffer; caches MyISAM index data
max_allowed_packet = 8M        # max packet size the server accepts
myisam_sort_buffer_size = 256M
myisam_max_sort_file_size = 5G
character-set-server = utf8    # character set
max_connect_errors = 10000000  # max failed connection attempts allowed per host
max_connections = 10000        # max concurrent connections
lower_case_table_names = 1     # case-insensitive table names
innodb_write_io_threads = 2
innodb_read_io_threads = 2
innodb_buffer_pool_size = 512M
innodb_flush_method = O_DIRECT
innodb_flush_log_at_trx_commit = 2
sync_binlog = 1000
binlog_rows_query_log_events = 1
##binlog_row_image='minimal'
skip-name-resolve
#skip-grant-tables
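A quick connectivity check after startup (host networking, so the server listens on the host's port 3306; the root password comes from the compose file):
docker exec -it mysql8.0 mysql -uroot -proot -e 'select version();'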
nginx
Create directories
mkdir /data/server/nginx/conf/{sites,ssl,stream} -p
mkdir /data/logs/nginx -p
Create config files
docker-compose.yml
version: '3.5'
services:
  nginx:
    restart: always
    network_mode: "host"
    image: nginx:1.16
    container_name: nginx
    environment:
      - NAMESPACE=dev
    volumes:
      - /data/server/nginx/conf/nginx.conf:/etc/nginx/nginx.conf
      - /data/server/nginx/conf/sites:/etc/nginx/sites
      - /data/server/nginx/conf/ssl:/etc/nginx/ssl
      - /data/logs/nginx:/data/logs/nginx
      - /data/server:/data/server
/data/server/nginx/conf/nginx.conf
worker_processes 4;
worker_rlimit_nofile 102400;
error_log /data/logs/nginx/error.log;

events {
    use epoll;
    worker_connections 102400;
}

http {
    log_format access ' [$time_local] | $host | $remote_addr | $request | $request_time | $body_bytes_sent | $status |'
                      '| $upstream_addr | $upstream_response_time | $upstream_status |'
                      ' "$http_referer" | "$http_user_agent" ';
    log_format post_log escape=json '$remote_addr - $remote_user [$time_local] "$request" '
                                    '$status $body_bytes_sent "$http_referer" '
                                    '"$http_user_agent" '
                                    '"$http_x_forwarded_for" "$request_body"';
    include mime.types;
    default_type application/octet-stream;
    sendfile on;
    keepalive_timeout 65;
    #access_log /data/logs/nginx/access.log access;
    access_log off;
    underscores_in_headers on;
    tcp_nopush on;
    tcp_nodelay on;
    types_hash_max_size 2048;
    client_max_body_size 20M;
    client_body_buffer_size 128k;
    include sites/*.conf;
}

stream {
    resolver_timeout 5s;
    log_format log_json '{"access_time":"$time_local",'
                        '"remote_addr": "$remote_addr", '
                        '"protocol":"$protocol",'
                        '"status":$status,'
                        '"bytes_sent":"$bytes_sent",'
                        '"bytes_received":"$bytes_received",'
                        '"session_time":"$session_time",'
                        '"upstream_addr":"$upstream_addr",'
                        '"upstream_bytes_sent":"$upstream_bytes_sent",'
                        '"upstream_bytes_received":"$upstream_bytes_received",'
                        '"upstream_connect_time":"$upstream_connect_time"}';
    open_log_file_cache off;
    access_log /data/logs/nginx/access.log log_json;
    include /data/server/nginx/conf/stream/*.conf;
}
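The actual vhosts live under conf/sites/. As a minimal sketch (the domain and backend address are placeholders), a reverse-proxy site could be dropped in and reloaded like this:
cat > /data/server/nginx/conf/sites/demo.conf <<'EOF'
server {
    listen 80;
    server_name demo.example.com;          # placeholder domain
    access_log /data/logs/nginx/demo.access.log access;
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://127.0.0.1:8080;  # placeholder backend
    }
}
EOF
docker exec nginx nginx -t && docker exec nginx nginx -s reload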
node_exporter
Create directories
mkdir /data/apps/etc/node_exporter/prom -p
docker-compose.yml
version: '2'
services:
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest  # public image; swap in a private registry mirror if needed
    container_name: cadvisor
    restart: unless-stopped
    ports:
      - "58080:8080"
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /data/docker:/var/lib/docker:ro
  node-exporter:
    image: prom/node-exporter:latest  # public image; swap in a private registry mirror if needed
    container_name: node-exporter
    restart: unless-stopped
    ports:
      - "59100:9100"
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--path.rootfs=/host/root'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc|run)($$|/)'
      - '--collector.textfile.directory=/node_exporter/prom'
    volumes:
      - /proc:/host/proc
      - /sys:/host/sys
      - /:/host/root
      - ./etc/node_exporter/prom:/node_exporter/prom
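Both exporters can be spot-checked from the host once they are up:
curl -s http://localhost:59100/metrics | head
curl -s http://localhost:58080/metrics | head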
elasticsearch
elasticsearch single node
Create directories
mkdir /data/apps/{config,data,logs,plugins} -p
Create docker-compose.yml
name: 'elasticsearch'
services:
  elasticsearch:
    deploy:
      resources:
        limits:
          memory: 6144M
    restart: always
    image: elasticsearch:8.13.4
    network_mode: "host"
    container_name: es-single
    ulimits:
      nproc: 65535
      nofile:
        soft: 65535
        hard: 65535
      memlock:
        soft: -1
        hard: -1
    environment:
      - TZ=Asia/Shanghai
      - ES_JAVA_OPTS=-Xms4096m -Xmx4096m
    volumes:
      - ./data:/usr/share/elasticsearch/data
      - ./plugins:/usr/share/elasticsearch/plugins
      - ./logs:/usr/share/elasticsearch/logs
      - ./config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
Create the config file elasticsearch.yml
# basic settings
cluster.name: es-cluster
node.name: skyes01
discovery.type: single-node
network.host: 0.0.0.0
http.port: 9200
# security: keep password auth on, but turn off TLS, enrollment and the other X-Pack extras
xpack.security.enabled: true
xpack.security.enrollment.enabled: false
xpack.security.http.ssl.enabled: false
xpack.security.transport.ssl.enabled: false
xpack.ml.enabled: false
xpack.monitoring.collection.enabled: false
xpack.watcher.enabled: false
# disable geoip downloads
ingest.geoip.downloader.enabled: false
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
Start it: docker compose up -d
To change the elastic password, exec into the container and run:
bin/elasticsearch-reset-password --auto -u elastic
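Then verify the new credentials from the host (placeholder password):
curl -u elastic:yourpassword http://localhost:9200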
elasticsearch cluster
Most tutorials floating around no longer work with recent versions. It comes down to three settings; get these right and the cluster comes up fine:
- network.publish_host=<the host's IP>
- xpack.security.enabled=false
- xpack.security.http.ssl.enabled=false
Grant ownership on the directories, otherwise startup fails:
chown -R 1000:1000 /data/servers/elasticsearch/{data,certs}
Once es01 is up, scp -r /data/servers/elasticsearch/certs to the other machines.
es01
docker-compose.yml
services:
  setup:
    image: elasticsearch:${STACK_VERSION}
    volumes:
      - ./certs:/usr/share/elasticsearch/config/certs
    user: "0"
    command: >
      bash -c '
        if [ x${ELASTIC_PASSWORD} == x ]; then
          echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
          exit 1;
        elif [ x${KIBANA_PASSWORD} == x ]; then
          echo "Set the KIBANA_PASSWORD environment variable in the .env file";
          exit 1;
        fi;
        if [ ! -f config/certs/ca.zip ]; then
          echo "Creating CA";
          bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
          unzip config/certs/ca.zip -d config/certs;
        fi;
        if [ ! -f config/certs/certs.zip ]; then
          echo "Creating certs";
          echo -ne \
          "instances:\n"\
          "  - name: es01\n"\
          "    dns:\n"\
          "      - es01\n"\
          "    ip:\n"\
          "      - 192.168.2.235\n"\
          "  - name: es02\n"\
          "    dns:\n"\
          "      - es02\n"\
          "    ip:\n"\
          "      - 192.168.2.236\n"\
          "  - name: es03\n"\
          "    dns:\n"\
          "      - es03\n"\
          "    ip:\n"\
          "      - 192.168.2.237\n"\
          > config/certs/instances.yml;
          bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
          unzip config/certs/certs.zip -d config/certs;
        fi;
        echo "Setting file permissions"
        chown -R root:root config/certs;
        find . -type d -exec chmod 750 \{\} \;;
        find . -type f -exec chmod 640 \{\} \;;
        echo "Waiting for Elasticsearch availability";
        until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
        echo "Setting kibana_system password";
        until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
        echo "All done!";
      '
    healthcheck:
      test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
      interval: 1s
      timeout: 5s
      retries: 120
  es01:
    depends_on:
      setup:
        condition: service_healthy
    image: elasticsearch:${STACK_VERSION}
    extra_hosts:
      - "es01:192.168.2.235"
      - "es02:192.168.2.236"
      - "es03:192.168.2.237"
    volumes:
      - ./certs:/usr/share/elasticsearch/config/certs
      - ./data:/usr/share/elasticsearch/data
    ports:
      - ${ES_PORT}:9200
      - 9300:9300
    environment:
      - node.name=es01
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01,es02,es03
      - network.publish_host=192.168.2.235
      - discovery.seed_hosts=es02,es03
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=true
      - xpack.security.enrollment.enabled=true
      - xpack.security.enabled=false
      - xpack.security.http.ssl.enabled=false
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es01/es01.key
      - xpack.security.transport.ssl.certificate=certs/es01/es01.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120
The .env file:
CLUSTER_NAME=docker-cluster
ES_PORT=9200
KIBANA_PORT=5601
ELASTIC_PASSWORD=xxxx
KIBANA_PASSWORD=xxxx
STACK_VERSION=8.15.2
LICENSE=basic
MEM_LIMIT=6442450944
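Once all three nodes are up, cluster health can be checked from any of them (HTTP-layer security is disabled in this setup, so no credentials are needed):
curl -s http://192.168.2.235:9200/_cluster/health?pretty
# expect "number_of_nodes": 3 and "status": "green"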
kibana
docker-compose.yml
services:
  kibana:
    network_mode: "host"
    image: kibana:8.15.2
    container_name: kibana
    environment:
      - TZ=Asia/Shanghai
    volumes:
      - ./config:/usr/share/kibana/config
kibana.yml
server.name: kibana
server.port: 5601
server.host: "0.0.0.0"
i18n.locale: "zh-CN"
Kibana connects to Elasticsearch as the kibana_system user; its password is the KIBANA_PASSWORD value we set in the environment above.
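The kibana.yml above carries no Elasticsearch connection settings. A sketch of what would presumably be appended for this cluster (the node address and kibana_system password are the values from the setup above, not something this post showed):
cat >> ./config/kibana.yml <<'EOF'
elasticsearch.hosts: ["http://192.168.2.235:9200"]
elasticsearch.username: "kibana_system"
elasticsearch.password: "xxxx"
EOF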
es02
docker-compose.yml
services:
  es02:
    image: elasticsearch:${STACK_VERSION}
    extra_hosts:
      - "es01:192.168.2.235"
      - "es02:192.168.2.236"
      - "es03:192.168.2.237"
    volumes:
      - ./certs:/usr/share/elasticsearch/config/certs
      - ./data:/usr/share/elasticsearch/data
    ports:
      - ${ES_PORT}:9200
      - 9300:9300
    environment:
      - node.name=es02
      - network.publish_host=192.168.2.236
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01,es02,es03
      - discovery.seed_hosts=es01,es03
      - bootstrap.memory_lock=true
      - xpack.security.enrollment.enabled=true
      - xpack.security.enabled=false
      - xpack.security.http.ssl.enabled=false
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es02/es02.key
      - xpack.security.transport.ssl.certificate=certs/es02/es02.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120
es03
docker-compose.yml
services:
  es03:
    image: elasticsearch:${STACK_VERSION}
    extra_hosts:
      - "es01:192.168.2.235"
      - "es02:192.168.2.236"
      - "es03:192.168.2.237"
    volumes:
      - ./certs:/usr/share/elasticsearch/config/certs
      - ./data:/usr/share/elasticsearch/data
    ports:
      - ${ES_PORT}:9200
      - 9300:9300
    environment:
      - node.name=es03
      - network.publish_host=192.168.2.237
      - cluster.name=${CLUSTER_NAME}
      - cluster.initial_master_nodes=es01,es02,es03
      - discovery.seed_hosts=es01,es02
      - bootstrap.memory_lock=true
      - xpack.security.enrollment.enabled=true
      - xpack.security.enabled=false
      - xpack.security.http.ssl.enabled=false
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es03/es03.key
      - xpack.security.transport.ssl.certificate=certs/es03/es03.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120
logstash
name: 'logstash'
services:
  logstash:
    deploy:
      resources:
        limits:
          memory: 6144M
    restart: always
    image: 192.168.2.221:80/devops/logstash:7.17.24
    network_mode: "host"
    container_name: logstash
    ulimits:
      nproc: 65535
      nofile:
        soft: 65535
        hard: 65535
      memlock:
        soft: -1
        hard: -1
    environment:
      - TZ=Asia/Shanghai
      - LS_JAVA_OPTS=-Xms4096m -Xmx4096m
    volumes:
      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf
logstash.conf
input {
  kafka {
    bootstrap_servers => "192.168.2.239:9092"
    topics => ["your-logs"]
    group_id => "your-logstash-group"
    session_timeout_ms => 10000
    max_poll_records => 10
    decorate_events => true
    codec => json
  }
}

filter {
}

output {
  elasticsearch {
    hosts => ["192.168.2.239:9200"]
    index => "logs-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "yourpasswd"
  }
  stdout { codec => rubydebug }
}
prometheus
Create a directory for the data
mkdir /data1/storage/prometheus -p
/data/servers/prometheus is the Prometheus home directory; the prometheus.yml under this path gets mounted into the container.
docker-compose.yml
version: '3'
services:
  prometheus:
    image: prom/prometheus:v2.53.1
    container_name: prometheus
    restart: unless-stopped
    ports:
      - "9090:9090"
    command:
      - "--web.enable-lifecycle"
      - "--config.file=/etc/prometheus/prometheus.yml"
    volumes:
      - /data1/storage/prometheus/:/prometheus
      - /data/servers/prometheus:/etc/prometheus
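prometheus.yml itself is not shown here; a minimal sketch that scrapes the node_exporter from earlier (the target address is an assumption):
cat > /data/servers/prometheus/prometheus.yml <<'EOF'
global:
  scrape_interval: 15s
scrape_configs:
  - job_name: 'node'
    static_configs:
      - targets: ['192.168.2.201:59100']  # placeholder node-exporter address
EOF
# --web.enable-lifecycle allows reloading the config without a restart:
curl -X POST http://localhost:9090/-/reload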
grafana
Create a directory for the data
mkdir /data1/storage/grafana/data -p
docker-compose.yml
services:
  grafana:
    user: '0'
    image: grafana/grafana:11.1.2-ubuntu
    container_name: grafana
    restart: unless-stopped
    ports:
      - "3000:3000"
    volumes:
      - /data1/storage/grafana/data:/var/lib/grafana
consul
consul single node
Create a directory for the data
mkdir /data1/storage/consul/data -p
docker-compose.yml
version: '3'
services:
  consul:
    image: consul:1.15.4
    container_name: consul
    restart: unless-stopped
    ports:
      - "8300:8300"
      - "8301:8301"
      - "8500:8500"
      - "8600:8600"
    volumes:
      - /data1/storage/consul/data:/consul/data
    command:
      - "agent"
      - "-server"
      - "-ui"
      - "-node=n1"
      - "-bootstrap-expect=1"
      - "-client=0.0.0.0"
      - "-advertise=192.168.2.201"
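A quick check that the single node came up and elected itself leader:
docker exec -it consul consul members
curl -s http://localhost:8500/v1/status/leader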
jumpserver
Containerized deployment of the bastion host. MySQL and Redis must be deployed first (Redis 7.x or newer); refer to the component sections above, so they are not repeated here.
services:
  jumpserver:
    image: jumpserver/jms_all:v4.0.2
    container_name: jumpserver
    volumes:
      - /data1/storage/jumpserver/core:/opt/jumpserver/data
      - /data1/storage/jumpserver/koko:/opt/koko/data
      - /data1/storage/jumpserver/lion:/opt/lion/data
      - /data1/storage/jumpserver/chen:/opt/chen/data
      - /data1/storage/jumpserver/logs:/var/log/nginx
      - /data1/storage/jumpserver/download:/opt/download
    ports:
      - 8081:80
      - 2222:2222
      #- 30000-30100:30000-30100
    env_file:
      - .env
    restart: on-failure
The .env file is configured as follows:
SECRET_KEY=7BKXPVB1BE8fx2a5ygoRPPzCj5pI2m8utG3axWghBg70Ux0kzz
BOOTSTRAP_TOKEN=pmqVUafj29xaxH8EipZ4Vhji
LOG_LEVEL=ERROR
DB_ENGINE=mysql
DB_HOST=xxxx
DB_PORT=3306
DB_USER=jumpserver
DB_PASSWORD=xxxx
DB_NAME=jumpserver
REDIS_HOST=xxxx
REDIS_PORT=6379
REDIS_PASSWORD=xxxx
yearning deployment
Deploying Yearning is fairly simple, just two steps: initialize the database, then deploy the container.
docker run --rm -it -p8000:8000 -e SECRET_KEY=z5gXGjhusHVxxSXb -e MYSQL_USER=yearning -e MYSQL_ADDR=xxxx -e MYSQL_PASSWORD=xxxx -e MYSQL_DB=yearning -e Y_LANG=zh_CN yeelabs/yearning /opt/Yearning install
services:
  yearning:
    image: yeelabs/yearning
    container_name: yearning
    restart: always
    ports:
      - "8000:8000"
    environment:
      SECRET_KEY: z5gXGjx5xaefxSXb
      MYSQL_USER: yearning
      MYSQL_ADDR: xxxx
      MYSQL_PASSWORD: xxxx
      MYSQL_DB: yearning
      Y_LANG: zh_CN
nightingale
A platform for managing and distributing Prometheus alert rules. Quite powerful, a bit reminiscent of 360's Doraemon from years back, and pleasant to use.
networks:
  nightingale:
    driver: bridge
services:
  mysql:
    image: "192.168.2.221:80/devops/mysql:8"
    container_name: mysql
    hostname: mysql
    restart: always
    environment:
      TZ: Asia/Shanghai
      MYSQL_ROOT_PASSWORD: xxxx
    volumes:
      - ./mysqldata:/var/lib/mysql/
      #- ../initsql:/docker-entrypoint-initdb.d/
      - ./etc-mysql/my.cnf:/etc/my.cnf
    networks:
      - nightingale
    ports:
      - "3306:3306"
  redis:
    image: "192.168.2.221:80/devops/redis:6.2"
    container_name: redis
    hostname: redis
    restart: always
    environment:
      TZ: Asia/Shanghai
    networks:
      - nightingale
    ports:
      - "6379:6379"
  nightingale:
    image: "192.168.2.221:80/devops/nightingale:7.4.1"
    container_name: nightingale
    hostname: nightingale
    restart: always
    environment:
      GIN_MODE: release
      TZ: Asia/Shanghai
      WAIT_HOSTS: mysql:3306, redis:6379
    volumes:
      - ./etc-nightingale:/app/etc
    networks:
      - nightingale
    ports:
      - "17000:17000"
      - "20090:20090"
    depends_on:
      - mysql
      - redis
      #- victoriametrics
    command: >
      sh -c "/app/n9e"
kafka cluster
Create the data directory first.
Note: KAFKA_CFG_NODE_ID must match KAFKA_BROKER_ID.
node01
services:
  kafka:
    image: '192.168.2.221:80/devops/kafka:3.7'
    container_name: kafka
    ports:
      - "9092:9092"
      - "9093:9093"
    privileged: true # without this, startup fails with: Failed to start thread "GC Thread#0" - pthread_create failed (EPERM)
    environment:
      # enable KRaft, i.e. Kafka without ZooKeeper
      - KAFKA_ENABLE_KRAFT=yes
      - KAFKA_CFG_NODE_ID=1
      # this node acts as both controller and broker
      - KAFKA_CFG_PROCESS_ROLES=controller,broker
      # server socket listeners (address and port inside the container)
      - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
      # address advertised to clients; must be the host's IP and port, not 0.0.0.0
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.2.238:9092
      # security protocol per listener
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
      # controller quorum: id@host:port for every node in the cluster
      - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@192.168.2.238:9093,2@192.168.2.239:9093,3@192.168.2.240:9093
      # which listener carries controller traffic
      - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
      # broker max and initial heap size
      - KAFKA_HEAP_OPTS=-Xmx512M -Xms256M
      # KRaft cluster id; every node must be initialized with the same 22-byte UUID
      - KAFKA_KRAFT_CLUSTER_ID=xYcCyHmJlIaLzLoBzVwIcP
      # allow the PLAINTEXT listener (default false; not recommended in production)
      #- ALLOW_PLAINTEXT_LISTENER=yes
      # allow automatic topic creation
      - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
      # broker.id, must be unique and match KAFKA_CFG_NODE_ID
      - KAFKA_BROKER_ID=1
    volumes:
      - ./data:/bitnami/kafka:rw
node02
|
services:
kafka:
image: '192.168.2.221:80/devops/kafka:3.7'
container_name: kafka
ports:
- "9092:9092"
- "9093:9093"
privileged: true #不添加的话会启动报错(Failed to start thread "GC Thread#0" - pthread_create failed (EPERM) )
environment:
#允许使用kraft,即Kafka替代Zookeeper
- KAFKA_ENABLE_KRAFT=yes
- KAFKA_CFG_NODE_ID=2
#kafka角色,做broker,也要做controller
- KAFKA_CFG_PROCESS_ROLES=controller,broker
#定义kafka服务端socket监听端口(Docker内部的ip地址和端口)
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
# 定义外网访问地址,必须填写宿主机ip地址和端口,ip不能是0.0.0.0
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.2.239:9092
#定义安全协议
- KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
#集群地址
- [email protected]:9093,[email protected]:9093,[email protected]:9093
#指定供外部使用的控制类请求信息
- KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
#设置broker最大内存,和初始内存
- KAFKA_HEAP_OPTS=-Xmx512M -Xms256M
#使用Kafka时的集群id,集群内的Kafka都要用这个id做初始化,生成一个UUID即可(22byte)
- KAFKA_KRAFT_CLUSTER_ID=xYcCyHmJlIaLzLoBzVwIcP
#允许使用PLAINTEXT监听器,默认false,不建议在生产环境使用
#- ALLOW_PLAINTEXT_LISTENER=yes
# 允许自动创建主题
- KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
#broker.id,必须唯一,且与KAFKA_CFG_NODE_ID一致
- KAFKA_BROKER_ID=2
volumes:
- ./data:/bitnami/kafka:rw
node03
|
services:
kafka:
image: '192.168.2.221:80/devops/kafka:3.7'
container_name: kafka
ports:
- "9092:9092"
- "9093:9093"
privileged: true #不添加的话会启动报错(Failed to start thread "GC Thread#0" - pthread_create failed (EPERM) )
environment:
#允许使用kraft,即Kafka替代Zookeeper
- KAFKA_ENABLE_KRAFT=yes
- KAFKA_CFG_NODE_ID=3
#kafka角色,做broker,也要做controller
- KAFKA_CFG_PROCESS_ROLES=controller,broker
#定义kafka服务端socket监听端口(Docker内部的ip地址和端口)
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
# 定义外网访问地址,必须填写宿主机ip地址和端口,ip不能是0.0.0.0
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.2.240:9092
#定义安全协议
- KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
#集群地址
- [email protected]:9093,[email protected]:9093,[email protected]:9093
#指定供外部使用的控制类请求信息
- KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
#设置broker最大内存,和初始内存
- KAFKA_HEAP_OPTS=-Xmx512M -Xms256M
#使用Kafka时的集群id,集群内的Kafka都要用这个id做初始化,生成一个UUID即可(22byte)
- KAFKA_KRAFT_CLUSTER_ID=xYcCyHmJlIaLzLoBzVwIcP
#允许使用PLAINTEXT监听器,默认false,不建议在生产环境使用
#- ALLOW_PLAINTEXT_LISTENER=yes
# 不允许自动创建主题
- KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
#broker.id,必须唯一,且与KAFKA_CFG_NODE_ID一致
- KAFKA_BROKER_ID=3
volumes:
- ./data:/bitnami/kafka:rw
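Once all three nodes are up, an end-to-end smoke test can be run from any of them (this assumes a Bitnami-style image, which the KAFKA_CFG_* variables and the /bitnami/kafka volume suggest; the topic name is illustrative):
docker exec -it kafka kafka-topics.sh --bootstrap-server 192.168.2.238:9092 --create --topic smoke-test --partitions 3 --replication-factor 3
docker exec -it kafka kafka-topics.sh --bootstrap-server 192.168.2.238:9092 --describe --topic smoke-test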