目录

docker应用-容器化部署中间件

Docker Compose 容器化部署中间件。

动机

之前的测试环境用的二进制部署,不好维护,借此机会将其改为容器化部署,目前先用单节点部署。后续如有其他部署方式,也会在此继续更新。

rocketmq

rocketmq单节点

创建目录

1
2
3
4
# Create log and data directories (-p so parent dirs are created as needed).
# conf/ and store/ must exist because docker-compose.yml bind-mounts
# /data/rocketmq/broker-a/conf/broker.conf and /data/rocketmq/broker-a/store.
mkdir -p /data/logs/rocketmq/{namesrv,broker-a,console}
mkdir -p /data/rocketmq/broker-a/{conf,store}
# The apache/rocketmq image runs as uid/gid 3000, so it must own these dirs.
chown -R 3000:3000 /data/logs/rocketmq/{namesrv,broker-a}
chown -R 3000:3000 /data/rocketmq/broker-a

创建配置文件 /data/rocketmq/broker-a/conf/broker.conf

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
# RocketMQ broker configuration for a single-master deployment.
brokerClusterName = DefaultCluster
brokerName = broker-a
# 0 = master; any value > 0 marks a slave.
brokerId = 0
# Hour of day (04:00) when expired commitlog files are deleted.
deleteWhen = 04
# Keep commitlog files for 48 hours before they become eligible for deletion.
fileReservedTime = 48
brokerRole = ASYNC_MASTER
flushDiskType = ASYNC_FLUSH
# Store paths inside the container; docker-compose bind-mounts
# /data/rocketmq/broker-a/store on the host to /home/rocketmq/store.
storePathRootDir = /home/rocketmq/store
storePathCommitLog = /home/rocketmq/store/commitlog
storePathConsumerQueue = /home/rocketmq/store/consumequeue
# Auto-creation is convenient for test environments; disable in production.
autoCreateTopicEnable = true
autoCreateSubscriptionGroup = true
slaveReadEnable = true
# NOTE(review): clients connecting from outside the Docker bridge network
# usually also need "brokerIP1 = <host ip>" here — confirm for this environment.

创建 docker-compose.yml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
version: '3.5'
services:
  rocketmq-namesrv:
    image: apache/rocketmq:4.9.3
    container_name: namesrv
    restart: always
    ports:
      # port mappings quoted so YAML can never misparse "a:b" as a number
      - "9876:9876"
    volumes:
      - /data/logs/rocketmq/namesrv:/home/rocketmq/logs/rocketmqlogs
    command: "sh mqnamesrv"
    networks:
      - rocketmq
  rocketmq-broker:
    image: apache/rocketmq:4.9.3
    container_name: rmqbroker
    restart: always
    depends_on:
      - rocketmq-namesrv
    ports:
      - "10909:10909"
      - "10911:10911"
    volumes:
      - /data/logs/rocketmq/broker-a:/home/rocketmq/logs/rocketmqlogs
      - /data/rocketmq/broker-a/store:/home/rocketmq/store
      - /data/rocketmq/broker-a/conf/broker.conf:/home/rocketmq/rocketmq-4.9.3/conf/broker.conf
    command: "sh mqbroker -c /home/rocketmq/rocketmq-4.9.3/conf/broker.conf"
    environment:
      NAMESRV_ADDR: "rocketmq-namesrv:9876"
    networks:
      - rocketmq
  rmqconsole:
    # NOTE(review): styletang/rocketmq-console-ng is unmaintained; the current
    # official console image is apacherocketmq/rocketmq-dashboard — consider switching.
    image: styletang/rocketmq-console-ng
    container_name: rocketmq-console
    restart: always
    ports:
      - "8080:8080"
    depends_on:
      - rocketmq-namesrv
    # share the host timezone and persist console logs
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /data/logs/rocketmq/console:/root/logs/consolelogs
    environment:
      JAVA_OPTS: "-Drocketmq.namesrv.addr=rocketmq-namesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
    networks:
      - rocketmq
networks:
  rocketmq:
    driver: bridge

启动

docker compose up -d

redis

redis单节点

创建目录

mkdir /data/apps/{data,logs} -p

创建配置文件

docker-compose.yml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
version: '3.3'
services:
  redis:
    # NOTE(review): consider pinning a concrete version instead of "latest"
    # so rebuilds are reproducible.
    image: redis:latest
    container_name: redis
    restart: always
    ports:
      - '6379:6379'
    volumes:
      # data, config and logs live next to this compose file on the host
      - ./data:/data
      - ./redis.conf:/usr/local/etc/redis/redis.conf
      - ./logs:/logs
    # start redis with the mounted config instead of the image default
    command: redis-server /usr/local/etc/redis/redis.conf

redis.conf

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# TCP port the server listens on (default: 6379)
port 6379

# Interface(s) to bind. 127.0.0.1 = local only; 0.0.0.0 = all interfaces.
# NOTE(review): 0.0.0.0 with no requirepass exposes Redis to the network —
# confirm this host is not reachable from untrusted clients.
bind 0.0.0.0

# Require clients to authenticate before running commands (default: none)
# requirepass foobared

# Close a connection after it has been idle for N seconds (0 disables)
# timeout 0

# Run as a daemon. Must stay "no" under Docker, otherwise the container exits.
daemonize no

# Log verbosity: debug, verbose, notice or warning (default: notice)
loglevel notice

# Log file path; an empty string logs to standard output
logfile "/logs/redis.log"

# Number of databases, selectable by index 0..N-1 (default: 16)
databases 16

# Enable AOF persistence; "yes" appends every write operation to the AOF file
appendonly no

# AOF file name (default: appendonly.aof)
# appendfilename "appendonly.aof"

# AOF fsync policy (default: everysec, not always):
# always:   fsync after every write — safest, slowest
# everysec: fsync once per second — good compromise
# no:       let the OS decide — fastest, may lose data on crash
# appendfsync always

# Minimum AOF size before automatic rewrites are considered (default: 64mb)
# auto-aof-rewrite-min-size 64mb

# Percentage growth over the last rewrite size that triggers a new AOF rewrite (default: 100)
# auto-aof-rewrite-percentage 100

# RDB snapshotting: save <seconds> <changes>.
# Comment out all "save" lines to disable RDB persistence entirely.
save 900 1
save 300 10
save 60 10000

# RDB file name (default: dump.rdb)
# dbfilename dump.rdb

# Working directory where RDB/AOF files are written (default: ./)
# dir ./

# Make this server a replica of another instance ("replicaof" since Redis 5)
# slaveof <masterip> <masterport>

# Diskless replication: stream the RDB to replicas without writing it to disk (default: no)
# repl-diskless-sync no

# Size of the replication backlog used for partial resynchronization (default: 1mb)
# repl-backlog-size 1mb

# Enable cluster mode (default: no)
# cluster-enabled no

# Cluster node timeout in milliseconds (default: 15000)
# cluster-node-timeout 15000

# Port this node announces to other cluster nodes (0 = the listening port)
# cluster-announce-port 0

# IP address this node announces to other cluster nodes
# cluster-announce-ip 127.0.0.1

# Log commands slower than this many microseconds (0 logs everything; negative disables)
# slowlog-log-slower-than 10000

# Maximum number of entries kept in the slow log (default: 128)
# slowlog-max-len 128

# Maximum number of simultaneously connected clients (default: 10000)
# maxclients 10000

# Memory usage limit in bytes (unset/0 = no limit)
# maxmemory <bytes>

# Eviction policy applied when maxmemory is reached (default: noeviction).
# Options: volatile-lru, allkeys-lru, volatile-random, allkeys-random, volatile-ttl, noeviction
# maxmemory-policy noeviction

# Number of keys sampled when applying an LRU/TTL eviction policy (default: 5)
# maxmemory-samples 5

mysql

mysql单节点

创建目录和文件

1
2
3
4
5
# Paths must match the docker-compose volume mounts and the cnf file:
# datadir -> /data/db/mysql8.0, logs -> /data/logs/mysql8.0 (the original
# /data/db/logs directory was never mounted into the container).
mkdir -p /data/db/mysql8.0
mkdir -p /data/logs/mysql8.0
mkdir -p /etc/mysql

# Pre-create the files referenced by log-error / slow_query_log_file
touch /data/logs/mysql8.0/mysql.error.log
touch /data/logs/mysql8.0/slowquery.log
# NOTE(review): the official mysql image runs as uid 999 — if startup fails
# with a permission error, chown -R 999:999 /data/logs/mysql8.0 /data/db/mysql8.0

创建配置文件

docker-compose.yml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
version: '3.5'
services:
  mysql8.0:
    image: mysql:8.0
    container_name: "mysql8.0"
    restart: always
    # host networking: the container binds the host's 3306 directly
    network_mode: "host"
    # NOTE(review): privileged is likely unnecessary for mysql — consider removing
    privileged: true
    environment:
      TZ: "Asia/Shanghai"
      MYSQL_ROOT_PASSWORD: "root"
    volumes:
      # read-only so the container cannot modify the host clock file
      - /etc/localtime:/etc/localtime:ro
      - /data/db/mysql8.0:/data/db/mysql8.0
      # host file mysql8.0.cnf is read by the container as /etc/mysql/my.cnf
      - /etc/mysql/mysql8.0.cnf:/etc/mysql/my.cnf
      - /data/logs/mysql8.0:/data/logs/mysql8.0

/etc/mysql/mysql8.0.cnf(容器内挂载为 /etc/mysql/my.cnf)

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
[client]
port=3306
socket =/var/run/mysqld/mysql8.0.sock

[mysqld_safe]
pid-file =/var/run/mysqld/mysql8.0.pid
socket =/var/run/mysqld/mysql8.0.sock
nice = 0


# The MySQL server
[mysqld]
# Allow legacy clients that only speak mysql_native_password
# (MySQL 8.0 defaults to caching_sha2_password).
default_authentication_plugin=mysql_native_password
default-time_zone = '+8:00'
server-id=60
#read_only = 1
# NOTE(review): FILE repositories are deprecated in MySQL 8.0 (TABLE is the
# default) — confirm these two settings still start on the 8.0.x in use.
master-info-repository = file
relay-log-info_repository = file
binlog-format = ROW
# GTID-based replication
gtid-mode = on
enforce-gtid-consistency = true
log_slave_updates = 1
#sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES
# Empty sql_mode disables strict mode — presumably for legacy-app
# compatibility; TODO confirm this is intentional.
sql_mode=''
#replicate-wild-ignore-table = mysql.% # skipped during replication
#replicate-wild-ignore-table = test.% # skipped during replication
log_bin_trust_function_creators = 1  ### allow replicating stored functions
#innodb_force_recovery = 1
# Databases excluded from replication
replicate-ignore-db = mysql
replicate-ignore-db = test
replicate-ignore-db = information_schema
replicate-ignore-db = performance_schema
user = mysql
pid-file =/var/run/mysqld/mysql8.0.pid
socket =/var/run/mysqld/mysql8.0.sock
port=3306
basedir = /usr
secure_file_priv = /data/db/mysql8.0
datadir = /data/db/mysql8.0
tmpdir = /tmp
explicit_defaults_for_timestamp
# Error log and slow query log are on the host via the compose volume mount
log-error =/data/logs/mysql8.0/mysql.error.log
slow_query_log = on
long_query_time = 10
log_queries_not_using_indexes=1
slow_query_log_file =/data/logs/mysql8.0/slowquery.log
log-bin-index = mysql-bin.index
relay_log = relay-log
relay_log_index = relay-log.index
transaction-isolation = REPEATABLE-READ
log-bin=mysql-bin
# NOTE(review): expire_logs_days is deprecated in 8.0 in favor of
# binlog_expire_logs_seconds — still works but consider migrating.
expire_logs_days = 2
max_binlog_size = 100M
innodb_checksum_algorithm=innodb
#innodb_log_checksum_algorithm=innodb
innodb_data_file_path=ibdata1:200M:autoextend
innodb_log_files_in_group=2
# 1.5 GB per redo log file
innodb_log_file_size=1572864000
#innodb_fast_checksum=false
#innodb_page_size=16384
#innodb_log_block_size=512
innodb_undo_directory=.
innodb_undo_tablespaces=0
key_buffer_size=32M # MyISAM index buffer
max_allowed_packet=8M # largest packet the server accepts
myisam_sort_buffer_size = 256M
myisam_max_sort_file_size = 5G
character-set-server=utf8   # NOTE(review): "utf8" is 3-byte utf8mb3; consider utf8mb4
max_connect_errors = 10000000  # failed-connect errors tolerated per host
max_connections = 10000     # max concurrent connections
lower_case_table_names = 1    # store table names lowercase, compare case-insensitively
innodb_write_io_threads = 2
innodb_read_io_threads = 2
innodb_buffer_pool_size = 512M
innodb_flush_method = O_DIRECT
# 2 = write redo log at commit, fsync roughly once per second (may lose ~1s on crash)
innodb_flush_log_at_trx_commit = 2
sync_binlog = 1000
binlog_rows_query_log_events =1
##binlog_row_image='minimal'
skip-name-resolve
#skip-grant-tables

nginx

创建目录

1
2
# sites/ holds http vhosts, stream/ holds TCP/UDP proxy confs, ssl/ holds certs.
# stream/ must exist because nginx.conf includes /data/server/nginx/conf/stream/*.conf.
mkdir -p /data/server/nginx/conf/{sites,stream,ssl}
mkdir -p /data/logs/nginx

创建配置文件

docker-compose.yml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
version: '3.5'
services:
  nginx:
    restart: always
    # host networking: nginx binds the host's ports directly
    network_mode: "host"
    image: nginx:1.16
    container_name: nginx
    environment:
      - NAMESPACE=dev
    volumes:
      # configuration is mounted read-only; only logs need write access
      - /data/server/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro
      - /data/server/nginx/conf/sites:/etc/nginx/sites:ro
      - /data/server/nginx/conf/ssl:/etc/nginx/ssl:ro
      - /data/logs/nginx:/data/logs/nginx
      # whole /data/server mirrored so absolute includes/roots resolve in-container
      - /data/server:/data/server

/data/server/nginx/conf/nginx.conf

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
worker_processes     4;
worker_rlimit_nofile 102400;

error_log  /data/logs/nginx/error.log;

events {
    use epoll;
    worker_connections  102400;
}

http {
    # pipe-separated access format (disabled below; swap the access_log lines to enable)
    log_format access ' [$time_local] | $host | $remote_addr | $request | $request_time | $body_bytes_sent | $status |'
                      '| $upstream_addr | $upstream_response_time | $upstream_status |'
                      ' "$http_referer" | "$http_user_agent" ';

    # JSON-escaped format that also captures the request body (useful for POST debugging)
    log_format post_log escape=json '$remote_addr - $remote_user [$time_local] "$request" '
                                    '$status $body_bytes_sent "$http_referer" '
                                    '"$http_user_agent" '
                                    '"$http_x_forwarded_for" "$request_body"';

    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    #access_log /data/logs/nginx/access.log access;
    access_log off;

    underscores_in_headers on;
    tcp_nopush          on;
    tcp_nodelay         on;
    types_hash_max_size 2048;
    client_max_body_size 20M;
    client_body_buffer_size 128k;
    # per-site server blocks, relative to the conf dir (/etc/nginx/sites in the container)
    include  sites/*.conf;
}

# TCP/UDP (layer-4) proxying
stream {
    resolver_timeout 5s;
    log_format log_json '{"access_time":"$time_local",'
                    '"remote_addr": "$remote_addr", '
                    '"protocol":"$protocol",'
                    '"status":$status,'
                    '"bytes_sent":"$bytes_sent",'
                    '"bytes_received":"$bytes_received",'
                    '"session_time":"$session_time",'
                    '"upstream_addr":"$upstream_addr",'
                    '"upstream_bytes_sent":"$upstream_bytes_sent",'
                    '"upstream_bytes_received":"$upstream_bytes_received",'
                    '"upstream_connect_time":"$upstream_connect_time"}';

    open_log_file_cache off;
    access_log /data/logs/nginx/access.log log_json;
    # absolute host path — resolves only because docker-compose mounts /data/server
    # into the container; the stream/ directory must exist on the host
    include /data/server/nginx/conf/stream/*.conf;
}

node_exporter

创建目录

mkdir /data/apps/etc/node_exporter/prom -p

docker-compose.yml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
version: '2'
services:
  cadvisor:
    # a bare "cadvisor:latest" does not exist on Docker Hub;
    # the project publishes images under gcr.io/cadvisor/cadvisor
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    restart: unless-stopped
    ports:
      - "58080:8080"
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /data/docker:/var/lib/docker:ro

  node-exporter:
    # the official image lives under the "prom" organization on Docker Hub
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    ports:
      - "59100:9100"
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      # $$ escapes the dollar for compose; node_exporter receives ($|/)
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
      # textfile collector reads *.prom files dropped into this directory
      - '--collector.textfile.directory=/node_exporter/prom'
    volumes:
      # host metrics sources are read-only for the exporter
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
      - ./etc/node_exporter/prom:/node_exporter/prom

elasticsearch

elasticsearch单节点

创建目录

# No spaces inside the brace list — "{config, data, ...}" is not expanded by
# the shell and would create literal directories like "{config," instead.
mkdir -p /data/apps/{config,data,logs,plugins}

创建 docker-compose.yml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
name: 'elasticsearch'
services:
  elasticsearch:
    deploy:
      resources:
        limits:
          # container memory cap; leaves headroom above the 4 GB JVM heap
          memory: 6144M
    restart: always
    image: elasticsearch:8.13.4
    network_mode: "host"
    container_name: es-single
    ulimits:
      nproc: 65535
      nofile:
        soft: 65535
        hard: 65535
      # unlimited locked memory (required if bootstrap.memory_lock is enabled)
      memlock:
        soft: -1
        hard: -1
    environment:
      - TZ=Asia/Shanghai
      # fixed heap: min == max avoids resize pauses
      - ES_JAVA_OPTS=-Xms4096m -Xmx4096m
    volumes:
      # data/plugins/logs/config live next to this compose file on the host
      - ./data:/usr/share/elasticsearch/data
      - ./plugins:/usr/share/elasticsearch/plugins
      - ./logs:/usr/share/elasticsearch/logs
      - ./config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml

创建配置文件 elasticsearch.yml

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
# Basic single-node settings
cluster.name: es-cluster
node.name: skyes01
discovery.type: single-node
network.host: 0.0.0.0
http.port: 9200

# X-Pack: authentication stays ENABLED (the elastic password is reset after
# startup — see the final step), while TLS, enrollment, ML, monitoring
# collection and watcher are turned off for this single-node test setup.
# (The original comment said "disable security", contradicting the first line.)
xpack.security.enabled: true
xpack.security.enrollment.enabled: false
xpack.security.http.ssl.enabled: false
xpack.security.transport.ssl.enabled: false
xpack.ml.enabled: false
xpack.monitoring.collection.enabled: false
xpack.watcher.enabled: false

# Disable automatic GeoIP database downloads at startup
ingest.geoip.downloader.enabled: false

# Allow cross-origin HTTP requests from browser-based tools
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type

启动 docker compose up -d

改密码,进入到容器内执行

1
# Run inside the container (working dir /usr/share/elasticsearch).
# Note "./bin", not ".bin" — the original had a typo.
./bin/elasticsearch-reset-password --auto -u elastic