Docker Compose Usage
Official Docker Compose documentation: https://docs.docker.com/compose/
1. Installing Docker Compose
If you install Docker Desktop, Docker Compose is already included by default.
Docker Compose installation references:
https://www.cnblogs.com/morang/p/devops-docker24-composev2-install.html
https://blog.csdn.net/pushiqiang/article/details/78682323
2. Getting Started with YAML
Docker Compose configuration files are written in YAML format.
https://www.runoob.com/w3cnote/yaml-intro.html
https://zhuanlan.zhihu.com/p/145173920
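Before reading the Compose files below, it may help to recall the small amount of YAML syntax they rely on. The snippet below is a minimal, hypothetical illustration (the keys and values are made up and are not part of any configuration in this article): mappings are written as key: value, nesting is expressed by indentation, lists are written with a leading dash, and comments start with #.
# hypothetical YAML, for syntax illustration only
service_example:
  image: nginx:latest       # a scalar value inside a nested mapping
  ports:                    # a list of scalars
    - "8080:80"
    - "8443:443"
  enabled: true             # booleans and numbers need no quotes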
3. Docker Compose Configuration File (.yml) Reference
https://www.runoob.com/docker/docker-compose.html
https://zhuanlan.zhihu.com/p/387840381
A simple example:
docker-compose.yml
version: '3.5'
# version must be specified and always appears on the first line of the file. It defines the version of the Compose file format (essentially the API). Note that version is NOT the version number of Docker Compose or of the Docker Engine.
networks: # networks tells Docker to create new networks. By default Docker Compose creates bridge networks, a single-host network type that can only connect containers on the same host. A different network type can be selected with the driver property.
  ehub_frontend: # custom network
    name: ehub_frontend
  ehub_storage:
    name: ehub_storage
#volumes: # volumes tells Docker to create new named volumes.
  #data-elasticsearch:
  #data-mongo:
  #data-gateway-plugins-volume:
  #data-gateway-log-volume:
  #data-management-api-plugins-volume:
  #data-management-api-log-volume:
  #data-management-ui-log-volume:
  #data-portal-ui-log-volume:
services: # services defines the application services of the project. Docker Compose deploys each service in its own container.
  # 1. The six Gravitee.io APIM services
  mongodb: # service name (user-defined)
    #image: mongo:${MONGODB_VERSION:-3.6}  # the image used to start the container: repository/tag or an image ID (or an ID prefix)
    image: mongo:6 # image name or image ID. If the image does not exist locally, Compose will try to pull it.
    container_name: ehub_gio_apim_mongodb # container name
    restart: always # restart the container whenever the Docker service starts or the container exits
    volumes: # directory mounts
      #- data-mongo:/data/db # replace data-mongo with a host directory (or use it as a named volume)
      #- ./logs/apim-mongodb:/var/log/mongodb
      #- /f/home/ehub/gravitee/mongodb/data:/data/db
      - ./gravitee/mongodb/data:/data/db
    networks: # join the specified networks
      - ehub_storage # custom network name
  elasticsearch:
    #image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-7.7.0}
    image: docker.elastic.co/elasticsearch/elasticsearch:8.8.1
    container_name: ehub_gio_apim_elasticsearch
    restart: always # restart together with the Docker service
    volumes:
      #- data-elasticsearch:/usr/share/elasticsearch/data # replace data-elasticsearch with your own host directory
      #- data-elasticsearch:/var/lib/elasticsearch/data # named volume
      #- /f/home/ehub/gravitee/elasticsearch/data:/var/lib/elasticsearch/data # absolute host path mapping
      - ./gravitee/elasticsearch/data:/var/lib/elasticsearch/data
    environment: # environment variables for the container
      - http.host=0.0.0.0
      - transport.host=0.0.0.0
      - xpack.security.enabled=false
      #- xpack.monitoring.enabled=false
      - cluster.name=elasticsearch
      - bootstrap.memory_lock=true
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    #ulimits: # ulimits sets the container's ulimit values, e.g. a maximum of 65535 processes (nproc: 65535), or file handles of 20000 (soft limit, which the application may change at any time up to the hard limit) and 40000 (hard limit, which only root can raise).
    #  memlock:
    #    soft: -1
    #    hard: -1
    #  nofile: 65536
    #  #nproc: 65535
    #  #nofile:
    #  #  soft: 20000
    #  #  hard: 40000
    networks:
      - ehub_storage
  gateway:
    #image: graviteeio/apim-gateway:${APIM_VERSION:-3}
    image: graviteeio/apim-gateway:4.0
    container_name: ehub_gio_apim_gateway
    restart: always # restart sets the restart policy after the container exits; always restarts it every time. Useful for keeping a service running; in production always or unless-stopped is recommended.
    ports: # exposed ports, in HOST:CONTAINER format, or the container port alone (the host port is then chosen at random). Port strings should be quoted.
      - "38082:8082"
    depends_on: # expresses dependencies and start order: this service depends on mongodb and elasticsearch, so they are started before gateway.
      - mongodb
      - elasticsearch
    volumes: # volume mounts, either a host path (HOST:CONTAINER) or a named volume (VOLUME:CONTAINER); an access mode can be appended (HOST:CONTAINER:ro).
      #- ./logs/apim-gateway:/opt/graviteeio-gateway/logs # path relative to this compose file
      #- data-gateway-plugins-volume:/opt/graviteeio-gateway/plugins-ext # named volume
      #- data-gateway-log-volume:/opt/graviteeio-gateway/logs # named volume
      #- /f/home/ehub/gravitee/apim-gateway/plugins:/opt/graviteeio-gateway/plugins-ext
      #- /f/home/ehub/gravitee/apim-gateway/logs:/opt/graviteeio-gateway/logs
      - ./gravitee/apim-gateway/plugins:/opt/graviteeio-gateway/plugins-ext
      - ./gravitee/apim-gateway/logs:/opt/graviteeio-gateway/logs
    environment: # environment variables, written either as an array or as a dictionary
      #- gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
      #- gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
      #- gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
      #- gravitee_management_mongodb_uri="mongodb://ehub_gio_apim_mongodb:27017/gravitee-apim?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000"
      #- gravitee_ratelimit_mongodb_uri="mongodb://ehub_gio_apim_mongodb:27017/gravitee-apim?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000"
      - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee-apim?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
      - gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee-apim?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
      - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
      - gravitee_plugins_path_0=/opt/graviteeio-gateway/plugins
      - gravitee_plugins_path_1=/opt/graviteeio-gateway/plugins-ext
    networks:
      - ehub_storage
      - ehub_frontend
  management_api:
    #image: graviteeio/apim-management-api:${APIM_VERSION:-3}
    image: graviteeio/apim-management-api:4.0
    container_name: ehub_gio_apim_management_api
    restart: always
    ports:
      - "38083:8083"
    links: # link to containers of another service; specify the service name plus a link alias (SERVICE:ALIAS), or just the service name, e.g. - db / - db:database
      - mongodb
      - elasticsearch
    depends_on:
      - mongodb
      - elasticsearch
    volumes:
      #- ./logs/apim-management-api:/opt/graviteeio-management-api/logs
      #- data-management-api-plugins-volume:/opt/graviteeio-management-api/plugins-ext # named volume
      #- data-management-api-log-volume:/opt/graviteeio-management-api/logs # named volume
      #- /f/home/ehub/gravitee/apim-management-api/plugins:/opt/graviteeio-management-api/plugins-ext
      #- /f/home/ehub/gravitee/apim-management-api/logs:/opt/graviteeio-management-api/logs
      - ./gravitee/apim-management-api/plugins:/opt/graviteeio-management-api/plugins-ext
      - ./gravitee/apim-management-api/logs:/opt/graviteeio-management-api/logs
    environment:
      #- gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
      #- gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200
      #- gravitee_management_mongodb_uri="mongodb://ehub_gio_apim_mongodb:27017/gravitee-apim?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000"
      - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee-apim?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
      - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200
      - gravitee_plugins_path_0=/opt/graviteeio-management-api/plugins
      - gravitee_plugins_path_1=/opt/graviteeio-management-api/plugins-ext
    networks:
      - ehub_storage
      - ehub_frontend
  management_ui:
    #image: graviteeio/apim-management-ui:${APIM_VERSION:-3}
    image: graviteeio/apim-management-ui:4.0
    container_name: ehub_gio_apim_management_ui
    restart: always
    ports:
      - "38084:8080"
    depends_on:
      - management_api
    environment:
      #- MGMT_API_URL=http://localhost:8083/management/organizations/DEFAULT/environments/DEFAULT/ # replace localhost with your server's IP address
      - MGMT_API_URL=http://192.168.1.102:38083/management/organizations/DEFAULT/environments/DEFAULT
    volumes:
      #- ./logs/apim-management-ui:/var/log/nginx # path relative to this compose file
      #- data-management-ui-log-volume:/var/log/nginx # named volume
      #- /f/home/ehub/gravitee/apim-management-ui/logs:/var/log/nginx # absolute host path mapping
      - ./gravitee/apim-management-ui/logs:/var/log/nginx
    networks:
      - ehub_frontend
  portal_ui:
    #image: graviteeio/apim-portal-ui:${APIM_VERSION:-3}
    image: graviteeio/apim-portal-ui:4.0
    container_name: ehub_gio_apim_portal_ui
    restart: always
    ports:
      - "38085:8080"
    depends_on:
      - management_api
    environment:
      #- PORTAL_API_URL=http://localhost:8083/portal/environments/DEFAULT # replace localhost with your server's IP address
      - PORTAL_API_URL=http://192.168.1.102:38083/portal/environments/DEFAULT
    volumes:
      #- ./logs/apim-portal-ui:/var/log/nginx # path relative to this compose file
      #- /gravitee/apim-portal-ui/logs:/var/log/nginx # absolute host path mapping
      #- data-portal-ui-log-volume:/var/log/nginx # named volume
      #- /f/home/ehub/gravitee/apim-portal-ui/logs:/var/log/nginx # absolute host path mapping
      - ./gravitee/apim-portal-ui/logs:/var/log/nginx
    networks:
      - ehub_frontend
  # 2. The two Hop services
  hop_web:
    image: apache/hop-web:latest
    container_name: ehub_hop_web
    restart: always
    ports:
      - "38086:8080"
    volumes:
      #- /f/home/ehub/hop-web/config:/config/
      #- /f/home/ehub/hop-web/logs:/usr/local/tomcat/logs/
      #- /f/home/ehub/hop-web/plugins/databases/mysql/lib:/usr/local/tomcat/plugins/databases/mysql/lib/
      #- /f/home/ehub/hop-web/plugins/databases/mssql/lib:/usr/local/tomcat/plugins/databases/mssql/lib/
      #- /f/home/ehub/hop-web/plugins/databases/oracle/lib:/usr/local/tomcat/plugins/databases/oracle/lib/
      - ./hop-web/config:/config/
      - ./hop-web/logs:/usr/local/tomcat/logs/
      - ./hop-web/plugins/databases/mysql/lib:/usr/local/tomcat/plugins/databases/mysql/lib/
      - ./hop-web/plugins/databases/mssql/lib:/usr/local/tomcat/plugins/databases/mssql/lib/
      - ./hop-web/plugins/databases/oracle/lib:/usr/local/tomcat/plugins/databases/oracle/lib/
  hop_server:
    image: apache/hop:latest # apache/hop:2.5.0 apache/hop:latest
    container_name: ehub_hop_server
    restart: always
    ports:
      - "38087:8080"
    volumes:
      #- /f/home/ehub/hop/hop.err.log:/opt/hop/hop.err.log/
      #- /f/home/ehub/hop/files:/files/
      - ./hop/hop.err.log:/opt/hop/hop.err.log/
      - ./hop/files:/files/
    environment:
      - HOP_SERVER_PORT=8080
      - HOP_SERVER_USER=admin
      - HOP_SERVER_PASS=admin
      - HOP_PROJECT_FOLDER=/files
      - HOP_SERVER_METADATA_FOLDER=/files/metadata
      - HOP_SHARED_JDBC_FOLDER=/files/jdbc
      - HOP_PROJECT_NAME=demoProject
      - HOP_ENVIRONMENT_NAME=Local
      - HOP_SERVER_MAX_OBJECT_TIMEOUT=14400000
  # 3. The Tresin system-management service
  tresin_system:
    #image: 192.168.1.153:5000/tresinbackendbase:ndri.lic.off
    image: 192.168.1.153:5000/tresinbackendbase:2.15beta
    container_name: ehub_tresin_system
    restart: always
    ports:
      - "38088:80"
    volumes:
      #- /f/home/ehub/tresin-system/appsettings.json:/app/appsettings.json
      #- /f/home/ehub/tresin-system/Cert/id4svr.pfx:/app/Cert/id4svr.pfx
      #- /f/home/ehub/tresin-system/vault/:/home/vault
      #- /f/home/ehub/tresin-system/logs/:/app/logs
      - ./tresin-system/appsettings.json:/app/appsettings.json
      - ./tresin-system/Cert/id4svr.pfx:/app/Cert/id4svr.pfx
      - ./tresin-system/vault/:/home/vault
      - ./tresin-system/logs/:/app/logs
  # 4. The Camunda 7 service
  camunda:
    image: camunda-plugin-history:7.18
    container_name: ehub_camunda_plugin_history
    restart: always
    ports:
      - "38089:8080"
    #links:
    #  mysql:db
    volumes:
      #- /f/home/ehub/camunda-plugin/default.yml:/camunda/configuration/default.yml
      - ./camunda-plugin/default.yml:/camunda/configuration/default.yml
    environment:
      #- file=/f/home/ehub/camunda-plugin/env.txt
      - file=./camunda-plugin/env.txt
      #- /f/home/ehub/camunda-plugin/env.txt
    #env_file: # relative path
    #  - ./camunda-plugin/env.txt
  # 5. The LogerView service
  logerview:
    image: 192.168.1.153:5000/tsdhlogviewer:0.4
    container_name: ehub_logerview
    restart: always
    depends_on:
      - camunda
    ports:
      - "38090:80"
    volumes:
      #- /f/home/ehub/logviewer/appsettings.json:/app/appsettings.json
      #- /f/home/ehub/logviewer/logs/:/app/logs
      - ./logviewer/appsettings.json:/app/appsettings.json
      - ./logviewer/logs/:/app/logs
  # 6. The MQ (RocketMQ) services
  rmqnamesrv:
    image: apache/rocketmq:5.1.3
    container_name: ehub_rmqnamesrv
    restart: always
    ports:
      - 9876:9876
    volumes:
      #- /f/home/ehub/rocketmq/rmqnamesrv/logs:/home/rocketmq/logs
      - ./rocketmq/rmqnamesrv/logs:/home/rocketmq/logs
    environment:
      #- "MAX_POSSIBLE_HEAP=100000000"
      #- "JAVA_OPT_EXT=-Xms512M -Xmx512M -Xmn128m"
      JAVA_OPT_EXT: "-Duser.home=/home/rocketmq -Xms512M -Xmx512M -Xmn128m"
    command: ["sh","mqnamesrv"]
  rocketmqbroker:
    image: apache/rocketmq:5.1.3
    container_name: ehub_rocketmq_broker
    restart: always
    links:
      - rmqnamesrv:namesrv
    ports:
      #- "10911:10911"
      #- "10912:10912"
      #- "10909:10909"
      - 10909:10909
      - 10911:10911
      - 10912:10912
    volumes:
      #- /f/home/ehub/rocketmq/broker/logs:/home/rocketmq/logs
      #- /f/home/ehub/rocketmq/broker/store:/home/rocketmq/store
      #- /f/home/ehub/rocketmq/broker/broker.conf:/home/rocketmq/rocketmq-5.1.3/conf/broker.conf
      - ./rocketmq/broker/logs:/home/rocketmq/logs
      - ./rocketmq/broker/store:/home/rocketmq/store
      - ./rocketmq/broker/broker.conf:/home/rocketmq/rocketmq-5.1.3/conf/broker.conf
    environment:
      #- "NAMESRV_ADDR=namesrv:9876"
      #- "JAVA_OPT_EXT=-Xms512M -Xmx512M -Xmn128m"
      #- "MAX_POSSIBLE_HEAP=200000000"
      NAMESRV_ADDR: namesrv:9876
      JAVA_OPT_EXT: "-Duser.home=/home/rocketmq -Xms512M -Xmx512M -Xmn128m"
    #command: ["sh","mqbroker","-c","/home/rocketmq/rocketmq-4.9.4/conf/broker.conf"]
    command: ["sh","mqbroker","-c","../conf/broker.conf","autoCreateTopicEnable=true"]
  rocketmq-dashboard:
    #image: styletang/rocketmq-console-ng:latest
    image: apacherocketmq/rocketmq-dashboard:latest
    container_name: ehub_rocketmq_dashboard # ehub_rocketmq_console_ng
    restart: always
    links:
      - rmqnamesrv:namesrv
    depends_on:
      - rmqnamesrv
    ports:
      - "38091:8080"
    environment:
      #- "JAVA_OPTS=-Drocketmq.namesrv.addr=192.168.27.129:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
      JAVA_OPTS: "-Drocketmq.namesrv.addr=namesrv:9876"
  # 7. The Nginx service
  nginx:
    image: nginx:latest
    container_name: ehub_nginx
    restart: always
    ports:
      - "9001:80"
    volumes:
      #- /f/home/ehub/nginx/conf/nginx.conf:/etc/nginx/nginx.conf
      #- /f/home/ehub/nginx/conf/conf.d:/etc/nginx/conf.d
      #- /f/home/ehub/nginx/log:/var/log/nginx
      #- /f/home/ehub/nginx/html:/usr/share/nginx/html
      - ./nginx/conf/nginx.conf:/etc/nginx/nginx.conf
      - ./nginx/conf/conf.d:/etc/nginx/conf.d
      - ./nginx/log:/var/log/nginx
      - ./nginx/html:/usr/share/nginx/html
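As a rough sketch of how this file might be used (assuming it is saved as docker-compose.yml and that the host directories it mounts, such as ./gravitee, ./hop-web and ./nginx/conf, already exist next to it with suitable permissions), the stack can be validated and started like this:
# validate the file; prints nothing if the configuration is valid
docker compose config -q
# create the networks and start every service in the background
docker compose up -d
# check container status and follow one service's logs
docker compose ps
docker compose logs -f gateway
# tear everything down again (networks included)
docker compose down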
Another example:
docker-compose.yml
version: '3' # note: '1' is not a valid value for this key; the legacy v1 format had no version key (and no services block) at all
services:
  mysql:
    container_name: mysql-crane
    restart: always
    image: mysql
    ports:
      - "13306:3306"
    volumes:
      - ./mysql/mydir:/mydir
      - ./mysql/datadir:/var/lib/mysql
    environment:
      - "MYSQL_ROOT_PASSWORD=root"
      - "MYSQL_DATABASE=root"
      - "TZ=Asia/Shanghai"
  gateway:
    image: 192.168.1.153:5000/tresinbackendgateway
    ports:
      - "15000:80"
    container_name: crane-gateway
    volumes:
      - "./gateway/configuration.json:/app/configuration.json"
  system:
    image: 192.168.1.153:5000/tresinbackendbase:crane
    container_name: crane-system
    ports:
      - "15001:80"
    depends_on:
      - gateway
      - mysql
    volumes:
      - "./system/appsettings.json:/app/appsettings.json"
      - "./system/Cert/id4svr.pfx:/app/Cert/id4svr.pfx"
      - "./system/vault/:/home/vault"
      - "./system/logs/:/app/logs"
  nginx:
    container_name: crane-nginx
    image: nginx
    ports:
      - "16001:80"
      - "16020:8080"
    volumes:
      - "./nginx/html:/usr/share/nginx/html"
      - "./nginx/conf.d/:/etc/nginx/conf.d/"
  crane:
    container_name: crane-back
    ports:
      - "15020:80"
    depends_on:
      - mysql
    image: 192.168.1.153:5000/ndricrane
    volumes:
      - "./crane/appsettings.json:/app/appsettings.json"
  # tdengine:
  #   container_name: tdengine
  #   image: tdengine/tdengine:2.4.0.26
  #   ports:
  #     - "6030-6049:6030-6049"
  #     - "6030-6049:6030-6049/udp"
  #   volumes:
  #     - ./taos:/etc/taos
4. Common Docker Compose Commands
Note:
# older versions use the docker-compose prefix
# newer versions use the docker compose prefix
# start the containers
docker compose up
# start the containers in the background (the compose file is named docker-compose.yml by default)
docker compose up -d
# specify a yaml file and run the containers in the background
docker compose -f docker-compose.prod.yaml up -d
# view the logs
docker compose logs -f <service-name>
# you can also view a container's logs directly by container ID
docker logs -f <container-id>
# enter a container
docker attach <container-id>                 # not recommended: the container stops when you exit
docker exec -it <container-id> /bin/bash     # recommended
# stop the containers and clean up the associated resources (recommended). docker stop by itself does not remove the container's networks and volumes; the command below stops the containers and removes the containers and networks defined in the compose file (add -v to also remove the named volumes)
docker compose down
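To make the difference concrete, here is a small sketch (reusing the stack from the examples above) of stop/start versus down:
# stop only: containers are stopped, but the containers, networks and volumes are kept
docker compose stop
docker compose start          # the same containers come back up
# down: stop and remove the containers and networks; add -v to remove the named volumes as well
docker compose down -v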
Help documentation
docker compose --help
docker@default:~$ docker-compose --help
Define and run multi-container applications with Docker.
Usage:
  docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
  docker-compose -h|--help
Options:
  -f, --file FILE             Specify an alternate compose file
                              (default: docker-compose.yml)
  -p, --project-name NAME     Specify an alternate project name
                              (default: directory name)
  --verbose                   Show more output
  --log-level LEVEL           Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
  --no-ansi                   Do not print ANSI control characters
  -v, --version               Print version and exit
  -H, --host HOST             Daemon socket to connect to
  --tls                       Use TLS; implied by --tlsverify
  --tlscacert CA_PATH         Trust certs signed only by this CA
  --tlscert CLIENT_CERT_PATH  Path to TLS certificate file
  --tlskey TLS_KEY_PATH       Path to TLS key file
  --tlsverify                 Use TLS and verify the remote
  --skip-hostname-check       Don't check the daemon's hostname against the
                              name specified in the client certificate
  --project-directory PATH    Specify an alternate working directory
                              (default: the path of the Compose file)
  --compatibility             If set, Compose will attempt to convert keys
                              in v3 files to their non-Swarm equivalent
Commands:
  build              Build or rebuild services
  bundle             Generate a Docker bundle from the Compose file
  config             Validate and view the Compose file
  create             Create services
  down               Stop and remove containers, networks, images, and volumes
  events             Receive real time events from containers
  exec               Execute a command in a running container
  help               Get help on a command
  images             List images
  kill               Stop a service's containers by sending a SIGKILL signal
  logs               View service log output from containers
  pause              Pause services
  port               Print the public port for a port binding
  ps                 List running containers
  pull               Pull service images
  push               Push service images
  restart            Restart the services defined in the YAML file
  rm                 Remove stopped service containers
  run                Run a one-off command on a service
  scale              Set the number of containers to run for a service
  start              Start services
  stop               Stop running services
  top                Display the running processes of each service's containers
  unpause            Unpause services
  up                 Create and start containers
  version            Show Docker Compose version information
Command reference
docker-compose
docker-compose [-f <args>...] [options] [COMMAND] [ARGS...]
Options [-f <args>...]:
  -f, --file FILE            Specify the Compose template file (default: docker-compose.yml). May be given more than once.
                             $ docker-compose -f docker-compose.yml up -d
  -p, --project-name NAME    Specify the project name (default: the name of the current directory).
  -x-network-driver          Use Docker's pluggable network backends; requires Docker 1.9+.
  -x-network-driver DRIVER   Specify the network backend driver (default: bridge); requires Docker 1.9+.
  --verbose                  Print more debugging output.
  -v, --version              Print the version and exit.
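For example, -f and -p can be combined to run a single project from several files under an explicit name (docker-compose.prod.yml and myproject are hypothetical names here, not files from this article):
# later files override values from earlier ones; -p sets the project name used as a resource prefix
$ docker-compose -f docker-compose.yml -f docker-compose.prod.yml -p myproject up -d
$ docker-compose -p myproject ps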
docker-compose up
Start all services.
docker-compose up [options] [--scale SERVICE=NUM...] [SERVICE...]
Options:
  -d                          Run the service containers in the background as daemons.
  --no-color                  Do not use color to distinguish the output of different services.
  --no-deps                   Do not start linked services.
  --force-recreate            Force the containers to be recreated; cannot be used together with --no-recreate.
  --no-recreate               Do not recreate containers that already exist; cannot be used together with --force-recreate.
  --no-build                  Do not automatically build missing service images.
  --build                     Build the service images before starting the containers.
  --abort-on-container-exit   Stop all containers if any container stops; cannot be used together with -d.
  -t, --timeout TIMEOUT       Timeout in seconds used when stopping containers (default: 10).
  --remove-orphans            Remove containers for services that are not defined in the compose file.
  --scale SERVICE=NUM         Set the number of containers to run for a service; overrides any scale setting in the compose file.
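For example, a typical combination of these options (the web service name here is hypothetical):
# rebuild images, recreate containers, run three copies of web, all in the background
$ docker-compose up -d --build --force-recreate --scale web=3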
docker-compose ps
List all containers in the current project.
docker-compose ps [options] [SERVICE...]
docker@default:~$ docker-compose ps
ERROR:
        Can't find a suitable configuration file in this directory or any
        parent. Are you in the right directory?
        Supported filenames: docker-compose.yml, docker-compose.yaml
docker@default:~$ docker-compose ps
    Name       Command    State   Ports
----------------------------------------
swoft_swoft_1  /bin/bash  Exit 0
docker-compose -h
Show docker-compose help.
docker-compose down
Stop and remove containers, networks, volumes, and images.
docker-compose down [options]
Options:
  --rmi type                  Remove images; type is one of:
                              all: remove every image used by a service in the compose file
                              local: remove only images that have no custom tag
  -v, --volumes               Remove the named volumes declared in the compose file and the anonymous volumes attached to containers.
  --remove-orphans            Remove containers for services that are not defined in the compose file.
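For example, a full cleanup that also removes images and volumes:
# remove containers, networks, all service images, named/anonymous volumes, and orphan containers
$ docker-compose down --rmi all -v --remove-orphans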
docker-compose logs
Show the output of the service containers. By default docker-compose uses a different color for each service's output; colors can be turned off with --no-color.
docker-compose logs [options] [SERVICE...]
Example:
root@default:/var/www/swoft# docker-compose logs
Attaching to swoft_swoft_1
swoft_1 | root@cd054651dfcb:/var/www/swoft# exit
docker-compose build
Build or rebuild the service containers in the project. Once built, the service images are tagged, and you can rerun docker-compose build in the project directory at any time to rebuild them.
docker-compose build [options] [--build-arg key=val...] [SERVICE...]
Options:
  --compress                  Compress the build context using gzip.
  --force-rm                  Always remove the intermediate containers created during the build.
  --no-cache                  Do not use the cache when building the image.
  --pull                      Always attempt to pull a newer version of the image.
  -m, --memory MEM            Set the memory limit for the build container.
  --build-arg key=val         Set build-time variables for the service.
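For example, forcing a clean rebuild of a single service (the web service name and the APP_VERSION build argument are hypothetical):
# rebuild web from scratch, pulling fresh base images and passing a build-time variable
$ docker-compose build --no-cache --pull --build-arg APP_VERSION=1.2.3 web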
docker-compose pull
Pull the images that the services depend on.
docker-compose pull [options] [SERVICE...]
Options:
  --ignore-pull-failures      Ignore errors that occur while pulling images.
  --parallel                  Pull multiple images at the same time.
  --quiet                     Do not print progress information while pulling.
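For example, refreshing all images before an upgrade:
# pull every service image in parallel and keep going even if one registry is unreachable
$ docker-compose pull --parallel --ignore-pull-failures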
docker-compose restart
Restart the services in the project.
docker-compose restart [options] [SERVICE...]
Options:
  -t, --timeout TIMEOUT       Timeout in seconds for stopping the containers before restarting them (default: 10).
docker-compose rm
Remove all stopped service containers. It is recommended to run docker-compose stop first to stop the containers.
docker-compose rm [options] [SERVICE...]
Options:
  -f, --force                 Do not ask for confirmation before removing.
  -v                          Also remove any anonymous volumes attached to the containers.
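A typical cleanup sequence, stopping first and then removing the stopped containers together with their anonymous volumes:
$ docker-compose stop
$ docker-compose rm -f -v     # -f skips the confirmation prompt, -v removes anonymous volumes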
docker-compose start
Start existing service containers.
docker-compose start [SERVICE...]
docker-compose run
Run a one-off command against a service.
docker-compose run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
Example: ping www.baidu.com 10 times from the ubuntu container:
$ docker-compose run ubuntu ping www.baidu.com -c 10
docker-compose scale
Set the number of containers to run for each service, passed as service=num parameters.
docker-compose scale [SERVICE=NUM...]
Example:
$ docker-compose scale web=3 db=2
docker-compose pause
Pause service containers.
docker-compose pause [SERVICE...]
docker-compose kill
Force-stop service containers by sending the SIGKILL signal; a different signal can be sent with -s.
docker-compose kill [options] [SERVICE...]
Example: send SIGINT to force-stop the service containers:
$ docker-compose kill -s SIGINT
docker-compose config
Validate and view the compose file configuration.
docker-compose config [options]
Options:
  --resolve-image-digests     Pin image tags to digests.
  -q, --quiet                 Validate only, without printing anything: nothing is printed when the configuration is valid, and errors are printed when it is not.
  --services                  Print the service names, one per line.
  --volumes                   Print the volume names, one per line.
Example:
root@default:/var/www/swoft# docker-compose config
services:
  swoft:
    entrypoint:
      - /bin/bash
    image: swoft/swoft:latest
    ports:
      - 83:83/tcp
    privileged: true
    stdin_open: true
    tty: true
    volumes:
      - /share/swoft:/var/www/swoft:rw
version: '3.0'
docker-compose create
Create containers for the services.
docker-compose create [options] [SERVICE...]
Options:
  --force-recreate            Recreate the containers even if their configuration and image have not changed; incompatible with --no-recreate.
  --no-recreate               Do not recreate containers that already exist; incompatible with --force-recreate.
  --no-build                  Do not build missing images.
  --build                     Build images before creating the containers.
docker-compose exec
Execute a command in a running container of a service.
docker-compose exec [options] SERVICE COMMAND [ARGS...]
Options:
  -d                          Detached mode: run the command in the background.
  --privileged                Give the process extended privileges.
  -T                          Disable pseudo-TTY allocation (by default docker-compose exec allocates a TTY).
  --index=index               When a service has more than one container, choose which container of that service to run the command in.
Example:
$ docker-compose exec --index=1 web /bin/bash
docker-compose port
Show the public (host) port that a container port is mapped to.
docker-compose port [options] SERVICE PRIVATE_PORT
Options:
  --protocol=proto            Port protocol, tcp (default) or udp.
  --index=index               If the same service has multiple containers, the index of the target container (default: 1).
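For example, asking which host port container port 80 of a service is published on (nginx here refers to the service from the first example file above):
$ docker-compose port --protocol=tcp --index=1 nginx 80
# prints the host binding, e.g. 0.0.0.0:9001 for the nginx service from the first example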
docker-compose push
Push the images for the services.
docker-compose push [options] [SERVICE...]
Options:
  --ignore-push-failures      Ignore errors that occur while pushing images.
docker-compose stop
Stop running service containers without removing them; they can be started again with docker-compose start.
docker-compose stop [options] [SERVICE...]
docker-compose unpause
Unpause services that are in the paused state.
docker-compose unpause [SERVICE...]
docker-compose version
Print Docker Compose version information.
root@default:/var/www/swoft# docker-compose version
docker-compose version 1.24.0, build 0aa59064
docker-py version: 3.7.2
CPython version: 3.6.8
OpenSSL version: OpenSSL 1.1.0j 20 Nov 2018
References:
https://juejin.cn/post/7051057669896929288
https://www.jianshu.com/p/c51d92a9f91d
https://www.cnblogs.com/morang/p/17976704/devops-docker-elk-install-nginx-config