Docker
安裝
# 安裝gcc相關環境
yum -y install gcc
yum -y install gcc-c++
## 解除安裝舊版本
yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
# 安裝
yum install -y yum-utils
yum-config-manager \
--add-repo \
https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
ce 社群版 ee 企業版
yum install docker-ce[docker-ee] docker-ce-cli[docker-ee-cli] containerd.io docker-compose-plugin
安裝指定版本
yum install docker-ce-<VERSION_STRING> docker-ce-cli-<VERSION_STRING> containerd.io docker-compose-plugin
解除安裝
sudo yum remove docker-ce docker-ce-cli containerd.io docker-compose-plugin
sudo rm -rf /var/lib/docker
sudo rm -rf /var/lib/containerd
啟動
systemctl start/stop/status/enable/disable docker
命令
查版本/查資訊
docker version/info
設定阿里雲加速
# Create the Docker config directory and write the Aliyun registry-mirror
# config, then reload systemd units and restart the docker daemon so the
# mirror takes effect.
sudo mkdir -p /etc/docker
# <<-'EOF': quoted delimiter means no variable expansion inside the heredoc;
# the leading '-' strips leading *tabs* (not spaces) from the body.
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://todnba9t.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
映象相關命令
檢視全部映象 [-a 全部; -q 只ID]
docker images [-aq]
hub上搜尋映象 [過濾*大於等於1000的]
docker search mysql [-f stars=1000]
hub上拉取映象 [指定版本 預設latest]
docker pull mysql[:5.7]
刪除指定映象 [刪除多個]
docker rmi -f 映象ID [映象ID 映象ID]
刪除所有映象
docker rmi -f $(docker images -aq)
容器相關命令
互動方式執行映象
docker run [引數] image
# --name="名字" # 指定容器名字
# -d # 後臺執行
# -it # 互動方式執行並進入容器
# -p # 指定埠
# -p # ip:主機埠:容器埠 配置主機埠對映到容器埠
# -p # 主機埠:容器埠
# -p # 容器埠
# -P # 隨機指定埠
# eg: docker run -it centos /bin/bash
退出容器並停止 Ctrl+P+Q 不停止退出
exit
列出執行中容器 [全部帶歷史 ][只ID][最近建立的兩個]
docker ps [-a][-q][-n=2]
刪除指定的容器,不能刪除正在執行的容器,強制刪除使用 rm -f
docker rm 容器ID
刪除全部
docker rm -f $(docker ps -aq)
啟動停止重啟殺死
docker start/stop/restart/kill 容器ID
其他
檢視日誌
docker logs -tf 容器ID
#num為要顯示的日誌條數
docker logs --tail num 容器ID
看容器程序資訊
docker top 容器ID
看容器後設資料資訊
docker inspect 容器id
進入容器
docker exec 容器ID # 進入容器後開啟一個新的終端,可以在裡面操作 docker exec -it bd2a1db199b7 /bin/bash
docker attach 容器ID # 進入容器正在執行的終端,不會啟動新的程序 docker attach bd2a1db199b7
複製容器檔案到主機
docker cp 容器id:/容器內路徑 目的主機路徑 # docker cp bd2a1db199b7:/home/test.java /home
檢視容器cpu資訊
docker stats
docker 安裝 nginx
docker search nginx
docker pull nginx
docker run --name nginx01 -d -p 3344:80 nginx
docker 安裝 tomcat
docker pull tomcat:9.0
docker run -it --rm tomcat:9.0 ##直接啟動關閉就刪除 測試用
docker run --name tomcat01 -d -p 3344:8080 tomcat:9.0
docker 安裝 es+kibana
docker run -d --name es01 -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" elasticsearch:7.3.0
docker 安裝 mysql
docker pull mysql:5.7
docker run -d -p 3310:3306 -e MYSQL_ROOT_PASSWORD=123456 --name mysql01 -v /home/mysql/conf:/etc/mysql/conf.d -v /home/mysql/data:/var/lib/mysql mysql:5.7
打包自己的映象
docker commit -a="wendy" -m="tomcat with root page" 027db740109a tomcat8-with-root-page:1.0
容器卷
容器卷掛載
##本機路徑:容器路徑 docker inspect 容器id 中Mounts塊檢視掛載是否成功
docker run -it -v /home/share:/home centos
# -v 容器內路徑 # 匿名掛載 -v /etc/nginx
# -v 卷名:容器內路徑 # 具名掛載 -v juming:/etc/nginx
# -v /宿主機路徑:容器內路徑 # 指定路徑掛載 -v /home/nginx:/etc/nginx
# -v 路徑:路徑:ro/rw # 指定許可權(ro->readonly rw->readwrite) -v juming:/etc/nginx:ro
檢視掛載資訊
# 檢視匿名/具名掛載卷 [詳情,具體哪個目錄][刪全部][刪一個][新建]
docker volume ls [inspect 名字][prune][rm][create]
[root@localhost /]# docker volume ls
DRIVER VOLUME NAME
local 9c234d2dc4b5124fe85a11cc38fe066fd6e6b2d5be90587b0e48c31af91369d4
local juming
[root@localhost /]# docker volume inspect 9c234d2dc4b5124fe85a11cc38fe066fd6e6b2d5be90587b0e48c31af91369d4
容器間共享卷
# 先啟動一個有掛載的容器 ["volume1","volume2"]
docker run -it --name docker01 wendy-centos:1.0 /bin/bash
## --volumes-from 一個有掛載的容器 就可以實現資料共享 (複製模式共享,刪除docker01也不影響docker02)
docker run -it --name docker02 --volumes-from docker01 wendy-centos:1.0 /bin/bash
DockerFile
dockerfile 掛載
dockerfile1 檔案內容
FROM centos
# Anonymous volume mounts (匿名掛載).
# NOTE: Dockerfile comments must use '#' on their own line; the original
# trailing '// 匿名掛載' is not comment syntax and corrupts the VOLUME
# instruction (it stops parsing as a JSON array).
VOLUME ["volume1","volume2"]
# Only the last CMD takes effect; the echo below is overridden by /bin/bash.
CMD echo "-----end-----"
CMD /bin/bash
構建
docker build -f dockerfile1 -t wendy-centos:1.0 .
構建dockerfile命令
docker build -f dockerfile1 -t wendy-centos:1.0 .
dockerfile 常用指令
FROM # 基礎映象,一切從這裡構建
MAINTAINER # 映象是誰寫的 名字<郵箱>
RUN # 映象構建的時候需要執行的命令
ADD # 為映象新增內容(壓縮包)
WORKDIR # 映象的工作目錄
VOLUME # 掛載目錄
EXPOSE # 暴露埠
CMD # 指定這個容器啟動的時候要執行的命令,只有最後一個會生效,相當替換
ENTRYPOINT # 指定這個容器啟動的時候要執行的命令,追加
ONBUILD # 當構建一個被整合dockerfile這個時候會執行ONBUILD 觸發指令
COPY # 類似ADD 將我們檔案複製到映象中
ENV # 設定環境變數(構建時與容器執行時均生效)
初步構建
構建一個含有vim和ifconfig命令的centos
[root@localhost docker-test-v]# cat mydockerfile-centos
# Build a CentOS 7 image that additionally ships vim and ifconfig (net-tools).
FROM centos:7
# NOTE(review): MAINTAINER is deprecated in current Docker; LABEL maintainer=... is preferred.
MAINTAINER wendy<zhiwen.ji@qq.com>
# Default working directory inside the container.
ENV MYPATH /usr/local
WORKDIR $MYPATH
RUN yum -y install vim
RUN yum -y install net-tools
EXPOSE 8888
# Only the last CMD takes effect; the two echo lines below are overridden
# by CMD /bin/bash (this is part of the demonstration).
CMD echo $MYPATH
CMD echo "-----end-----"
CMD /bin/bash
構建命令
docker build -f mydockerfile-centos -t mydfcentos:0.1 .
正常centos和構建centos對比
正常:
[root@localhost ~]# docker run -it centos:7 /bin/bash
[root@8d051480d5af /]# ls
anaconda-post.log bin dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
[root@8d051480d5af /]# vim a
bash: vim: command not found
[root@8d051480d5af /]# ifconfig
bash: ifconfig: command not found
構建:
[root@localhost docker-test-v]# docker run -it mydfcentos:0.1 /bin/bash
[root@c26533c126c0 local]# pwd
/usr/local
[root@c26533c126c0 local]# ls
bin etc games include lib lib64 libexec sbin share src
[root@c26533c126c0 local]# vim a
[root@c26533c126c0 local]# ls
a bin etc games include lib lib64 libexec sbin share src
[root@c26533c126c0 local]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.17.0.3 netmask 255.255.0.0 broadcast 172.17.255.255
ether 02:42:ac:11:00:03 txqueuelen 0 (Ethernet)
RX packets 8 bytes 648 (648.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
檢視構建步驟歷史
# docker history 映象ID
[root@localhost docker-test-v]# docker history 35c22c10d0f2
IMAGE CREATED CREATED BY SIZE COMMENT
35c22c10d0f2 13 minutes ago /bin/sh -c #(nop) CMD ["/bin/sh" "-c" "/bin… 0B
34421c0487cf 13 minutes ago /bin/sh -c #(nop) CMD ["/bin/sh" "-c" "echo… 0B
684cecdf472a 13 minutes ago /bin/sh -c #(nop) CMD ["/bin/sh" "-c" "echo… 0B
1901c612ec6a 13 minutes ago /bin/sh -c #(nop) EXPOSE 8888 0B
bece07184685 13 minutes ago /bin/sh -c yum -y install net-tools 177MB
f4f90fc61e6d 13 minutes ago /bin/sh -c yum -y install vim 232MB
8f0931e4ba0a 13 minutes ago /bin/sh -c #(nop) WORKDIR /usr/local 0B
113d7c9b5a7b 13 minutes ago /bin/sh -c #(nop) ENV MYPATH=/usr/local 0B
da8cae163faf 13 minutes ago /bin/sh -c #(nop) MAINTAINER wendy<zhiwen.j… 0B
eeb6ee3f44bd 10 months ago /bin/sh -c #(nop) CMD ["/bin/bash"] 0B
<missing> 10 months ago /bin/sh -c #(nop) LABEL org.label-schema.sc… 0B
<missing> 10 months ago /bin/sh -c #(nop) ADD file:b3ebbe8bd304723d4… 204MB
CMD和ENTRYPOINT區別展示
CMD
[root@localhost docker-test-v]# vim mydf-cmd-test
FROM centos
CMD ["ls","-a"]
[root@localhost docker-test-v]# docker build -f mydf-cmd-test -t centos-cmd-test .
Sending build context to Docker daemon 4.096kB
Step 1/2 : FROM centos
---> 5d0da3dc9764
Step 2/2 : CMD ["ls","-a"]
---> Running in 2b875583c5b5
Removing intermediate container 2b875583c5b5
---> fa2601e5666e
Successfully built fa2601e5666e
Successfully tagged centos-cmd-test:latest
[root@localhost docker-test-v]# docker run fa2601e5666e
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var
[root@localhost docker-test-v]# docker run fa2601e5666e -l
docker: Error response from daemon: OCI runtime create failed: runc create failed: unable to start container process: exec: "-l": executable file not found in $PATH: unknown.
ERRO[0000] error waiting for container: context canceled
ENTRYPOINT
[root@localhost docker-test-v]# vim mydf-entry-test
FROM centos
ENTRYPOINT ["ls","-a"]
[root@localhost docker-test-v]# docker build -f mydf-entry-test -t centos-entry-test .
Sending build context to Docker daemon 5.12kB
Step 1/2 : FROM centos
---> 5d0da3dc9764
Step 2/2 : ENTRYPOINT ["ls","-a"]
---> Running in 1adc0700047e
Removing intermediate container 1adc0700047e
---> 784d65e0819a
Successfully built 784d65e0819a
Successfully tagged centos-entry-test:latest
[root@localhost docker-test-v]# docker run 784d65e0819a
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var
[root@localhost docker-test-v]# docker run 784d65e0819a -l
total 0
drwxr-xr-x. 1 root root 6 Aug 11 06:27 .
drwxr-xr-x. 1 root root 6 Aug 11 06:27 ..
-rwxr-xr-x. 1 root root 0 Aug 11 06:27 .dockerenv
lrwxrwxrwx. 1 root root 7 Nov 3 2020 bin -> usr/bin
drwxr-xr-x. 5 root root 340 Aug 11 06:27 dev
drwxr-xr-x. 1 root root 66 Aug 11 06:27 etc
drwxr-xr-x. 2 root root 6 Nov 3 2020 home
lrwxrwxrwx. 1 root root 7 Nov 3 2020 lib -> usr/lib
lrwxrwxrwx. 1 root root 9 Nov 3 2020 lib64 -> usr/lib64
drwx------. 2 root root 6 Sep 15 2021 lost+found
drwxr-xr-x. 2 root root 6 Nov 3 2020 media
drwxr-xr-x. 2 root root 6 Nov 3 2020 mnt
drwxr-xr-x. 2 root root 6 Nov 3 2020 opt
dr-xr-xr-x. 115 root root 0 Aug 11 06:27 proc
dr-xr-x---. 2 root root 162 Sep 15 2021 root
drwxr-xr-x. 11 root root 163 Sep 15 2021 run
lrwxrwxrwx. 1 root root 8 Nov 3 2020 sbin -> usr/sbin
drwxr-xr-x. 2 root root 6 Nov 3 2020 srv
dr-xr-xr-x. 13 root root 0 Aug 11 02:52 sys
drwxrwxrwt. 7 root root 171 Sep 15 2021 tmp
drwxr-xr-x. 12 root root 144 Sep 15 2021 usr
drwxr-xr-x. 20 root root 262 Sep 15 2021 var
製作Tomcat映象
-
準備壓縮包
apache-tomcat-9.0.58.tar.gz jdk-8u211-linux-x64.tar.gz
-
準備dockerfile
readme.txt
構建說明Dockerfile
官方命名 Dockerfile 在 build 時就會自動去找這個命名,就不用 -f 指定檔名了;ADD 會自動解壓縮
FROM centos MAINTAINER wendy<zhiwen.ji@qq.com> COPY readme.txt /usr/local/readme.txt ADD apache-tomcat-9.0.58.tar.gz /usr/local/ ADD jdk-8u211-linux-x64.tar.gz /usr/local/ RUN yum -y install vim ENV MYPATH /usr/local/ WORKDIR $MYPATH ENV JAVA_HOME /usr/local/jdk1.8.0_211/ ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar ENV CATALINA_HOME /usr/local/apache-tomcat-9.0.58/ ENV CATALINA_BASE /usr/local/apache-tomcat-9.0.58/ ENV PATH $PATH:$JAVA_HOME/bin:$CATALINA_HOME/bin:$CATALINA_HOME/lib EXPOSE 8080 CMD /usr/local/apache-tomcat-9.0.58/bin/startup.sh && tail -f /usr/local/apache-tomcat-9.0.58/logs/catalina.out
-
構建
docker build -t diy-tomcat .
-
執行
docker run --name diytomcat01 -d -p 9090:8080 -v /usr/local/docker-tomcat/test/:/usr/local/apache-tomcat-9.0.58/webapps/test/ -v /usr/local/docker-tomcat/logs/:/usr/local/apache-tomcat-9.0.58/logs/ diy-tomcat
-
測試掛載
本機可以看到logs目錄下日誌,並在test建立一個應用(只有html測試),訪問可以看到就是成功了
[root@localhost docker-tomcat]# ls apache-tomcat-9.0.58.tar.gz Dockerfile jdk-8u211-linux-x64.tar.gz logs readme.txt test [root@localhost docker-tomcat]# ls logs/ catalina.2022-08-11.log host-manager.2022-08-11.log localhost_access_log.2022-08-11.txt catalina.out localhost.2022-08-11.log manager.2022-08-11.log [root@localhost docker-tomcat]# ls test/ index.html WEB-INF
釋出映象
釋出到DockerHub
先註冊dockerhub賬號,再登陸
docker login -u zhiwenj
password: ****
釋出,作者/名稱:版本號
docker push zhiwenj/diy-tomcat:1.0
報錯:An image does not exist locally with the tag: wendy/diy-tomcat
因為本地是latest,沒帶版本號,所以需要重寫定一個版本號
docker tag 330f41ec0790 zhiwenj/diy-tomcat:1.0
然後重新發布
釋出到阿里雲
註冊 - 登陸 - 設定registry密碼 - 建立名稱空間 - 建立倉庫 - 檢視說明 - 推送
docker login --username=wenenenenen registry.cn-hangzhou.aliyuncs.com
docker tag [ImageId] registry.cn-hangzhou.aliyuncs.com/wendy-docker-test/test-01:[映象版本號]
docker push registry.cn-hangzhou.aliyuncs.com/wendy-docker-test/test-01:[映象版本號]
命令小結
Docker網路
理解docker0
1. 本機迴環地址 2. 虛擬機器地址 3. docker0 這是Docker服務啟動後自動生成的
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:ed:0c:8f brd ff:ff:ff:ff:ff:ff
inet 192.168.137.3/24 brd 192.168.137.255 scope global dynamic ens33
valid_lft 1199sec preferred_lft 1199sec
inet6 fe80::e24c:801:d03e:bf59/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:b1:96:f1:91 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
啟動一個Tomcat容器,執行ip addr
報錯,因為精簡後的容器映象裡沒有 ip 命令,需要手動安裝一下:容器內執行 apt update && apt install -y iproute2,
再次執行 docker exec -it tomcat01 ip addr
發現容器得到了一個新的網路:12: eth0@if13,ip地址:172.17.0.3。這是Docker在容器啟動時為其分配的
[root@localhost ~]# docker run -d --name tomcat01 tomcat
[root@localhost ~]# docker exec -it tomcat01 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
12: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
valid_lft forever preferred_lft forever
使用主機ping容器ip 172.17.0.3,發現可以ping通
[root@localhost ~]# ping 172.17.0.3
PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data.
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.062 ms
64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.039 ms
^C
--- 172.17.0.3 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.039/0.050/0.062/0.013 ms
-
linux可以ping通docker容器內部,因為docker0的ip地址為172.17.0.1,容器為172.17.0.3。
-
原理:我們每啟動一個docker容器,docker就會給容器分配一個預設的可用ip,我們只要安裝了docker,就會有一個網路卡docker0(bridge)。網路卡採用橋接模式,並使用veth-pair技術(veth-pair就是一對虛擬裝置介面,成對出現,一端連著協議,一端彼此相連,充當一個橋樑。)。
-
這時我們退出容器,回到主機再次觀察主機的ip地址:
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:ed:0c:8f brd ff:ff:ff:ff:ff:ff
inet 192.168.137.3/24 brd 192.168.137.255 scope global dynamic ens33
valid_lft 1658sec preferred_lft 1658sec
inet6 fe80::e24c:801:d03e:bf59/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
link/ether 02:42:b1:96:f1:91 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:b1ff:fe96:f191/64 scope link
valid_lft forever preferred_lft forever
13: vethf58de89@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP
link/ether 22:8b:fc:74:f1:d6 brd ff:ff:ff:ff:ff:ff link-netnsid 1
inet6 fe80::208b:fcff:fe74:f1d6/64 scope link
valid_lft forever preferred_lft forever
- 發現了一個新網路介面13: vethf58de89@if12,對應容器內網路介面的12: eth0@if13。
- 容器和容器之間是可以互相ping通的:原理 :容器1→Docker0→容器2
- docker中的所有網路介面都是虛擬的 ,轉發效率高。刪除容器後,對應的網橋也隨之刪除
--link命令
每次啟動容器都會隨機分配ip,那麼就要經常改變應用中的配置檔案,想要根據服務名或者容器ID之類的固定名稱去ping,可以使用--link
在容器啟動命令中加入一個選項:--link,使得我們可以根據容器名來訪問容器
[root@localhost ~]# docker run -d --name tomcat02 --link tomcat01 tomcat:8
5c0c73dd4c0b8eba0713e4290c93daf8638335838f9d7902e4f6ff6a4e6adff1
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
OCI runtime exec failed: exec failed: unable to start container process: exec: "ping": executable file not found in $PATH: unknown
# 還是容器切割沒有ping命令 先進容器安裝一下 apt update && apt install -y inetutils-ping
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
PING tomcat01 (172.17.0.3): 56 data bytes
64 bytes from 172.17.0.3: icmp_seq=0 ttl=64 time=0.073 ms
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.057 ms
^C--- tomcat01 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.057/0.065/0.073/0.000 ms
然而反向就不可以ping通,這是因為--link的本質是把需要連線的容器名/id寫入啟動容器的配置檔案hosts中,即增加了一個ip和容器名/id的對映:
[root@localhost ~]# docker exec -it tomcat02 cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 tomcat01 097453019a95
172.17.0.2 5c0c73dd4c0b
目前已經不建議使用這種方式。
自定義網路
檢視網路
[root@localhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
a07b22356262 bridge bridge local
71deafd68afe host host local
835e96fb07ef none null local
[root@localhost ~]# docker network inspect a07b22356262
[
{
"Name": "bridge",
"Id": "a07b22356262426a754dc5b5c7ee3bf10286dc21f8e983e557d3e3565864b21b",
"Created": "2022-08-12T09:28:03.322619652+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.17.0.0/16",
"Gateway": "172.17.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"097453019a95b422d58cb0b7a5b5ebd02e5371aa0db18076e86727410662f86d": {
"Name": "tomcat01",
"EndpointID": "d111c8fcbc21eefdb1c3b91382c1afd0a148a2452ff2bcced7d6d25bcda77212",
"MacAddress": "02:42:ac:11:00:03",
"IPv4Address": "172.17.0.3/16",
"IPv6Address": ""
},
"5c0c73dd4c0b8eba0713e4290c93daf8638335838f9d7902e4f6ff6a4e6adff1": {
"Name": "tomcat02",
"EndpointID": "70c3fde79f53812bd15a135a84424f73b8c83b76b28c132806e30dfa76e1d1c1",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
},
"Labels": {}
}
]
docker中的網路模式有
- bridge 橋接(docker預設)
- none 不配置網路
- host 和宿主機共享網路(用的少)
docker run 命令其實預設帶有一個引數--net bridge,此處的bridge指的就是docker0網路的名字(docker network ls 中的NAME)。如果我們不想使用docker0,就可以建立一個新的網路
docker run -d --name tomcat01 tomcat:8
等同於
docker run -d --name tomcat01 tomcat:8 --net bridge
# --driver bridge 模式 不寫也行 預設就是橋接
# --subnet 192.168.0.0/16 子網 /16 約能有 255*255 個可用地址 /24 只能有 255 個
# --gateway 192.168.0.1 閘道器 自我理解應該是用來橋接的橋
[root@localhost ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
30a3d06a966fb5e612ee19b69779a6386fcfc4b0853318d7d368b5db10b35687
[root@localhost ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
a07b22356262 bridge bridge local
71deafd68afe host host local
30a3d06a966f mynet bridge local
835e96fb07ef none null local
啟動容器時使用自定義的網路
[root@localhost ~]# docker run -d --name tomcat01 --net mynet tomcat:8
2ae7d056f24ecd09a10cda5203e78fea2de793138582fab50a62c0eb55927553
[root@localhost ~]# docker run -d --name tomcat02 --net mynet tomcat:8
2052b146aa34f66f4de9044992cc0fe4538885dbf45d00e7a1273d2bbcae4bca
[root@localhost ~]# docker network inspect mynet
[
{
"Name": "mynet",
"Id": "30a3d06a966fb5e612ee19b69779a6386fcfc4b0853318d7d368b5db10b35687",
"Created": "2022-08-12T17:49:24.376465717+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": {},
"Config": [
{
"Subnet": "192.168.0.0/16",
"Gateway": "192.168.0.1"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {
"2052b146aa34f66f4de9044992cc0fe4538885dbf45d00e7a1273d2bbcae4bca": {
"Name": "tomcat02",
"EndpointID": "793864aa1c60c1b7774c39b019cea7da1d5f3b8330eb53a17c291d2d66c4f269",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
"2ae7d056f24ecd09a10cda5203e78fea2de793138582fab50a62c0eb55927553": {
"Name": "tomcat01",
"EndpointID": "4df95d14036118cc973684032d9418b25ea6f163dca774e42efb76b21d00b154",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
}
},
"Options": {},
"Labels": {}
}
]
## 使用自定義網路的兩個Tomcat是互相可以ping通的 ping服務名、容器名或者容器ID 也行
apt update && apt install -y inetutils-ping
# tomcat01容器內 安裝ping命令報錯Temporary failure resolving 'deb.debian.org'
# 是因為自定義網路連不上網了 解決:/etc/docker/daemon.json 加上dns配置
{
"registry-mirrors": ["https://todnba9t.mirror.aliyuncs.com"],
"dns": ["8.8.8.8","114.114.114.114"]
}
# 測試互相ping
[root@localhost ~]# docker exec -it tomcat01 ping tomcat02
PING tomcat02 (192.168.0.3): 56 data bytes
64 bytes from 192.168.0.3: icmp_seq=0 ttl=64 time=0.062 ms
64 bytes from 192.168.0.3: icmp_seq=1 ttl=64 time=0.061 ms
^C--- tomcat02 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.061/0.061/0.062/0.000 ms
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
PING tomcat01 (192.168.0.2): 56 data bytes
64 bytes from 192.168.0.2: icmp_seq=0 ttl=64 time=0.048 ms
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.059 ms
^C--- tomcat01 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.048/0.053/0.059/0.000 ms
不同網路、網段打通
# 新建tomcat-d01 d02 預設是使用docker0網路的
[root@localhost /]# docker run --name tomcat-d01 -d tomcat:8
30adef1930b3749c663a0a75e3c8cfe23dae5aad75f376e2e7026739004fd386
[root@localhost /]# docker run --name tomcat-d02 -d tomcat:8
c839f8e0a627cf4da0880ed82c5c62ffdedee17b3a05b8182ccacebb2c889a38
# 打通命令 docker network connect 網路名 容器名/ID
[root@localhost /]# docker exec -it tomcat-d01 ping tomcat01
ping: unknown host
[root@localhost /]# docker network connect mynet tomcat-d01
[root@localhost /]# docker exec -it tomcat-d01 ping tomcat01
PING tomcat01 (192.168.0.2): 56 data bytes
64 bytes from 192.168.0.2: icmp_seq=0 ttl=64 time=0.064 ms
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.060 ms
^C--- tomcat01 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.060/0.062/0.064/0.000 ms
# 反過來也能ping通 是雙向的
[root@localhost /]# docker exec -it tomcat01 ping tomcat-d01
PING tomcat-d01 (192.168.0.4): 56 data bytes
64 bytes from 192.168.0.4: icmp_seq=0 ttl=64 time=0.047 ms
64 bytes from 192.168.0.4: icmp_seq=1 ttl=64 time=0.061 ms
64 bytes from 192.168.0.4: icmp_seq=2 ttl=64 time=0.060 ms
^C--- tomcat-d01 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.047/0.056/0.061/0.000 ms
# 而tomcat-d02 沒有打透過 依然ping不通
[root@localhost /]# docker exec -it tomcat-d02 ping tomcat01
ping: unknown host
[root@localhost /]# docker exec -it tomcat-d02 ping tomcat02
ping: unknown host
## 打通後docker network inspect mynet 發現是把容器tomcat-d01 放到了mynet網路裡
# 這種方式稱為一個容器兩個IP 類似阿里雲的公網私網
。。。。。。
"Containers": {
"2052b146aa34f66f4de9044992cc0fe4538885dbf45d00e7a1273d2bbcae4bca": {
"Name": "tomcat02",
"EndpointID": "7b80dd866a6c9c36e189dff2ea1a535179983ac005d35e5f93d476840b507f9a",
"MacAddress": "02:42:c0:a8:00:03",
"IPv4Address": "192.168.0.3/16",
"IPv6Address": ""
},
"2ae7d056f24ecd09a10cda5203e78fea2de793138582fab50a62c0eb55927553": {
"Name": "tomcat01",
"EndpointID": "d9d9ed925d3d550046aefb237570326e8b9fccf37883bc83e71570c8e1342f5e",
"MacAddress": "02:42:c0:a8:00:02",
"IPv4Address": "192.168.0.2/16",
"IPv6Address": ""
},
"30adef1930b3749c663a0a75e3c8cfe23dae5aad75f376e2e7026739004fd386": {
"Name": "tomcat-d01",
"EndpointID": "cdeb36cf8ec0967b1d10208d4ff31ffcf05702592e86bb58444fa9aaf1d5d088",
"MacAddress": "02:42:c0:a8:00:04",
"IPv4Address": "192.168.0.4/16",
"IPv6Address": ""
}
}
。。。。。。
搭建redis叢集
採用三主三從 分片配置叢集
# 建立redis叢集自定義網路
docker network create --subnet 172.18.0.0/16 redis
# 指令碼建立 6臺 redis的配置
# Generate the per-node directory tree and redis.conf for all 6 cluster nodes.
# Each node listens on 6379 inside its container and announces a fixed ip
# 172.18.0.1<n> on the custom 'redis' network.
for port in 1 2 3 4 5 6; do
  node_dir="/usr/local/docker-redis/node-${port}"
  mkdir -p "${node_dir}/conf"
  touch "${node_dir}/conf/redis.conf"
  # Unquoted EOF: ${port} is expanded inside the heredoc body.
  cat > "${node_dir}/conf/redis.conf" << EOF
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.18.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done
# 啟動 6臺 redis容器 指令碼迴圈啟動
# Launch the 6 redis containers: host ports 637<n>/1637<n> map to the
# container's 6379/16379, each with its own data dir, config file, and
# fixed ip on the custom 'redis' network.
for port in 1 2 3 4 5 6; do
  node_dir="/usr/local/docker-redis/node-${port}"
  docker run -p "637${port}:6379" -p "1637${port}:16379" --name "redis-${port}" \
    -v "${node_dir}/data:/data" \
    -v "${node_dir}/conf/redis.conf:/etc/redis/redis.conf" \
    -d --net redis --ip "172.18.0.1${port}" redis:5.0.9-alpine3.11 \
    redis-server /etc/redis/redis.conf
done
# 手動替換啟動
# Manually start node 1 (repeat for nodes 2..6, changing the port suffix
# and the announce ip accordingly).
# FIX: the original line ended with '; \', a stray continuation that would
# swallow the following line when pasted into a shell; it has been removed.
docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
  -v /usr/local/docker-redis/node-1/data:/data \
  -v /usr/local/docker-redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
  -d --net redis --ip 172.18.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
# 建立叢集 隨便進一個redis 不是/bin/bash命令 是sh
[root@localhost docker-redis]# docker exec -it redis-1 sh
/data # redis-cli --cluster create 172.18.0.11:6379 172.18.0.12:6379 172.18.0.13:6379 172.18.0.14:6379 172.18.0.15:6379 172.18.0.1
6:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.18.0.15:6379 to 172.18.0.11:6379
Adding replica 172.18.0.16:6379 to 172.18.0.12:6379
Adding replica 172.18.0.14:6379 to 172.18.0.13:6379
M: 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379
slots:[0-5460] (5461 slots) master
M: eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379
slots:[5461-10922] (5462 slots) master
M: dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379
slots:[10923-16383] (5461 slots) master
S: 43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379
replicates dd35a300ad3ad466b977832a884fe5aa48a9c563
S: 6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379
replicates 09b1eef2538fdd2c0335342b9de6438a0ba9f40b
S: 8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379
replicates eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
....
>>> Performing Cluster Check (using node 172.18.0.11:6379)
M: 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
M: dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: 8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379
slots: (0 slots) slave
replicates eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67
S: 43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379
slots: (0 slots) slave
replicates dd35a300ad3ad466b977832a884fe5aa48a9c563
S: 6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379
slots: (0 slots) slave
replicates 09b1eef2538fdd2c0335342b9de6438a0ba9f40b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
# 測試叢集 -c 叢集模式
/data # redis-cli -c
# 叢集資訊
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:222
cluster_stats_messages_pong_sent:227
cluster_stats_messages_sent:449
cluster_stats_messages_ping_received:222
cluster_stats_messages_pong_received:222
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:449
# 節點資訊 三主三從
127.0.0.1:6379> cluster nodes
eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379@16379 master - 0 1660557471100 2 connected 5461-10922
dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379@16379 master - 0 1660557471505 3 connected 10923-16383
09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379@16379 myself,master - 0 1660557470000 1 connected 0-5460
8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379@16379 slave eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 0 1660557471000 6 connected
43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379@16379 slave dd35a300ad3ad466b977832a884fe5aa48a9c563 0 1660557470593 4 connected
6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379@16379 slave 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 0 1660557471606 5 connected
# 測試從機頂替 set一個值 然後停止該master 然後get 發現redis-4已經頂上redis-3成為master
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.18.0.13:6379
OK
172.18.0.13:6379> get a
"b"
172.18.0.13:6379> get a
Could not connect to Redis at 172.18.0.13:6379: Host is unreachable
(65.06s)
not connected> exit
/data # redis-cli -c
127.0.0.1:6379> get a
-> Redirected to slot [15495] located at 172.18.0.14:6379
"b"
172.18.0.14:6379> cluster nodes
8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379@16379 slave eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 0 1660557709000 6 connected
eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379@16379 master - 0 1660557709136 2 connected 5461-10922
6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379@16379 slave 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 0 1660557708222 5 connected
09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379@16379 master - 0 1660557709239 1 connected 0-5460
dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379@16379 master,fail - 1660557596311 1660557595808 3 connected
43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379@16379 myself,master - 0 1660557707000 7 connected 10923-16383
部署springboot jar包
-
編寫springboot
-
mvn package 打包
[root@localhost application]# ls Dockerfile geotools-tutorial-1.0.jar
-
編寫Dockerfile
[root@localhost application]# cat Dockerfile
FROM java:8 COPY *.jar /geotools.jar EXPOSE 8081 ENTRYPOINT ["java","-jar","geotools.jar"]
-
build docker映象
docker build -t geotools:1.0 .
-
執行
docker run --name geo01 -d -p 8081:8081 geotools:1.0
Docker Compose
簡介
Dockerfile build run 手動操作,單個容器的操作,微服務模式下,太多的話需要一個個去操作。
Docker Compose 輕鬆高效的容器管理,可以定義執行多個容器。
概念:
- 服務services : 容器,應用。(web、redis、mysql。。。)
- 專案project : 一組關聯的容器。
官網介紹:
Compose 是一個用於定義和執行多容器 Docker 應用程式的工具。使用 Compose,您可以使用 YAML 檔案來配置應用程式的服務。然後,使用一個命令,您可以從您的配置中建立並啟動所有服務。要了解有關 Compose 的所有功能的更多資訊,請參閱功能列表。
Compose 適用於所有環境:生產、登臺、開發、測試以及 CI 工作流程。您可以在常見用例中瞭解有關每個案例的更多資訊。
使用 Compose 基本上是一個三步過程:
- 使用
Dockerfile
定義應用程式的環境,以便可以在任何地方複製它。- 在
docker-compose.yml
中定義構成應用程式的服務,以便它們可以在隔離環境中一起執行。- 執行
docker-compose up
,Docker compose command 啟動並執行您的整個應用程式。您也可以使用 Compose Standalone(docker-compose
二進位制檔案)執行docker-compose up
。
安裝
-
下載
curl -SL https://github.com/docker/compose/releases/download/v2.7.0/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
-
授權
chmod +x /usr/local/bin/docker-compose
-
檢視
docker-compose version
-
解除安裝
# 刪除docker-compose 就行 rm $DOCKER_CONFIG/cli-plugins/docker-compose # 查詢cli外掛位置 docker info --format '{{range .ClientInfo.Plugins}}{{if eq .Name "compose"}}{{.Path}}{{end}}{{end}}'
入門案例
-
建立資料夾
mkdir composetest cd composetest
-
建立py程式
app.py
import time

import redis
from flask import Flask

app = Flask(__name__)
# Hostname "redis" resolves via the Compose network to the redis service.
cache = redis.Redis(host='redis', port=6379)


def get_hit_count():
    """Increment and return the Redis 'hits' counter.

    Retries up to 5 times on connection errors (e.g. redis not up yet),
    sleeping 0.5s between attempts, then re-raises the last error.
    """
    retries = 5
    while True:
        try:
            return cache.incr('hits')
        except redis.exceptions.ConnectionError as exc:
            if retries == 0:
                raise exc
            retries -= 1
            time.sleep(0.5)


@app.route('/')
def hello():
    count = get_hit_count()
    return 'Hello World! I have been seen {} times.\n'.format(count)


# Not in the official tutorial: matches the modified Dockerfile that runs
# "python app.py" directly instead of "flask run".
if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
-
建立程式依賴
requirements.txt
flask redis
-
編寫
Dockerfile
# syntax=docker/dockerfile:1
FROM python:3.7-alpine
WORKDIR /code
ENV FLASK_APP=app.py
ENV FLASK_RUN_HOST=0.0.0.0
# Build tools needed to compile native wheels on alpine.
RUN apk add --no-cache gcc musl-dev linux-headers
COPY requirements.txt requirements.txt
RUN pip install -r requirements.txt
EXPOSE 5000
COPY . .
CMD ["flask", "run"]

# The build above errored out, so it was replaced with the simpler
# version below (runs app.py directly instead of "flask run"):
FROM python:3.6-alpine
ADD . /code
WORKDIR /code
RUN pip install -r requirements.txt
CMD ["python", "app.py"]
-
編寫
docker-compose.yml
# docker-compose.yml configuration file
version: "3.9"
services:
  web:
    build: .
    ports:
      - "8000:5000"
    # Not in the official tutorial: bind-mount the source into /code so
    # code changes take effect without rebuilding (matches the modified
    # Dockerfile that runs app.py directly).
    volumes:
      - .:/code
  redis:
    image: "redis:alpine"
-
構建
[root@localhost composetest]# docker-compose up [+] Running 2/0 ⠿ Container composetest-web-1 Created 0.0s ⠿ Container composetest-redis-1 Created 0.0s Attaching to composetest-redis-1, composetest-web-1 composetest-redis-1 | 1:C 16 Aug 2022 08:59:31.057 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo composetest-redis-1 | 1:C 16 Aug 2022 08:59:31.057 # Redis version=6.2.6, bits=64, commit=00000000, modified=0, pid=1, just started composetest-redis-1 | 1:C 16 Aug 2022 08:59:31.057 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.057 * monotonic clock: POSIX clock_gettime composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 * Running mode=standalone, port=6379. composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 # Server initialized composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect. composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 * Loading RDB produced by version 6.2.6 composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 * RDB age 151 seconds composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 * RDB memory usage when created 0.77 Mb composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 # Done loading RDB, keys loaded: 1, keys expired: 0. 
composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 * DB loaded from disk: 0.000 seconds composetest-redis-1 | 1:M 16 Aug 2022 08:59:31.058 * Ready to accept connections composetest-web-1 | * Serving Flask app 'app' (lazy loading) composetest-web-1 | * Environment: production composetest-web-1 | WARNING: This is a development server. Do not use it in a production deployment. composetest-web-1 | Use a production WSGI server instead. composetest-web-1 | * Debug mode: on composetest-web-1 | * Running on all addresses. composetest-web-1 | WARNING: This is a development server. Do not use it in a production deployment. composetest-web-1 | * Running on http://172.24.0.2:5000/ (Press CTRL+C to quit) composetest-web-1 | * Restarting with stat composetest-web-1 | * Debugger is active! composetest-web-1 | * Debugger PIN: 116-781-253
-
測試
[root@localhost ~]# curl localhost:8000 Hello World! I have been seen 7 times. [root@localhost ~]# curl localhost:8000 Hello World! I have been seen 8 times. [root@localhost ~]# docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 8df26bac74ff redis:alpine "docker-entrypoint.s…" 5 minutes ago Up 2 minutes 6379/tcp composetest-redis-1 20b1f77343d8 composetest_web "python app.py" 5 minutes ago Up 2 minutes 0.0.0.0:8000->5000/tcp composetest-web-1 [root@localhost ~]# docker images REPOSITORY TAG IMAGE ID CREATED SIZE composetest_web latest eaaccc9f548d 48 minutes ago 55.5MB redis alpine 3900abf41552 8 months ago 32.4MB [root@localhost ~]# docker network ls NETWORK ID NAME DRIVER SCOPE eb29590a53a3 bridge bridge local 3cfdd97d09c3 composetest_default bridge local f454031e2478 host host local e98bf5a234b1 none null local
一些預設配置項:
- 預設服務名、容器名:資料夾名_服務名_num(就是叢集狀態下的副本數量)
- 預設網路名:資料夾名_default : 自動建立的網路,整個專案在一個網路下,可以用服務名訪問
-
停止
## 或者原始終端 Ctrl+C docker-compose stop ## docker-compose up -d 後臺啟動時候用stop 停止 但是不刪除容器 # docker-compose down --volumes 同時刪除掛載的卷就是資料 docker-compose down ## 停止並刪除容器
## yaml配置規則
詳見[官網文件](https://docs.docker.com/compose/compose-file/#compose-file)
```yaml
# 三層結構
version: '' # 當前docker-compose版本 和docker版本對應
services: # 服務 核心層
服務1: web
# 服務配置
image
build
networks
volumes
.....
服務2: redis
.....
服務3: xxx
.....
# 其他層 全域性規則等
configs:
volumes:
......
WordPress案例
-
建專案,就是資料夾
mkdir my_wordpress cd my_wordpress
-
寫docker-compose.yml
services:
  db:
    # We use a mariadb image which supports both amd64 & arm64 architecture
    image: mariadb:10.6.4-focal
    # If you really want to use MySQL, uncomment the following line
    #image: mysql:8.0.27
    command: '--default-authentication-plugin=mysql_native_password'
    volumes:
      - db_data:/var/lib/mysql
    restart: always
    environment:
      - MYSQL_ROOT_PASSWORD=somewordpress
      - MYSQL_DATABASE=wordpress
      - MYSQL_USER=wordpress
      - MYSQL_PASSWORD=wordpress
    # Reachable by other services on the Compose network only (not the host).
    expose:
      - 3306
      - 33060
  wordpress:
    image: wordpress:latest
    ports:
      - 80:80
    restart: always
    environment:
      # "db" resolves to the db service via the Compose default network.
      - WORDPRESS_DB_HOST=db
      - WORDPRESS_DB_USER=wordpress
      - WORDPRESS_DB_PASSWORD=wordpress
      - WORDPRESS_DB_NAME=wordpress
volumes:
  db_data:
-
啟動
docker-compose up -d
-
訪問並配置部落格
計數器案例
springboot
+redis
實現計數器案例
程式
@RestController
public class HelloController {

    // Injected by Spring; talks to the "redis" service from docker-compose.yml.
    @Resource
    private RedisTemplate<String, String> redisTemplate;

    /**
     * Atomically increments the Redis key "count" and reports the new value.
     *
     * @return a message containing the updated hit count
     */
    @GetMapping("/count")
    public String count() {
        Long hits = redisTemplate.opsForValue().increment("count");
        return "public static void main ---> " + hits;
    }
}
Dockerfile
# Build image for the Spring Boot counter app (used by docker-compose.yml below).
FROM java:8
# Copy the packaged fat jar into the image root as /geotools.jar.
COPY *.jar /geotools.jar
# The application listens on port 8081.
EXPOSE 8081
# Default WORKDIR is "/", so "geotools.jar" resolves to /geotools.jar.
ENTRYPOINT ["java","-jar","geotools.jar"]
docker-compose.yml
version: '3.6'
services:
  geotools:
    build: .          # build from the Dockerfile in this directory
    image: geotools   # tag for the built image
    depends_on:
      - redis         # start the redis container before the app
    ports:
      - "8081:8081"
  redis:
    image: "redis:alpine"
執行
docker-compose up -d
測試
[root@bogon ~]# curl localhost:8081/count
public static void main ---> 17
[root@bogon ~]# curl localhost:8081/count
public static void main ---> 18
Docker Swarm
搭建
叢集環境準備,使用阿里雲伺服器或者虛擬機器,四臺 。每臺一核兩G就夠了,然後全部安裝docker,使用xshell同步操作四個。
官網文件:
Docker Engine 1.12 引入了 swarm 模式,使您能夠建立一個由一個或多個 Docker 引擎組成的叢集,稱為 swarm。一個 swarm 由一個或多個節點組成:在 swarm 模式下執行 Docker Engine 1.12 或更高版本的物理機或虛擬機器。
有兩種型別的節點 : managers and workers.
If you haven’t already, read through the swarm mode overview and key concepts.
檢視swarm命令
[root@localhost ~]# docker swarm --help
Usage: docker swarm COMMAND
Manage Swarm
Commands:
ca Display and rotate the root CA
init Initialize a swarm
join Join a swarm as a node and/or manager
join-token Manage join tokens
leave Leave the swarm
unlock Unlock swarm
unlock-key Manage the unlock key
update Update the swarm
Run 'docker swarm COMMAND --help' for more information on a command.
流程:首先初始化(init
)一臺機器成為manager
節點,並暴露(--advertise-addr
)自己的地址,讓其他節點加入join
進來選擇成為manager
或者是worker
# 初始化一個manager 當前機器docker-1 ip為192.168.137.4
[root@localhost ~]# docker swarm init --advertise-addr 192.168.137.4
Swarm initialized: current node (slclpnonzlpn8lse1of09e6zl) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
# 生成worker join的token
[root@localhost ~]# docker swarm join-token worker
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377
# 生成manager join的token
[root@localhost ~]# docker swarm join-token manager
To add a manager to this swarm, run the following command:
docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-bahthzqdkjntmde2ghl0mw0jk 192.168.137.4:2377
# 加入docker-1叢集 成為一個worker 當前機器docker-2 ip為192.168.137.5
# 報錯 Error response from daemon: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: Error while dialing dial tcp 192.168.137.4:2377: connect: no route to host"
# 需要關閉防火牆或者開放埠 否則docker-1的2377埠連不通
# 關閉防火牆
# systemctl stop firewalld
# 開放埠
# firewall-cmd --zone=public --add-port=2377/tcp --permanent
# firewall-cmd --reload
[root@localhost ~]# docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377
This node joined a swarm as a worker.
# 加入docker-1叢集 成為一個worker 當前機器docker-3 ip為192.168.137.6
[root@localhost ~]# docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377
This node joined a swarm as a worker.
# 加入docker-1叢集 成為一個manager 當前機器docker-4 ip為192.168.137.7
# 報錯 Error response from daemon: manager stopped: can't initialize raft node: rpc error: code = Unknown desc = could not connect to prospective new cluster member using its advertised address: rpc error: code = Unavailable desc = all SubConns are in TransientF
# 成為manager就需要關閉防火牆或者開啟埠 只開啟docker-1的不行 這個也需要開啟 方式同上
[root@bogon ~]# docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-bahthzqdkjntmde2ghl0mw0jk 192.168.137.4:2377
This node joined a swarm as a manager.
# 最終在docker-1 檢視節點
[root@localhost ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow localhost Ready Active Reachable 19.03.12
ijsh1hquejkzghwxz7al17jiu localhost.localdomain Ready Active 19.03.12
rtigefxbfuql7o3dh53px14yn * localhost.localdomain Ready Active Leader 19.03.12
xgwd0fwqmjdo27hte2yye3p6o localhost.localdomain Ready Active 19.03.12
Raft協議
雙主雙從: 假設一個主節點掛了,另一個主節點也不可用!!
Raft一致性協議:理解:就是保證絕大多數節點是存活的才可用,就是高可用理念,雙主雙從掛一個只剩一個主的話,還談什麼高可用。所以叢集的管理節點數量最起碼大於三臺。 兩主只要掛一個就都不可用,三主掛一個另外兩個還可用,掛兩個就都不可用了。高可用就是存活的管理節點數要大於總數的一半。
測試:雙主雙從掛一個,另一個也不可用
# docker-1 docker-4 是主 docker-2 docker-3 是從
[root@localhost ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow localhost Ready Active Reachable 19.03.12
ijsh1hquejkzghwxz7al17jiu localhost.localdomain Ready Active 19.03.12
rtigefxbfuql7o3dh53px14yn * localhost.localdomain Ready Active Leader 19.03.12
xgwd0fwqmjdo27hte2yye3p6o localhost.localdomain Ready Active 19.03.12
# 關閉 docker-1
[root@localhost ~]# systemctl stop docker
# docker-4 檢視節點
[root@localhost ~]# docker node ls
Error response from daemon: rpc error: code = DeadlineExceeded desc = context deadline exceeded
# 開啟 docker-1
[root@localhost ~]# systemctl start docker
# docker-1 或者 docker-4檢視節點 發現docker-4成了Leader
[root@localhost ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow * localhost Ready Active Leader 19.03.12
rtigefxbfuql7o3dh53px14yn localhost Ready Active Reachable 19.03.12
xgwd0fwqmjdo27hte2yye3p6o localhost Ready Active 19.03.12
ijsh1hquejkzghwxz7al17jiu localhost.localdomain Ready Active 19.03.12
測試:三主一從掛一個,另外兩個可用,掛兩個都不可用
# 先把一個worker docker-3離開再新增成為manager
# docker-3
[root@localhost ~]# docker swarm leave
Node left the swarm.
[root@localhost ~]# docker swarm join --token SWMTKN-1-24ysfnawimd0who3788enz230baj1grsb0gubouwsvm8njun77-ep7bmlbflhv8yeyd2nnmxdl5s 192.168.137.4:2377
This node joined a swarm as a manager.
[root@localhost ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow localhost Ready Active Leader 19.03.12
rtigefxbfuql7o3dh53px14yn localhost Ready Active Reachable 19.03.12
xgwd0fwqmjdo27hte2yye3p6o localhost Ready Active 19.03.12
64b53pz8t50l46jv5wt5cs7of localhost.localdomain Down Active 19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35 * localhost.localdomain Ready Active Reachable 19.03.12
ijsh1hquejkzghwxz7al17jiu localhost.localdomain Down Active 19.03.12
# 停掉 docker-1
[root@localhost ~]# systemctl stop docker
# docker-3 檢視
[root@localhost ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow localhost Ready Active Leader 19.03.12
rtigefxbfuql7o3dh53px14yn localhost Down Active Unreachable 19.03.12
xgwd0fwqmjdo27hte2yye3p6o localhost Ready Active 19.03.12
64b53pz8t50l46jv5wt5cs7of localhost.localdomain Down Active 19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35 * localhost.localdomain Ready Active Reachable 19.03.12
ijsh1hquejkzghwxz7al17jiu localhost.localdomain Down Active 19.03.12
# docker-4檢視
[root@localhost ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow * localhost Ready Active Leader 19.03.12
rtigefxbfuql7o3dh53px14yn localhost Down Active Unreachable 19.03.12
xgwd0fwqmjdo27hte2yye3p6o localhost Ready Active 19.03.12
64b53pz8t50l46jv5wt5cs7of localhost.localdomain Down Active 19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35 localhost.localdomain Ready Active Reachable 19.03.12
ijsh1hquejkzghwxz7al17jiu localhost.localdomain Down Active 19.03.12
# 再停掉docker-4
[root@localhost ~]# systemctl stop docker
# docker-3檢視
[root@localhost ~]# docker node ls
Error response from daemon: rpc error: code = DeadlineExceeded desc = context deadline exceeded
動態擴縮容
nginx
搭建為例企業級使用
docker
時,基本告別使用docker run
命令,docker-compose up
也是單機部署使用的,在swarm
裡,使用命令為docker service
。概念變化:啟動容器 -> 啟動服務 -> 啟動副本
redis
叢集就是一個redis
服務,有10個副本就是開啟了10個容器,動態擴縮容就是動態的增減副本。類似灰度釋出,金絲雀釋出的概念
# 當前 三主一從 docker-2從 docker-1 3 4 主
[root@docker-1 /]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
rtigefxbfuql7o3dh53px14yn * docker-1 Ready Active Leader 19.03.12
xgwd0fwqmjdo27hte2yye3p6o docker-2 Ready Active 19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35 docker-3 Ready Active Reachable 19.03.12
3cy2vkwbe2kuefw3goqa3mcow docker-4 Ready Active Reachable 19.03.12
[root@docker-1 /]# docker service --help
Usage: docker service COMMAND
Manage services
Commands:
create Create a new service
inspect Display detailed information on one or more services
logs Fetch the logs of a service or task
ls List services
ps List the tasks of one or more services
rm Remove one or more services
rollback Revert changes to a service's configuration
scale Scale one or multiple replicated services
update Update a service
Run 'docker service COMMAND --help' for more information on a command.
# 建立一個服務 可想象為docker run 命令,不過是建立成了swarm叢集
[root@docker-1 /]# docker service create -p 8888:80 --name my-nginx nginx
3hh8ny611f3kms7hhutn1xzdd
overall progress: 1 out of 1 tasks
1/1: running
verify: Service converged
# 檢視服務 詳細:docker service inspect my-nginx
[root@docker-1 /]# docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
3hh8ny611f3k my-nginx replicated 1/1 nginx:latest *:8888->80/tcp
[root@docker-1 /]# docker service ps my-nginx
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
l702uvr4ogf7 my-nginx.1 nginx:latest docker-4 Running Running 3 minutes ago
# 現在可以在docker-1 2 3 4 上docker ps找一下看剛才啟動的nginx服務 其本身的副本 也就是容器是跑在哪裡的 發現是在docker-4裡面
[root@docker-4 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
69985d9e1116 nginx:latest "/docker-entrypoint.…" 8 minutes ago Up 8 minutes 80/tcp my-nginx.1.l702uvr4ogf7ewdeynbz4xgng
docker run
容器單機啟動,不具有擴縮容功能
docker service
容器服務啟動,可以動態擴縮容,滾動更新訪問: 容器雖然執行docker-4裡面 ,但是訪問三臺主機任意一個都是可以訪問成功的
http://192.168.137.4:8888/
# 那麼意思就是預設情況下 create出來的服務 就只有一個副本 也就是隻會建立一個容器 如果現在訪問量增大 一個容器頂不住 需要增加叢集數量 這個時候就需要用到擴縮容了
# 更新服務的副本數為3 那麼現在docker ps會發現docker-1 3 4 都出現了nginx
[root@docker-1 /]# docker service update --replicas 3 my-nginx
my-nginx
overall progress: 3 out of 3 tasks
1/3: running
2/3: running
3/3: running
verify: Service converged
# 副本數量是不受伺服器數量限制的 只要伺服器硬體條件足夠就行 目前有docker-1 2 3 4 四臺虛擬機器 要更新服務為10個副本也是可以的 就是每臺虛擬機器上多跑幾個容器 就是docker容器的特性 一個映象可以執行多個容器
# 這時docker ps就發現docker-1執行了2個nginx容器 docker-2執行了3個 docker-3執行了2個 docker-4執行了3個
[root@docker-1 /]# docker service update --replicas 10 my-nginx
my-nginx
overall progress: 10 out of 10 tasks
1/10: running
2/10: running
3/10: running
4/10: running
5/10: running
6/10: running
7/10: running
8/10: running
9/10: running
10/10: running
verify: Service converged
# 如果流量變小了 不需要這麼多副本了 也可以動態更新更少的副本
# 這時docker ps發現只有docker-4上有執行的1個容器了
[root@docker-1 /]# docker service update --replicas 1 my-nginx
my-nginx
overall progress: 1 out of 1 tasks
1/1: running
verify: Service converged
另一個擴縮容命令
docker service scale 服務名=副本數
# 效果等同於 update命令
[root@docker-4 ~]# docker service scale my-nginx=3
my-nginx scaled to 3
overall progress: 3 out of 3 tasks
1/3: running
2/3: running
3/3: running
verify: Service converged
[root@docker-4 ~]# docker service scale my-nginx=2
my-nginx scaled to 2
overall progress: 2 out of 2 tasks
1/2: running
2/2: running
verify: Service converged
# 移除服務命令
[root@docker-4 ~]# docker service rm my-nginx
my-nginx
讓服務只在工作節點上執行,需要再建立時加上引數--mode
# --help 說明
# --mode string Service mode (replicated or global) (default "replicated")
# replicated: 指定幾個副本 就會建立幾個容器 初始化就是一個副本 只會建立一個容器
# global: 全域性都有 初始化在四臺虛擬機器上都有一個容器
docker service create --mode replicated --name mytom tomcat:9 預設的
docker service create --mode global --name mytom tomcat:9
概念總結
swarm
叢集的管理和編排,docker可以初始化一個swarm叢集,其他節點可以加入,有管理節點manager和工作節點worker
node
就是一個docker叢集節點,多個節點就組成了一個網路叢集
service
任務,可以在管理節點或者工作節點來執行,是swarm核心
task
容器內的命令,細節任務,容器的建立與維護
擴充:swarm網路模式
docker service inspect my-nginx
發現網路模式是:"PublishMode":"ingress"
ingress
是特殊的Overlay
網路,有負載均衡功能,雖然docker
在4臺機器上,但實際上網路是同一個。
以下有用到以後再學↓
方式:先找案例跑起來,再研究命令
Docker Stack
docker-compose 單機部署專案
docker stack 叢集部署專案
[root@docker-1 /]# docker stack --help
Usage: docker stack [OPTIONS] COMMAND
Manage Docker stacks
Options:
--orchestrator string Orchestrator to use
(swarm|kubernetes|all)
Commands:
deploy Deploy a new stack or update an existing stack
ls List stacks
ps List the tasks in the stack
rm Remove one or more stacks
services List the services in the stack
Run 'docker stack COMMAND --help' for more information on a command.
Docker Secret
安全相關
[root@docker-1 /]# docker secret --help
Usage: docker secret COMMAND
Manage Docker secrets
Commands:
create Create a secret from a file or STDIN as content
inspect Display detailed information on one or more secrets
ls List secrets
rm Remove one or more secrets
Run 'docker secret COMMAND --help' for more information on a command.
Docker Config
配置相關
[root@docker-1 /]# docker config --help
Usage: docker config COMMAND
Manage Docker configs
Commands:
create Create a config from a file or STDIN
inspect Display detailed information on one or more configs
ls List configs
rm Remove one or more configs
Run 'docker config COMMAND --help' for more information on a command.