sudo apt -y install nfs-kernel-server
mkdir -p /tmp/test
sudo mount -t nfs 192.168.10.208:/tmp/nfs /tmp/test
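# the mount above assumes the export already exists on 192.168.10.208; a minimal
# server-side sketch (the 192.168.10.0/24 subnet and export options are assumptions):
sudo mkdir -p /tmp/nfs
echo '/tmp/nfs 192.168.10.0/24(rw,sync,no_subtree_check,no_root_squash,insecure)' | sudo tee -a /etc/exports
sudo exportfs -ra
showmount -e 127.0.0.1
sudo apt -y install nfs-common # client machines need this package before mount -t nfs works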
sudo apt update
sudo apt install -y git vim curl jq
sudo apt install -y openssh-server
ip addr
sudo apt install -y docker.io # install Docker Engine
sudo service docker start # start the docker service
sudo usermod -aG docker ${USER} # add the current user to the docker group
docker pull busybox # pull the busybox image
docker run busybox echo hello world
docker pull alpine
docker run -it alpine sh
# ---------------------------------------------------
docker pull ubuntu:18.04
docker run -it ubuntu:18.04 sh
# the following commands are run inside the container
cat /etc/os-release
apt update
apt install -y wget redis
redis-server &
# -h hostname
docker run -h srv alpine hostname
docker inspect nginx:alpine
docker history nginx:alpine
# use -f to specify the Dockerfile name
# the final argument is a path called the "build context"
# here it is just a dot, meaning the current directory
docker build -f Dockerfile.busybox .
docker save ngx-app:latest -o ngx.tar
docker load -i ngx.tar
# docker-compose
# intel x86_64
sudo curl -SL https://github.com/docker/compose/releases/download/v2.6.1/docker-compose-linux-x86_64 \
  -o /usr/local/bin/docker-compose
# apple m1
sudo curl -SL https://github.com/docker/compose/releases/download/v2.6.1/docker-compose-linux-aarch64 \
  -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
docker-compose version
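# a minimal docker-compose.yml sketch to sanity-check the install
# (the "web" service and the 8080:80 mapping are illustrative, not from these notes):
# services:
#   web:
#     image: nginx:alpine
#     ports:
#       - "8080:80"
docker-compose up -d # run in the directory containing docker-compose.yml
docker-compose down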
kubectl port-forward svc/ngx-svc 8080:80 &
curl 127.1:8080
kubectl rollout status deployment ngx-dep
kubectl describe deploy ngx-dep
kubectl rollout history deploy ngx-dep
kubectl rollout history deploy --revision=2
kubectl rollout undo deploy ngx-dep
Dockerfile
ARG IMAGE_BASE="node"
ARG IMAGE_TAG="alpine"
ENV PATH=$PATH:/tmp
ENV DEBUG=OFF
# Dockerfile.busybox
# choose the base image
FROM busybox
RUN apt-get update \
    && apt-get install -y \
        build-essential \
        curl \
        make \
        unzip \
    && cd /tmp \
    && curl -fSL xxx.tar.gz -o xxx.tar.gz \
    && tar xzf xxx.tar.gz \
    && cd xxx \
    && ./config \
    && make \
    && make clean
# copy the script into /tmp
COPY setup.sh /tmp/
# make it executable, run it, then delete it
RUN cd /tmp && chmod +x setup.sh \
    && ./setup.sh && rm setup.sh
# tcp is the default protocol
EXPOSE 443
# udp can be specified explicitly
EXPOSE 53/udp
# the default command to run when the container starts
CMD echo "hello world"
# Dockerfile
# docker build -t ngx-app .
# docker build -t ngx-app:1.0 .
ARG IMAGE_BASE="nginx"
ARG IMAGE_TAG="1.21-alpine"
FROM ${IMAGE_BASE}:${IMAGE_TAG}
COPY ./default.conf /etc/nginx/conf.d/
RUN cd /usr/share/nginx/html \
    && echo "hello nginx" > a.txt
EXPOSE 8081 8082 8083
.dockerignore
# docker ignore
*.swp
*.sh
minikube
kubectl
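# one common way to install both on Linux x86_64 (platform and Kubernetes version are assumptions):
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
sudo install minikube-linux-amd64 /usr/local/bin/minikube
minikube start --kubernetes-version=v1.23.3
minikube status
alias kubectl="minikube kubectl --" # use minikube's bundled kubectl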
kubectl api-resources
kubectl apply -f ngx-pod.yml
kubectl delete -f ngx-pod.yml
kubectl explain pod
kubectl explain pod.metadata
kubectl explain pod.spec
kubectl explain pod.spec.containers
kubectl run ngx --image=nginx:alpine --dry-run=client -o yaml
export out="--dry-run=client -o yaml"
kubectl run ngx --image=nginx:alpine $out
kubectl logs busy-pod
kubectl describe pod busy-pod
echo 'aaa' > a.txt
kubectl cp a.txt ngx-pod:/tmp
export out="--dry-run=client -o yaml" # define a shell variable
kubectl create job echo-job --image=busybox $out
# ttlSecondsAfterFinished
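# the field goes directly under the Job's spec, e.g. (a sketch):
#   spec:
#     ttlSecondsAfterFinished: 60   # auto-delete the Job 60s after it finishes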
export out="--dry-run=client -o yaml" # define a shell variable
kubectl create cj echo-cj --image=busybox --schedule="" $out
# successfulJobsHistoryLimit
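# the field goes directly under the CronJob's spec, e.g. (a sketch):
#   spec:
#     successfulJobsHistoryLimit: 3   # keep only the 3 most recent successful Jobs
#     failedJobsHistoryLimit: 1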
export out="--dry-run=client -o yaml" # define a shell variable
kubectl create cm info $out
kubectl create cm info --from-literal=k=v $out
kubectl get cm
kubectl describe cm info
kubectl create secret generic user --from-literal=name=root $out
kubectl apply -f secret.yml
kubectl get secret
kubectl describe secret user
kubectl explain pod.spec.containers.env.valueFrom
kubectl port-forward wp-pod 8080:80 &
export out="--dry-run=client -o yaml"
kubectl create deploy ngx-dep --image=nginx:alpine $out
kubectl scale --replicas=5 deploy ngx-dep
kubectl get pod -l app=nginx
kubectl get pod -l 'app in (ngx, nginx, ngx-dep)'
export out="--dry-run=client -o yaml"
# change "kind" to DaemonSet
kubectl create deploy redis-ds --image=redis:5-alpine $out
# remove the NoSchedule taint from the master node (expected output: node/master untainted)
kubectl taint node master node-role.kubernetes.io/master:NoSchedule-
# add the taint back (expected output: node/master tainted)
kubectl taint node master node-role.kubernetes.io/master:NoSchedule
export out="--dry-run=client -o yaml"
kubectl expose deploy ngx-dep --port=80 --target-port=80 $out
kubectl get ns
export out="--dry-run=client -o yaml"
kubectl create ing ngx-ing --rule="ngx.test/=ngx-svc:80" --class=ngx-ink $out
curl --resolve ngx.test:8080:127.0.0.1 http://ngx.test:8080
kubectl create ns test-ns
kubectl get ns
kubectl port-forward svc/ngx-svc 8080:80 &
curl 127.1:8080
kubectl rollout status deployment ngx-dep
kubectl describe deploy ngx-dep
kubectl rollout history deploy ngx-dep
kubectl rollout history deploy --revision=2
kubectl rollout undo deploy ngx-dep
kubectl get pod -n test-ns
kubectl delete ns test-ns
export out="--dry-run=client -o yaml"
kubectl create quota dev-qt $out
kubectl apply -f quota-ns.yml
kubectl get quota -n dev-ns
kubectl describe quota -n dev-ns
kubectl create job echo1 -n dev-ns --image=busybox -- echo hello
kubectl create job echo2 -n dev-ns --image=busybox -- echo hello
kubectl run ngx --image=nginx:alpine -n dev-ns
kubectl explain limits
kubectl describe limitranges -n dev-ns
kubectl top node
kubectl top pod -n kube-system
# min: the minimum number of Pods, i.e. the lower bound for scaling in.
# max: the maximum number of Pods, i.e. the upper bound for scaling out.
# cpu-percent: the target CPU utilization; the HPA scales out above it and scales in below it.
export out="--dry-run=client -o yaml" # define a shell variable
kubectl autoscale deploy ngx-hpa-dep --min=2 --max=10 --cpu-percent=5 $out
kubectl create deploy ngx-dep --image=nginx:alpine --replicas=3
Service
# DNS name: <object-name>.<namespace>.svc.cluster.local
# types: ClusterIP, ExternalName, LoadBalancer, NodePort
# NodePort: besides load-balancing across the backend Pods, the Service also opens a
# dedicated port on every node in the cluster and serves external traffic through it
# (a NodePort variant is sketched below)
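# a NodePort variant of ngx-svc, as a sketch (the name ngx-np-svc and nodePort 30080
# are assumptions; Kubernetes picks a port from 30000-32767 if nodePort is omitted):
apiVersion: v1
kind: Service
metadata:
  name: ngx-np-svc
spec:
  type: NodePort
  selector:
    app: ngx-dep
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30080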
Yaml
# YAML array (list)
OS:
  - linux
  - macOS
  - Windows
--------
{
  "OS": ["linux", "macOS", "Windows"]
}
-------
# YAML object (dictionary)
Kubernetes:
  master: 1
  worker: 3
------
# a more complex example combining arrays and objects
Kubernetes:
  master:
    - apiserver: running
    - etcd: running
  node:
    - kubelet: running
    - kube-proxy: down
    - container-runtime: [docker, containerd, cri-o]
------
# Pod
# head
apiVersion: v1
kind: Pod
metadata:
  name: ngx-pod
  labels:
    env: demo
    owner: chrono
# body
spec:
  containers:
  - image: nginx:alpine
    name: ngx
    ports:
    - containerPort: 80
---
# Job
apiVersion: batch/v1
kind: Job
metadata:
  name: echo-job
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - image: busybox
        name: echo-job
        imagePullPolicy: IfNotPresent
        command: ["/bin/echo"]
        args: ["hello", "world"]
---
# Parallelism Job
apiVersion: batch/v1
kind: Job
metadata:
  name: sleep-job
spec:
  activeDeadlineSeconds: 15
  backoffLimit: 2
  completions: 4
  parallelism: 2
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - image: busybox
        name: echo-job
        imagePullPolicy: IfNotPresent
        command:
        - sh
        - -c
        - sleep $(($RANDOM % 10 + 1)) && echo done
---
# CronJob
apiVersion: batch/v1
kind: CronJob
metadata:
  name: echo-cj
spec:
  schedule: '*/1 * * * *'
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - image: busybox
            name: echo-cj
            imagePullPolicy: IfNotPresent
            command: ["/bin/echo"]
            args: ["hello", "world"]
---
# ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
  name: info
data:
  count: '10'
  debug: 'on'
  path: '/etc/systemd'
  greeting: |
    say hello to kubernetes.
---
# ConfigMap/Secret injected as env vars and mounted as volumes
apiVersion: v1
kind: Pod
metadata:
  name: env-pod
spec:
  volumes:
  - name: cm-vol
    configMap:
      name: info
  - name: sec-vol
    secret:
      secretName: user
  containers:
  - volumeMounts:
    - mountPath: /tmp/cm-items
      name: cm-vol
    - mountPath: /tmp/sec-items
      name: sec-vol
    env:
    - name: COUNT
      valueFrom:
        configMapKeyRef:
          name: info
          key: count
    - name: GREETING
      valueFrom:
        configMapKeyRef:
          name: info
          key: greeting
    - name: USERNAME
      valueFrom:
        secretKeyRef:
          name: user
          key: name
    - name: PASSWORD
      valueFrom:
        secretKeyRef:
          name: user
          key: pwd
    image: busybox
    name: busy
    imagePullPolicy: IfNotPresent
    command: ["/bin/sleep", "300"]
---
apiVersion: v1
kind: Pod
metadata:
  name: vol-pod
spec:
  volumes:
  - name: cm-vol
    configMap:
      name: info
  - name: sec-vol
    secret:
      secretName: user
  containers:
  - volumeMounts:
    - mountPath: /tmp/cm-items
      name: cm-vol
    - mountPath: /tmp/sec-items
      name: sec-vol
    image: busybox
    name: busy
    imagePullPolicy: IfNotPresent
    command: ["/bin/sleep", "300"]
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: maria-cm
data:
  DATABASE: 'db'
  USER: 'wp'
  PASSWORD: '123'
  ROOT_PASSWORD: '123'
---
apiVersion: v1
kind: Pod
metadata:
  name: maria-pod
  labels:
    app: wordpress
    role: database
spec:
  containers:
  - image: mariadb:10
    name: maria
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 3306
    envFrom:
    - prefix: 'MARIADB_'
      configMapRef:
        name: maria-cm
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: wp-cm
data:
  HOST: '172.17.0.2'
  USER: 'wp'
  PASSWORD: '123'
  NAME: 'db'
---
apiVersion: v1
kind: Pod
metadata:
  name: wp-pod
  labels:
    app: wordpress
    role: website
spec:
  containers:
  - image: wordpress:5
    name: wp-pod
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
    envFrom:
    - prefix: 'WORDPRESS_DB_'
      configMapRef:
        name: wp-cm
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: ngx-dep
  name: ngx-dep
spec:
  replicas: 2
  selector:
    matchLabels:
      app: ngx-dep
  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      containers:
      - image: nginx:alpine
        name: nginx
---
# DaemonSet
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: redis-ds
  labels:
    app: redis-ds
spec:
  selector:
    matchLabels:
      name: redis-ds
  template:
    metadata:
      labels:
        name: redis-ds
    spec:
      containers:
      - image: redis:5-alpine
        name: redis
        ports:
        - containerPort: 6379
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
        operator: Exists
---
# service
apiVersion: v1
kind: Service
metadata:
  name: ngx-svc
spec:
  selector:
    app: ngx-dep
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ngx-conf
data:
  default.conf: |
    server {
      listen 80;
      location / {
        default_type text/plain;
        return 200
          'ver : $nginx_version\nsrv : $server_addr:$server_port\nhost: $hostname\nuri : $request_method $host $request_uri\ndate: $time_iso8601\n';
      }
    }
---
# Nginx
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-dep
spec:
  replicas: 2
  selector:
    matchLabels:
      app: ngx-dep
  template:
    metadata:
      labels:
        app: ngx-dep
    spec:
      volumes:
      - name: ngx-conf-vol
        configMap:
          name: ngx-conf
      containers:
      - image: nginx:alpine
        name: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /etc/nginx/conf.d
          name: ngx-conf-vol
---
# ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ngx-ing
spec:
  ingressClassName: ngx-ink
  rules:
  - host: ngx.test
    http:
      paths:
      - path: /
        pathType: Exact
        backend:
          service:
            name: ngx-svc
            port:
              number: 80
---
# ingress class
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: ngx-ink
spec:
  controller: nginx.org/ingress-controller
---
# pv
apiVersion: v1
kind: PersistentVolume
metadata:
  name: host-10m-pv
spec:
  # an abstraction of the storage type; the name can be anything
  storageClassName: host-test
  # access modes: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
  accessModes:
  - ReadWriteOnce
  capacity:
    # capacity, in binary units: Ki/Mi/Gi
    storage: 10Mi
  hostPath:
    # local path on the node
    path: /tmp/host-10m-pv/
---
# pvc
# a PVC does not represent actual storage;
# it is a "request" or "claim" for storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: host-5m-pvc
spec:
  storageClassName: host-test
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Mi
---
# mount
apiVersion: v1
kind: Pod
metadata:
  name: host-pvc-pod
spec:
  volumes:
  - name: host-pvc-vol
    persistentVolumeClaim:
      claimName: host-5m-pvc
  containers:
  - name: ngx-pvc-pod
    image: nginx:alpine
    ports:
    - containerPort: 80
    volumeMounts:
    - name: host-pvc-vol
      mountPath: /tmp
---
# NFS PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-1g-pv
spec:
  storageClassName: nfs
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 1Gi
  nfs:
    path: /tmp/nfs/1g-pv
    server: 192.168.10.208
---
# NFS PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-static-pvc
spec:
  storageClassName: nfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
# NFS provisioner (deployment spec excerpt)
spec:
  template:
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        ...
        env:
        - name: PROVISIONER_NAME
          value: k8s-sigs.io/nfs-subdir-external-provisioner
        - name: NFS_SERVER
          value: 192.168.10.208 # change to your NFS server address
        - name: NFS_PATH
          value: /tmp/nfs # change to your shared directory
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.10.208 # change to your NFS server address
          path: /tmp/nfs # change to your shared directory
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client-retained
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  onDelete: "retain"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "false"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-dyn-10m-pvc
spec:
  storageClassName: nfs-client
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Mi
---
apiVersion: v1
kind: Pod
metadata:
  name: nfs-dyn-pod
spec:
  volumes:
  - name: nfs-dyn-10m-vol
    persistentVolumeClaim:
      claimName: nfs-dyn-10m-pvc
  containers:
  - name: nfs-dyn-test
    image: nginx:alpine
    ports:
    - containerPort: 80
    volumeMounts:
    - name: nfs-dyn-10m-vol
      mountPath: /tmp
---
# StatefulSet
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-sts
spec:
  # each Pod gets a stable DNS name: <pod-name>.<service-name>.<namespace>.svc.cluster.local,
  # e.g. redis-sts-0.redis-svc
  # the Pod's IP may change, but this numbered name is maintained by the Service object
  # and stays stable (a DNS check is sketched after this manifest)
  serviceName: redis-svc
  replicas: 2
  selector:
    matchLabels:
      app: redis-sts
  template:
    metadata:
      labels:
        app: redis-sts
    spec:
      containers:
      - image: redis:5-alpine
        name: redis
        ports:
        - containerPort: 6379
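# a quick check of the per-Pod DNS names, as a sketch (per-Pod records are guaranteed
# when the governing Service is headless, and nslookup must exist in the image):
#   kubectl exec -it redis-sts-0 -- nslookup redis-sts-0.redis-svc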
---
# Service
apiVersion: v1
kind: Service
# the Service name must match serviceName in the StatefulSet,
# and its selector must match the StatefulSet's Pod labels
metadata:
  name: redis-svc
# when the Service fronts a StatefulSet you can add clusterIP: None under spec,
# telling Kubernetes not to allocate a cluster IP for it (a headless variant is sketched below)
spec:
  selector:
    app: redis-sts
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
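---
# the headless variant mentioned above, as a sketch: same selector and ports,
# but clusterIP: None so no cluster IP is allocated and per-Pod DNS records are created
apiVersion: v1
kind: Service
metadata:
  name: redis-svc
spec:
  clusterIP: None
  selector:
    app: redis-sts
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379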
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-pv-sts
spec:
  serviceName: redis-pv-svc
  volumeClaimTemplates:
  - metadata:
      name: redis-100m-pvc
    spec:
      storageClassName: nfs-client
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 100Mi
  replicas: 2
  selector:
    matchLabels:
      app: redis-pv-sts
  template:
    metadata:
      labels:
        app: redis-pv-sts
    spec:
      containers:
      - image: redis:5-alpine
        name: redis
        ports:
        - containerPort: 6379
        volumeMounts:
        - name: redis-100m-pvc
          mountPath: /data
---
# change-cause annotations (shown by kubectl rollout history)
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-dep
  annotations:
    kubernetes.io/change-cause: v1, ngx=1.21
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-dep
  annotations:
    kubernetes.io/change-cause: update to v2, ngx=1.22
---
# resource limits
apiVersion: v1
kind: Pod
metadata:
  name: ngx-pod-resources
spec:
  containers:
  - image: nginx:alpine
    name: ngx
    # the smallest CPU unit in Kubernetes is 0.001 of a core; for convenience it is written
    # with the suffix m ("milli"), so 500m is the same as 0.5 CPU
    resources:
      requests:
        cpu: 10m
        memory: 100Mi
      limits:
        cpu: 20m
        memory: 200Mi
---
# quota
apiVersion: v1
kind: Namespace
metadata:
  name: dev-ns
---
apiVersion: v1
kind: ResourceQuota
metadata:
  name: dev-qt
  namespace: dev-ns
spec:
  hard:
    requests.cpu: 10
    requests.memory: 10Gi
    limits.cpu: 10
    limits.memory: 20Gi
    requests.storage: 100Gi
    persistentvolumeclaims: 100
    pods: 100
    configmaps: 100
    secrets: 100
    services: 10
    count/jobs.batch: 1
    count/cronjobs.batch: 1
    count/deployments.apps: 1
---
apiVersion: v1
kind: ResourceQuota
metadata:
  name: cpu-mem-qt
  namespace: dev-ns
spec:
  hard:
    requests.cpu: 10
    requests.memory: 10Gi
    limits.cpu: 10
    limits.memory: 20Gi
---
apiVersion: v1
kind: ResourceQuota
metadata:
  name: core-obj-qt
  namespace: dev-ns
spec:
  hard:
    pods: 100
    configmaps: 100
    secrets: 100
    services: 10
---
# limitrange
apiVersion: v1
kind: LimitRange
metadata:
  name: dev-limits
  namespace: dev-ns
spec:
  limits:
  - type: Container
    defaultRequest:
      cpu: 200m
      memory: 50Mi
    default:
      cpu: 500m
      memory: 100Mi
  - type: Pod
    max:
      cpu: 800m
      memory: 200Mi
---
# HorizontalPodAutoscaler
---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: ngx-hpa
spec:
  maxReplicas: 10
  minReplicas: 2
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: ngx-hpa-dep
  targetCPUUtilizationPercentage: 5
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ngx-hpa-dep
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ngx-hpa-dep
  template:
    metadata:
      labels:
        app: ngx-hpa-dep
    spec:
      containers:
      - image: nginx:alpine
        name: nginx
        ports:
        - containerPort: 80
        resources:
          requests:
            cpu: 50m
            memory: 10Mi
          limits:
            cpu: 100m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: ngx-hpa-svc
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: ngx-hpa-dep