
Kubernetes Cheat Sheet

2024-08-12 11:24 AtmosphereMao


# Time synchronization
systemctl start chronyd && systemctl enable chronyd
date 
# Disable firewalld
systemctl stop firewalld && systemctl disable firewalld
# Disable SELinux
apt install selinux-utils
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Disable swap
# Permanent: also comment out the swap entry in /etc/fstab
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
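# Verify swap is off (the Swap line should show 0):
free -h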

# Ubuntu install
# Let apt fetch packages over HTTPS
apt-get update && apt-get install -y apt-transport-https
# Download the GPG key
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add - 
# Add the k8s package mirror
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
# Refresh the package index
apt-get update
apt-get install -y kubelet=1.23.1-00 kubeadm=1.23.1-00 kubectl=1.23.1-00
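# Hold the versions so a routine apt upgrade cannot break the cluster:
apt-mark hold kubelet kubeadm kubectl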

images=$(kubeadm config images list | awk -F'/' '{print $NF}')
for i in ${images}; do docker pull registry.aliyuncs.com/google_containers/$i; done
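# Optionally retag the mirrored images to the registry kubeadm expects by default
# (k8s.gcr.io for 1.23); unnecessary if you pass --image-repository to kubeadm init:
for i in ${images}; do docker tag registry.aliyuncs.com/google_containers/$i k8s.gcr.io/$i; done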

# CentOS install
# Add the k8s package mirror
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install
yum install -y kubeadm-1.23.17 kubectl-1.23.17 kubelet-1.23.17

# Enable at boot; follow the kubelet logs
systemctl enable kubelet
journalctl -xefu kubelet

# nvidia-runtime
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/nvidia-container-toolkit.list
apt-get update
apt-get install -y nvidia-container-toolkit nvidia-container-runtime
# /etc/docker/daemon.json -- make nvidia the default runtime and use the systemd cgroup driver
{
    "default-runtime": "nvidia",
    "runtimes": {
      "nvidia": {
          "path": "/usr/bin/nvidia-container-runtime",
          "runtimeArgs": []
      }
    },
    "exec-opts": ["native.cgroupdriver=systemd"]
}
systemctl daemon-reload
systemctl restart docker
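# Sanity check that the NVIDIA runtime works (the CUDA image tag is only an example):
docker run --rm --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi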

# master -- run ONE of the init variants below, adjusted to your version and network:
kubeadm init --kubernetes-version=1.23.17 --apiserver-advertise-address=192.168.71.2 --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr="10.244.0.0/16" --service-cidr="10.96.0.0/12"
kubeadm init --kubernetes-version=1.23.17 --service-cidr=2.1.0.0/16 --pod-network-cidr=2.244.0.0/16 --ignore-preflight-errors=all --apiserver-advertise-address=192.168.71.2 --v=10 --image-repository="registry.aliyuncs.com/google_containers"

kubeadm init --kubernetes-version=1.23.1 --apiserver-advertise-address=192.168.0.161 --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr="10.244.1.0/16" --service-cidr="10.96.1.0/12"
kubeadm init --kubernetes-version=1.22.17 --apiserver-advertise-address=192.168.0.151 --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr="10.244.2.0/16" --service-cidr="10.96.2.0/12"

kubeadm join 192.168.0.249:6443 --token gn5c9l.5hgjqfam9ywi9t05 \
    --discovery-token-ca-cert-hash sha256:a3db5261f60f6940c22540fef37b2f9c86cd2f728b9650bc887402d8dcab64a4 
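# If the token has expired, print a fresh join command on the master:
kubeadm token create --print-join-command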

# If kubelet will not start, make sure swap is fully off:
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a
systemctl enable kubelet
systemctl status kubelet

mkdir -p $HOME/.kube && \
cp /etc/kubernetes/admin.conf $HOME/.kube/config && \
chown $(id -u):$(id -g) $HOME/.kube/config

# List joined nodes
kubectl get nodes
# Check cluster component status
kubectl get cs
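# get cs is deprecated on newer releases; checking the control-plane pods works too:
kubectl get pods -n kube-system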

# Container error: "Failed to generate sandbox config for pod" err="open /run/systemd/resolve/resolv.conf ..."
# https://github.com/kubernetes/kubeadm/issues/1124
sudo mkdir -p /run/systemd/resolve
sudo ln -s /etc/resolv.conf /run/systemd/resolve/resolv.conf

# node -- join the cluster:
kubeadm join 192.168.111.128:6443 --token akr5t0.0ah6zt4gblnidht5 \
    --discovery-token-ca-cert-hash sha256:7ddc89af332d59567c07b05ce23c352bfd0cde299085febb8fd10a9927aea322 

# flannel CNI
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
grep image: kube-flannel.yml 
docker pull flannel/flannel:v0.25.5
docker pull flannel/flannel-cni-plugin:v1.5.1-flannel1
kubectl apply -f ./kube-flannel.yml
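# Verify the flannel pods come up and the nodes turn Ready:
kubectl get pods -n kube-flannel
kubectl get nodes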

# Reset flannel (delete the pods; they will be recreated)
kubectl -n kube-system delete pod -l k8s-app=kube-proxy
kubectl -n kube-flannel delete pod -l k8s-app=flannel

# Switch kube-proxy to IPVS
apt-get install ipvsadm
mkdir -p /etc/sysconfig/modules
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
sh /etc/sysconfig/modules/ipvs.modules

# Check the kernel modules are loaded
lsmod | grep ip_vs
lsmod | grep nf_conntrack
# On the master, edit the kube-proxy ConfigMap and set the proxy mode:
kubectl edit configmaps kube-proxy -n kube-system
# in the editor set:  mode: "ipvs"
kubectl get pod -n kube-system | grep kube-proxy
# Restart the kube-proxy pods to apply the change
kubectl get pod -n kube-system |grep kube-proxy |awk '{system("kubectl delete pod "$1" -n kube-system")}'
kubectl get cm kube-proxy -n kube-system -o yaml | grep mode
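# The IPVS rules should now be visible:
ipvsadm -Ln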

# k8s nvidia plugin
kubectl apply -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.14.5/nvidia-device-plugin.yml
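# Once the plugin pod is running, nodes advertise the GPU resource:
kubectl describe nodes | grep nvidia.com/gpu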

# k8s GPU sharing
# https://docs.ucloud.cn/uk8s/administercluster/gpu-share
# Set ucloud.cn/gpu-mem: 1 under limits and specify schedulerName: gpushare-scheduler
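# A minimal pod sketch following the doc above -- the resource name and schedulerName
# come from the ucloud guide; the pod name and image are placeholders:
apiVersion: v1
kind: Pod
metadata:
  name: gpu-share-demo
spec:
  schedulerName: gpushare-scheduler # dispatch via the GPU-share scheduler
  containers:
  - name: cuda
    image: nvidia/cuda:12.2.0-base-ubuntu22.04
    command: ["sleep", "infinity"]
    resources:
      limits:
        ucloud.cn/gpu-mem: 1 # one unit of shared GPU memory (see the doc for the unit)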

# pod
kubectl run my-nginx --image=nginx --dry-run=client -o yaml >01_pod_create.yaml
kubectl apply -f 01_pod_create.yaml
kubectl delete -f 01_pod_create.yaml
kubectl delete pod my-nginx

# deployment
kubectl create deployment my-nginx-deploy --image=nginx --dry-run=client -o yaml > 02_deployment_create.yaml
kubectl apply -f 02_deployment_create.yaml
kubectl edit deployments.apps my-nginx-deploy
kubectl delete deployments.apps my-nginx-deploy 

# Edit an existing resource object's attributes in place
kubectl edit deployments.apps my-nginx-deploy
# Scale the replica count on the fly
kubectl scale deployment my-nginx-deploy --replicas=10
# Update attributes (here the container image) on the fly
kubectl set image deployment my-nginx-deploy nginx=nginx
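# Watch the rollout and roll back if needed:
kubectl rollout status deployment my-nginx-deploy
kubectl rollout undo deployment my-nginx-deploy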

# Namespace
kubectl create namespace my-ns --dry-run=client -o yaml > create_ns.yaml
# create_ns.yaml -- the generated Namespace plus a Pod placed in it:
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: my-ns
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: my-nginx1
  name: my-nginx1
  namespace: my-ns
spec:
  containers:
  - image: nginx
    name: my-nginx
  dnsPolicy: ClusterFirst
  restartPolicy: Always

kubectl apply -f create_ns.yaml
kubectl get pods -n my-ns
kubectl delete -f create_ns.yaml # kubectl delete namespace my-ns

# Service
kubectl expose deployment my-nginx --name=nginx-svc --port=80 --dry-run=client -o yaml > 01_service_create.yaml
kubectl apply -f 01_service_create.yaml
kubectl get svc

# Forward a local port to the pod
kubectl port-forward --address 0.0.0.0 my-nginx-c54945c55-b94s8 80:80

kubectl expose deployment my-nginx --type=NodePort --port=80 --target-port=80 --dry-run=client -o yaml > 01_service_create.yaml
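# Apply it, then curl any node on the allocated NodePort:
kubectl apply -f 01_service_create.yaml
kubectl get svc my-nginx # note the 3xxxx NodePort
# curl http://<node-ip>:<nodePort>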

# ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-nginxconf
data:
  default.conf: |
    server {
      listen 80;
      server_name localhost;
      location /nginx {
        proxy_pass http://localhost;
      }
      location / {
        root /usr/share/nginx/html;
      }
    }
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-nginx-index
data:
  index.html: "test 123\n"

# ConfigMap -- Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-nginx-proxy
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        volumeMounts: # mount the volumes
        - name: nginxconf
          mountPath: /etc/nginx/conf.d/ # mount point
          readOnly: true
        - name: nginxindex
          mountPath: /usr/share/nginx/html/ # mount point
          readOnly: true
      volumes: # volumes
      - name: nginxconf
        configMap:
          name: test-nginxconf # ConfigMap name
      - name: nginxindex
        configMap:
          name: test-nginx-index # ConfigMap name
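# Verify the ConfigMaps landed inside the pod:
kubectl exec deploy/test-nginx-proxy -- cat /etc/nginx/conf.d/default.conf
kubectl exec deploy/test-nginx-proxy -- cat /usr/share/nginx/html/index.html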

# Secret
# Generate a self-signed TLS certificate
openssl genrsa -out tls.key 2048
openssl req -new -x509 -key tls.key -out tls.crt -subj "/CN=localhost"

# nginx-conf-tls/default.conf
server {
    listen 443 ssl;
    server_name localhost;
    ssl_certificate /etc/nginx/certs/tls.crt;
    ssl_certificate_key /etc/nginx/certs/tls.key;
    location / {
        root /usr/share/nginx/html;
    }
}

server {
  listen 80;
  server_name localhost;
  return 301 https://$host$request_uri;
}

kubectl create configmap nginx-ssl-conf --from-file=nginx-conf-tls/
kubectl get cm
kubectl create secret tls nginx-ssl-secret --cert=tls-key/tls.crt --key=tls-key/tls.key
kubectl get secret

# pod yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-nginx-ssl
spec:
  containers:
  - image: nginx
    name: nginx-web
    volumeMounts:
    - name: nginxcerts
      mountPath: /etc/nginx/certs/
      readOnly: true
    - name: nginxconfs
      mountPath: /etc/nginx/conf.d/
      readOnly: true
  volumes:
  - name: nginxcerts
    secret:
      secretName: nginx-ssl-secret
  - name: nginxconfs
    configMap:
      name: nginx-ssl-conf                

# test
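# Look up the pod IP used in the curl below:
kubectl get pod test-nginx-ssl -o wide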
curl https://10.244.2.5 -k

# Ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: superopsmsb-ingress-mulhost
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: nginx.atmospheremao.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service: # service to forward to
            name: nginx-svc
            port:
              number: 80
  - host: tomcat.atmospheremao.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service: # svc to forward to
            name: tomcat-svc
            port:
              number: 8080
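# The rules only take effect with an ingress controller running (the community
# nginx controller typically lives in the ingress-nginx namespace):
kubectl get pods -n ingress-nginx
kubectl get ingress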

curl -H "host: nginx.atmospheremao.com" 10.109.77.153
# 输出:nginx
curl -H "host: tomact.atmospheremao.com" 10.109.77.153
# 输出:tomcat

# Helm Install
wget https://get.helm.sh/helm-v3.14.2-linux-amd64.tar.gz
mkdir /data/server/helm/bin -p
tar -xf helm-v3.14.2-linux-amd64.tar.gz
mv linux-amd64/helm /data/server/helm/bin/
vim /etc/profile.d/helm.sh
#!/bin/bash
export PATH=$PATH:/data/server/helm/bin
source /etc/profile.d/helm.sh
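# Confirm the install:
helm version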

# Helm Repo
helm repo add az-stable http://mirror.azure.cn/kubernetes/charts/
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo list # list the configured repos
helm repo update

# Install myredis
helm install myredis bitnami/redis --set master.persistence.enabled=false --set replica.persistence.enabled=false # the NOTES output includes the REDIS PASSWORD and usage hints
helm uninstall myredis
helm list

# Helm Install Redis Client
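# Export the password first (taken from the chart's NOTES; assumes release myredis in the default namespace):
export REDIS_PASSWORD=$(kubectl get secret --namespace default myredis -o jsonpath="{.data.redis-password}" | base64 -d)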
# 1. Run a Redis® pod that you can use as a client:

   kubectl run --namespace default redis-client --restart='Never'  --env REDIS_PASSWORD=$REDIS_PASSWORD  --image docker.io/bitnami/redis:7.2.4-debian-12-r9 --command -- sleep infinity

#   Use the following command to attach to the pod:

   kubectl exec --tty -i redis-client \
   --namespace default -- bash

# 2. Connect using the Redis® CLI:
   REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h myredis-master
   REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h myredis-replicas

# To connect to your database from outside the cluster execute the following commands:

    kubectl port-forward --namespace default svc/myredis-master 6379:6379 &
    REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h 127.0.0.1 -p 6379

# Remote: connect to the myredis created above from another host
redis-cli -h myredis-master -a cEAJQAuu0d
