https://github.com/kelseyhightower/kubernetes-the-hard-way/blob/master/docs/08-bootstrapping-kubernetes-controllers.md



### Provision the Kubernetes Control Plane

# Create the Kubernetes configuration directory
mkdir -p /etc/kubernetes/config


### Download and Install the Kubernetes Controller Binaries

wget \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kube-apiserver" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kube-controller-manager" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kube-scheduler" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kubectl"


# Install the Kubernetes binaries

chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl

mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
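
A quick sanity check (optional) confirms the binaries landed on the PATH and report the expected release:

# Verify the installed versions
kube-apiserver --version
kubectl version --client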



### Configure the Kubernetes API Server


mkdir -p /var/lib/kubernetes/

mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
  service-account-key.pem service-account.pem \
  encryption-config.yaml /var/lib/kubernetes/
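
A quick listing verifies that every file referenced by the flags below is actually in place:

# The CA, API server, and service-account key pairs plus the encryption config should all be listed
ls -l /var/lib/kubernetes/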

# The internal IP and the master node IPs are needed in the service units below, so capture them in variables
INTERNAL_IP=$(ip addr | grep global | awk '{print $2}' | cut -d/ -f1)
MASTER_NODE=("192.168.1.21" "192.168.1.55" "192.168.1.56")
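
Note that `grep global` can match more than one address on a host with multiple global-scope interfaces, so it is worth echoing the values before generating the unit file:

# INTERNAL_IP must be a single address for --advertise-address to be valid
echo "INTERNAL_IP=${INTERNAL_IP}"
echo "MASTER_NODE=${MASTER_NODE[@]}"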


# Create the kube-apiserver.service systemd unit


cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
  --advertise-address=${INTERNAL_IP} \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/log/audit.log \\
  --authorization-mode=Node,RBAC \\
  --bind-address=0.0.0.0 \\
  --client-ca-file=/var/lib/kubernetes/ca.pem \\
  --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --etcd-cafile=/var/lib/kubernetes/ca.pem \\
  --etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\
  --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\
  --etcd-servers=https://${MASTER_NODE[0]}:2379,https://${MASTER_NODE[1]}:2379,https://${MASTER_NODE[2]}:2379 \\
  --event-ttl=1h \\
  --encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
  --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
  --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
  --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
  --kubelet-https=true \\
  --runtime-config=api/all=true \\
  --service-account-key-file=/var/lib/kubernetes/service-account.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF



### Configure the Kubernetes Controller Manager


# Move the kube-controller-manager kubeconfig
mv kube-controller-manager.kubeconfig /var/lib/kubernetes/

# Create the kube-controller-manager.service systemd unit 
cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
  --bind-address=0.0.0.0 \\
  --cluster-cidr=10.200.0.0/16 \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
  --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
  --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
  --leader-elect=true \\
  --root-ca-file=/var/lib/kubernetes/ca.pem \\
  --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --use-service-account-credentials=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF


### Configure the Kubernetes Scheduler
# Move the kube-scheduler kubeconfig
mv kube-scheduler.kubeconfig /var/lib/kubernetes/

# Create the kube-scheduler.yaml configuration file
cat <<EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
leaderElection:
  leaderElect: true
EOF
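
Note: the v1alpha1 config API matches v1.18; later Kubernetes releases moved KubeSchedulerConfiguration to v1beta1 and eventually v1, so the apiVersion must be adjusted when reusing this file.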


# Create the kube-scheduler.service systemd unit
cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
  --config=/etc/kubernetes/config/kube-scheduler.yaml \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF


### Start the Controller Services

systemctl daemon-reload
systemctl enable kube-apiserver kube-controller-manager kube-scheduler
systemctl start kube-apiserver kube-controller-manager kube-scheduler
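
Allow up to about 10 seconds for the API server to fully initialize. All three services should then report active; if not, journalctl shows the startup errors:

# Verify the services came up
systemctl is-active kube-apiserver kube-controller-manager kube-scheduler
journalctl -u kube-apiserver --no-pager | tail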


### Verification


[root@master01 ~]# kubectl get componentstatuses --kubeconfig admin.kubeconfig
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-2               Healthy   {"health":"true"}
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}



[root@master01 ~]# curl -H "Host: kubernetes.default.svc.cluster.local" -i http://127.0.0.1:8080/healthz
HTTP/1.1 200 OK
Cache-Control: no-cache, private
Content-Type: text/plain; charset=utf-8
X-Content-Type-Options: nosniff
Date: Wed, 02 Dec 2020 05:44:08 GMT
Content-Length: 2

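
Port 8080 here is the API server's deprecated insecure port, which listens only on localhost. The same endpoint can also be checked over TLS against the secure port:

# Equivalent check over HTTPS
curl --cacert /var/lib/kubernetes/ca.pem https://127.0.0.1:6443/healthz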


### Configure RBAC permissions so the Kubernetes API server can access the kubelet API on each worker node. This access is required for retrieving pod metrics and logs and for running kubectl exec.

# Create the ClusterRole and the ClusterRoleBinding



cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
EOF


clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created



cat <<EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF


clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
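
To double-check what was just created:

# Inspect the new role and binding
kubectl describe clusterrole system:kube-apiserver-to-kubelet --kubeconfig admin.kubeconfig
kubectl describe clusterrolebinding system:kube-apiserver --kubeconfig admin.kubeconfig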



### The Kubernetes Frontend Load Balancer

The original guide fronts the control plane with a GCP load balancer, but that setup is hard to reproduce on VirtualBox, so a separate HAProxy server is used here instead.


[root@master01 ~]# cat 051_haproxy_setting.sh
#!/bin/bash

echo "START"

echo "SET OS ENV"
systemctl disable firewalld

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0

sleep 2
echo ""

echo "INSTALL HAPROXY"
yum install -y haproxy
systemctl enable haproxy

sleep 2
echo ""

MASTER=("192.168.1.21" "192.168.1.55" "192.168.1.56")
INTERNAL_IP=$(ip addr | grep global | awk '{print $2}' | cut -d/ -f1)

### Kubernetes Multi Master Proxy Config ###

echo "HAPROXY Config"

cat << EOF >> /etc/haproxy/haproxy.cfg

frontend k8s
       bind ${INTERNAL_IP}:6443
       option tcplog
       mode tcp
       default_backend k8s-backend

backend k8s-backend
       mode        tcp
       balance     roundrobin
       option      tcp-check
       server      master01 ${MASTER[0]}:6443 check fall 3 rise 2
       server      master02 ${MASTER[1]}:6443 check fall 3 rise 2
       server      master03 ${MASTER[2]}:6443 check fall 3 rise 2
EOF

sleep 2
echo ""

echo "HAPROXY Service RESTART"
systemctl restart haproxy

netstat -lpn | grep 6443

echo ""
echo "HAPROXY INSTALL END"
echo ""


# Write and save the script above
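
As a defensive step (not part of the original script), the appended configuration can be validated before HAProxy is restarted:

# Syntax-check the generated haproxy.cfg
haproxy -c -f /etc/haproxy/haproxy.cfg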


[root@master01 ~]# cat 052_haproxy_install_exec.sh
#!/bin/bash

TARGET=("haproxy")
HAPROXY_IP=$1

### install scripts copy
for ((i=0; i<1; i++)); do
  echo "${TARGET[i]} install scripts copy START"

  scp 051_haproxy_setting.sh ${TARGET[i]}:~/

  echo "${TARGET[i]} install scripts copy END"
  echo ""
  sleep 2
done

### install scripts execute
for ((i=0; i<1; i++)); do
  echo "${TARGET[i]} install scripts EXEC START"

  ssh ${TARGET[i]} sh ~/051_haproxy_setting.sh

  echo "${TARGET[i]} install scripts EXEC END"
  echo ""
  sleep 2
done

sleep 2
echo "kubernetes vip connect test"
curl --cacert /var/lib/kubernetes/ca.pem https://${HAPROXY_IP}:6443/version


# Write and save the script, then execute it


sh 052_haproxy_install_exec.sh 192.168.1.47



The script already ends with a connectivity check, but verifying once more by hand:

[root@master01 ~]# curl --cacert /var/lib/kubernetes/ca.pem https://192.168.1.47:6443/version
{
  "major": "1",
  "minor": "18",
  "gitVersion": "v1.18.6",
  "gitCommit": "dff82dc0de47299ab66c83c626e08b245ab19037",
  "gitTreeState": "clean",
  "buildDate": "2020-07-15T16:51:04Z",
  "goVersion": "go1.13.9",
  "compiler": "gc",
  "platform": "linux/amd64"
}

Output like the above should be returned.




