Infrastructure and Cloud for Enthusiasts

Have a Question?

Categories
< All Topics
Print

Deploy Kubernetes Manager

#

Deploy Kubernetes Manager (Master Node)

#

create user kube and add to visudo

deploy from kube

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

sudo yum install -y kubelet kubeadm kubectl docker
sudo systemctl enable kubelet
sudo systemctl start kubelet
sudo systemctl enable docker
sudo systemctl start docker

sudo swapoff -a

[root@kubemaster01 ~]# vi /etc/fstab

#

/etc/fstab

Created by anaconda on Mon May 4 10:53:19 2020

#

Accessible filesystems, by reference, are maintained under '/dev/disk'

See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info

#
/dev/mapper/centos-root / xfs defaults 0 0
UUID=9405a639-422e-4bf1-b29b-f184694e5003 /boot xfs defaults 0 0

/dev/mapper/centos-swap swap swap defaults 0 0

[root@kubemaster01 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@kubemaster01 ~]# systemctl start docker

[root@kubemaster01 ~]# echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables

[root@kubemaster01 ~]# kubeadm init

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubeadm join 172.16.80.41:6443 --token yuwzsf.ggiv86w1dmwkcwjr \
--discovery-token-ca-cert-hash sha256:213cbece723ea8a572c347e96fc8d93556d35137881754209e75d3038a742df0

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

[kubes@kubemaster01 .kube]$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubemaster01 NotReady master 7m28s v1.19.3

[kubes@kubemaster01 /]$ export kubever=$(kubectl version | base64 | tr -d '\n')
[kubes@kubemaster01 /]$ kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"

serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.apps/weave-net created

[kubes@kubemaster01 /]$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubemaster01 Ready master 179m v1.19.3

[kubes@kubemaster01 /]$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-f9fd979d6-6tv9n 0/1 Running 0 179m
kube-system coredns-f9fd979d6-z5cfp 0/1 Running 0 179m
kube-system etcd-kubemaster01 1/1 Running 0 179m
kube-system kube-apiserver-kubemaster01 1/1 Running 0 179m
kube-system kube-controller-manager-kubemaster01 1/1 Running 0 179m
kube-system kube-proxy-6llrq 1/1 Running 0 179m
kube-system kube-scheduler-kubemaster01 1/1 Running 0 179m
kube-system weave-net-sg28s 1/2 Running 0 27s
[kubes@kubemaster01 /]$

###############################################################################################################################################################

#

Deploy Kubernetes Worker Node

#

cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

sudo swapoff -a

[root@kubemaster01 ~]# echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables

[kubes@kubeworker02 ~]$ sudo yum install kubeadm docker -y

[kubes@kubeworker01 ~]$ sudo systemctl restart docker && sudo systemctl enable docker

[kubes@kubeworker02 ~]$ sudo systemctl enable kubelet.service
[kubes@kubeworker02 ~]$ sudo systemctl start kubelet.service
[kubes@kubeworker02 ~]$ sudo systemctl status kubelet.service

[kubes@kubeworker01 ~]$ sudo kubeadm join --token yuwzsf.ggiv86w1dmwkcwjr --discovery-token-ca-cert-hash sha256:213cbece723ea8a572c347e96fc8d93556d35137881754209e75d3038a742df0 kubemaster01:6443

#
#

Web Gui

#

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml

cat <<EOF | kubectl apply -f -

apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
EOF

cat <<EOF | kubectl apply -f -

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF

[kubes@kubemaster01 ~]$ kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
Name: admin-user-token-c2cfj
Namespace: kubernetes-dashboard
Labels:
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: ef8bae7b-3c24-4912-a253-3e85a81b86b4

Type: kubernetes.io/service-account-token

Data

ca.crt: 1066 bytes
namespace: 20 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IlhPamlqU3VKbnU1Tk40NDdONndGcW9qU0luUmZGdDhMZzAtX1J1TUNReEkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWMyY2ZqIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlZjhiYWU3Yi0zYzI0LTQ5MTItYTI1My0zZTg1YTgxYjg2YjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.UUG4CJZdmbNUaf_Jy_fKGODDB0TFpW2pFeXtmD-KjiYHTs0XGPGKJUH5ZfrNJCWEKg37P1XaGq60qBgXcTGxDjdiXh-H0JoN6R4p8oV_xLsQUI7X_Y0c2O6Q1yXnIkqpaEBAx7IQMjSko40i8Lu7n1vzCIq5i8BCg6rkD85Qmg2sX4ro7meU4y3t25je_mWtR_IJcsQKNdMs6lPt9me77T3gsxEnS82sYbfnsApS7fWeHZnSfHCVBtD–flLHDPL3pDd9K45TFWBW5SqfSSrr_5sShLLfFL22JodDAmh-zcHRfsjrgM5Jui0IBLYGFzExAvKzLOjPuxMJciPRjPFGg
[kubes@kubemaster01 ~]$

[kubes@kubemaster01 ~]$ kubectl proxy
Starting to serve on 127.0.0.1:8001

note you will have to ssh into the server to access the console

http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

kubemaster01 IN A 172.16.70.41
kubeworker01 IN A 172.16.70.51
kubeworker02 IN A 172.16.70.52

Table of Contents