One: Operating environment
Operating system
CentOS 7
Disable the firewall and SELinux
#systemctl stop firewalld && systemctl disable firewalld
#setenforce 0
#vim /etc/selinux/config
SELINUX=disabled
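Note that SELINUX=disabled in the config file only takes effect after a reboot; setenforce 0 switches the running system to permissive mode immediately. Confirm the current state with:
#getenforce
Permissive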
Add entries to /etc/hosts so that all hosts resolve each other:
192.168.50.128 k8s-master
192.168.50.135 k8s-node1
Two: installation and deployment
2.1 Preparation before installation
Before installing and deploying the cluster, synchronize the time of all servers through NTP; otherwise, errors may occur in subsequent operations.
ntpdate -u 192.168.2.68 (my physical machine at 192.168.2.68 runs an NTP server)
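To check the offset against the NTP server without actually adjusting the clock, ntpdate can be run in query-only mode:
#ntpdate -q 192.168.2.68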
Install the redhat-ca.crt certificate on the node
yum install '*rhsm*' -y
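These packages provide the Red Hat CA certificate that the kubelet needs in order to pull the pod-infrastructure image from registry.access.redhat.com. Verify that the certificate is in place (the usual location is /etc/rhsm/ca/redhat-uep.pem):
#ls -l /etc/rhsm/ca/redhat-uep.pem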
2.2 etcd cluster configuration
master node configuration
1. Install kubernetes etcd
yum -y install kubernetes-master etcd
2. Configure etcd options
#vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://192.168.50.128:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.50.128:2379,http://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.50.128:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.50.128:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.50.128:2380,etcd2=http://192.168.50.135:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
node configuration
1. Install and deploy kubernetes-node/etcd/flannel/docker
#yum -y install kubernetes-node etcd flannel docker
2. Configure etcd of k8s-node1
#vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS="" ETCD_DATA_DIR="/var/lib/etcd/default.etcd" #ETCD_WAL_DIR="" ETCD_LISTEN_PEER_URLS="http://192.168.50.135:2380" ETCD_LISTEN_CLIENT_URLS="http://192.168.50.135:2379,http://127.0.0.1:2379" #ETCD_MAX_SNAPSHOTS="5" #ETCD_MAX_WALS="5" ETCD_NAME="etcd2" #ETCD_SNAPSHOT_COUNT="100000" #ETCD_HEARTBEAT_INTERVAL="100" #ETCD_ELECTION_TIMEOUT="1000" #ETCD_QUOTA_BACKEND_BYTES="0" #ETCD_MAX_REQUEST_BYTES="1572864" #ETCD_GRPC_KEEPALIVE_MIN_TIME="5s" #ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s" #ETCD_GRPC_KEEPALIVE_TIMEOUT="20s" # #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.50.135:2380" ETCD_ADVERTISE_CLIENT_URLS="http://192.168.50.135:2379" #ETCD_DISCOVERY="" #ETCD_DISCOVERY_FALLBACK="proxy" #ETCD_DISCOVERY_PROXY="" #ETCD_DISCOVERY_SRV="" ETCD_INITIAL_CLUSTER="etcd1=http://192.168.50.128:2380,etcd2=http://192.168.50.135:2380" #ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" #ETCD_INITIAL_CLUSTER_STATE="new" #ETCD_STRICT_RECONFIG_CHECK="true" #ETCD_ENABLE_V2="true"
Start the etcd cluster (run on both the master and the node)
#systemctl start etcd.service
#systemctl status etcd.service
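If the service fails to come up, the systemd journal usually shows the reason (for example, a peer URL mismatch):
#journalctl -u etcd -n 50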
View etcd cluster status
[root@k8s-master ~]# systemctl start etcd.service
[root@k8s-master ~]# etcdctl cluster-health
member 272e2ecbe3d84558 is healthy: got healthy result from http://192.168.50.128:2379
member 94b5d90215d70e1e is healthy: got healthy result from http://192.168.50.135:2379
cluster is healthy
A brief explanation of the main configuration options:
[Member]
ETCD_NAME: a human-readable name for this member; it must match the name used for this member in ETCD_INITIAL_CLUSTER.
ETCD_DATA_DIR: the directory where etcd stores its data (WAL and snapshots).
ETCD_SNAPSHOT_COUNT: the number of committed transactions after which etcd writes a snapshot to disk.
ETCD_HEARTBEAT_INTERVAL: the interval (in milliseconds) between leader heartbeats.
ETCD_ELECTION_TIMEOUT: how long (in milliseconds) a follower waits without hearing a heartbeat before starting a new leader election.
ETCD_LISTEN_PEER_URLS: the URLs this member listens on for peer (cluster-internal) traffic.
ETCD_LISTEN_CLIENT_URLS: the URLs this member listens on for client traffic.
[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS: this member's peer URLs, advertised to the rest of the cluster.
ETCD_INITIAL_CLUSTER: the initial cluster membership used for bootstrapping, given as a list of name=peer-URL pairs.
ETCD_ADVERTISE_CLIENT_URLS: this member's client URLs, advertised to clients.
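As a quick sanity check that the cluster accepts writes, set and read back a throwaway key with the etcdctl v2 API (the key name /test here is arbitrary):
#etcdctl set /test "hello"
#etcdctl get /test
#etcdctl rm /test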
Three: Kubernetes cluster configuration
master node configuration
1. Modify the master configuration file
#vi /etc/kubernetes/apiserver
# The address on the local server to listen to.
#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
KUBE_API_ADDRESS="--address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"

# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.50.128:2379,http://192.168.50.135:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# default admission control policies
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"

# Add your own!
KUBE_API_ARGS=""
2. Start the service
systemctl start kube-apiserver.service
systemctl start kube-controller-manager.service
systemctl start kube-scheduler.service
systemctl enable kube-apiserver.service
systemctl enable kube-controller-manager.service
systemctl enable kube-scheduler.service
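With the three services running, the apiserver answers on the insecure port 8080; a quick way to verify the master is to query the version endpoint and the component statuses:
#curl http://127.0.0.1:8080/version
#kubectl get componentstatuses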
node configuration
1. Modify the configuration file of k8s-node1
cat /etc/kubernetes/config
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.50.128:8080"
2. Configure the kubelet
cat /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=127.0.0.1"

# The port for the info server to serve on
# KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.50.135"

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://192.168.50.128:8080"

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""
3. Network configuration
Configure flannel on the k8s-node1 node
cat /etc/sysconfig/flanneld
# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.50.128:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
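flanneld reads its network configuration from etcd under the key FLANNEL_ETCD_PREFIX/config, so this key must be created (on the master, for example) before flanneld is started. The 172.17.0.0/16 range below is only an example; choose a range that does not overlap the service cluster IP range:
#etcdctl set /atomic.io/network/config '{"Network":"172.17.0.0/16"}'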
Start related services
systemctl start kubelet && systemctl start kube-proxy
systemctl enable kubelet && systemctl enable kube-proxy
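flanneld and docker must also be started on the node; flanneld should come up first so that docker picks up the subnet flannel allocates:
#systemctl start flanneld && systemctl start docker
#systemctl enable flanneld && systemctl enable docker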
Test on k8s-master
[root@k8s-master ~]# kubectl get nodes
NAME STATUS AGE
192.168.50.135 Ready 21h
[root@k8s-master ~]# etcdctl member list
272e2ecbe3d84558: name=etcd1 peerURLs=http://192.168.50.128:2380 clientURLs=http://192.168.50.128:2379 isLeader=true
94b5d90215d70e1e: name=etcd2 peerURLs=http://192.168.50.135:2380 clientURLs=http://192.168.50.135:2379 isLeader=false
[root@k8s-master ~]# etcdctl cluster-health
member 272e2ecbe3d84558 is healthy: got healthy result from http://192.168.50.128:2379
member 94b5d90215d70e1e is healthy: got healthy result from http://192.168.50.135:2379
cluster is healthy