Install K8S on Ubuntu 16.04
Environment Preparation
Server          Master   Worker   IP Address
k8s-master-1    Yes      x        10.10.1.217
k8s-master-2    Yes      x        10.10.1.218
k8s-node-219    x        Yes      10.10.1.219
k8s-node-220    x        Yes      10.10.1.220
ha-proxy        -        -        10.10.1.210
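If these hostnames do not already resolve in your environment, a minimal /etc/hosts sketch (using the addresses above; the kub-master alias is an assumption so the kubectl -s http://kub-master:8080 commands later in this post resolve) looks like:
-----------------------------------------------------------------------------------
cat <<EOF | sudo tee -a /etc/hosts
10.10.1.217 k8s-master-1
10.10.1.218 k8s-master-2
10.10.1.219 k8s-node-219
10.10.1.220 k8s-node-220
10.10.1.210 ha-proxy kub-master
EOF
-----------------------------------------------------------------------------------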
Environment Variables
P.S. These must be set up on every node.
source master-1_config
-----------------------------------------------------------------------------------
HOSTNAME=$(hostname)
MY_INTERFACE=eth0
MY_ETH0_IP=`/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
MASTER_1_IP=${MY_ETH0_IP}
MASTER_2_IP="10.10.1.218"
SERVICE_CLUSTER_IP_RANGE=172.16.0.0/16
FLANNEL_NET=172.17.0.0/16
HA_PROXY_IP=10.10.1.210
DNS_SERVER_IP="172.16.1.254"
ETCD_1_NAME=k8s-master-1
ETCD_2_NAME=k8s-master-2
ETCD_1_IP=${MASTER_1_IP}
ETCD_2_IP=${MASTER_2_IP}
-----------------------------------------------------------------------------------------
source master-2_config
-----------------------------------------------------------------------------------------
HOSTNAME=$(hostname)
MY_INTERFACE=eth0
MY_ETH0_IP=`/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
MASTER_1_IP="10.10.1.217"
MASTER_2_IP=${MY_ETH0_IP}
SERVICE_CLUSTER_IP_RANGE=172.16.0.0/16
FLANNEL_NET=172.17.0.0/16
HA_PROXY_IP=10.10.1.210
DNS_SERVER_IP="172.16.1.254"
ETCD_1_NAME=k8s-master-1
ETCD_2_NAME=k8s-master-2
ETCD_1_IP=${MASTER_1_IP}
ETCD_2_IP=${MASTER_2_IP}
------------------------------------------------------------------------------------------
source ha_config
------------------------------------------------------------------------------------------
HOSTNAME=$(hostname)
MY_INTERFACE=eth0
MY_ETH0_IP=`/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
MASTER_1_IP="10.10.1.217"
MASTER_2_IP="10.10.1.218"
SERVICE_CLUSTER_IP_RANGE=172.16.0.0/16
FLANNEL_NET=172.17.0.0/16
HA_PROXY_IP=10.10.1.210
DNS_SERVER_IP="172.16.1.254"
ETCD_1_NAME=k8s-master-1
ETCD_2_NAME=k8s-master-2
ETCD_1_IP=${MASTER_1_IP}
ETCD_2_IP=${MASTER_2_IP}
-------------------------------------------------------------------------------------------
Install etcd
P.S. Must be installed on every master.
curl -L https://github.com/coreos/etcd/releases/download/v3.0.7/etcd-v3.0.7-linux-amd64.tar.gz -o etcd-v3.0.7-linux-amd64.tar.gz
tar xzvf etcd-v3.0.7-linux-amd64.tar.gz && cd etcd-v3.0.7-linux-amd64
mkdir -p /opt/etcd/bin
cp etcd* /opt/etcd/bin/
Configure etcd
Create the required directories
mkdir -p /var/lib/etcd/
mkdir -p /opt/etcd/config/
etcd config
-----------------------------------------------------------------------------------------
cat <<EOF | sudo tee /opt/etcd/config/etcd.conf
ETCD_DATA_DIR=/var/lib/etcd
ETCD_NAME=$(hostname -s)
ETCD_INITIAL_CLUSTER=${ETCD_1_NAME}=http://${ETCD_1_IP}:2380,${ETCD_2_NAME}=http://${ETCD_2_IP}:2380
ETCD_INITIAL_CLUSTER_STATE=new
ETCD_LISTEN_PEER_URLS=http://${MY_ETH0_IP}:2380
ETCD_INITIAL_ADVERTISE_PEER_URLS=http://${MY_ETH0_IP}:2380
ETCD_ADVERTISE_CLIENT_URLS=http://${MY_ETH0_IP}:2379
ETCD_LISTEN_CLIENT_URLS=http://${MY_ETH0_IP}:2379,http://127.0.0.1:2379
ETCD_HEARTBEAT_INTERVAL=6000
ETCD_ELECTION_TIMEOUT=30000
GOMAXPROCS=$(nproc)
EOF
--------------------------------------------------------------------------------------------
cat <<EOF | sudo tee /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
Documentation=https://github.com/coreos/etcd
After=network.target
[Service]
User=root
Type=notify
EnvironmentFile=-/opt/etcd/config/etcd.conf
ExecStart=/opt/etcd/bin/etcd
Restart=on-failure
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target
EOF
----------------------------------------------------------------------------------------------
sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl start etcd
----------------------------------------------------------------------------------------------
Verify etcd
ss -antl
systemctl status etcd.service
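Beyond checking the listening ports, etcdctl can confirm the two members actually formed a cluster (etcd v3.0.7's etcdctl speaks the v2 API by default):
-----------------------------------------------------------------------------------
# both k8s-master-1 and k8s-master-2 should be listed and healthy
/opt/etcd/bin/etcdctl --endpoints http://127.0.0.1:2379 cluster-health
/opt/etcd/bin/etcdctl --endpoints http://127.0.0.1:2379 member list
-----------------------------------------------------------------------------------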
Install flannel
P.S. Must be installed on every master and node.
Download & install the flannel binary
curl -L https://github.com/coreos/flannel/releases/download/v0.6.1/flannel-v0.6.1-linux-amd64.tar.gz -o flannel.tar.gz
sudo mkdir -p /opt/flannel
sudo tar xzf flannel.tar.gz -C /opt/flannel
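A quick sanity check that the binary landed where the systemd unit below expects it:
-----------------------------------------------------------------------------------
/opt/flannel/flanneld --version    # should print 0.6.1
-----------------------------------------------------------------------------------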
------------------------------------------------------------------------------------------------------------
Install Kubernetes
P.S. Must be installed on every master and node.
Download the k8s binaries
curl -L 'https://github.com/kubernetes/kubernetes/releases/download/v1.3.6/kubernetes.tar.gz' -o kubernetes.tar.gz
tar xvf kubernetes.tar.gz && cd kubernetes
sudo tar xf ./server/kubernetes-server-linux-amd64.tar.gz -C /opt/
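The server tarball unpacks everything under /opt/kubernetes/server/bin; confirming this now saves debugging later (the PATH export is optional, the rest of this post uses absolute paths):
-----------------------------------------------------------------------------------
ls /opt/kubernetes/server/bin
# expect kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, kubectl, ...
echo 'export PATH=$PATH:/opt/kubernetes/server/bin' | sudo tee /etc/profile.d/kubernetes.sh
-----------------------------------------------------------------------------------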
Prepare Credentials
P.S. Needed on every master.
Generate certificates [method 1]
Google provides a reference for creating certificates: http://kubernetes.io/docs/admin/authentication/#creating-certificates
sudo mkdir -p /srv/kubernetes/ && cd /srv/kubernetes/
sudo openssl genrsa -out ca.key 2048
sudo openssl req -x509 -new -nodes -key ca.key -subj "/CN=${HA_PROXY_IP}" -days 10000 -out ca.crt
sudo openssl genrsa -out server.key 2048
sudo openssl req -new -key server.key -subj "/CN=${HA_PROXY_IP}" -out server.csr
sudo openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 10000
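Before wiring the files into the API server, it is worth sanity-checking them with standard openssl commands:
-----------------------------------------------------------------------------------
sudo openssl x509 -in server.crt -noout -subject -issuer -dates
# the two digests below must match, proving key and certificate belong together
sudo openssl rsa -in server.key -noout -modulus | openssl md5
sudo openssl x509 -in server.crt -noout -modulus | openssl md5
-----------------------------------------------------------------------------------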
The second method is the one CoreOS documents at https://coreos.com/kubernetes/docs/latest/openssl.html. It is largely the same as method 1; the main difference is that if you later customize the cluster domain you may run into certificate problems (like https://github.com/pires/kubernetes-elasticsearch-cluster/issues/27), and the CoreOS article covers how to specify alternate names when generating the certificates.
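For reference, a minimal sketch of supplying alternate names with plain openssl (the DNS entries below assume the default in-cluster service names and are illustrative; adjust them for your own cluster domain):
-----------------------------------------------------------------------------------
cat <<EOF | sudo tee openssl.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[v3_req]
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
IP.1 = ${HA_PROXY_IP}
EOF
sudo openssl req -new -key server.key -subj "/CN=${HA_PROXY_IP}" -config openssl.cnf -out server.csr
sudo openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial \
  -out server.crt -days 10000 -extensions v3_req -extfile openssl.cnf
-----------------------------------------------------------------------------------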
Configure the k8s services
k8s API Server [MASTER]
P.S. Must be installed on every master.
-----------------------------------------------------------------------------------------------
cat <<EOF | sudo tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
User=root
ExecStart=/opt/kubernetes/server/bin/kube-apiserver \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://${MASTER_1_IP}:2379,http://${MASTER_2_IP}:2379 \
--logtostderr=true \
--allow-privileged=false \
--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE} \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,SecurityContextDeny,ResourceQuota \
--service-node-port-range=30000-32767 \
--advertise-address=${MY_ETH0_IP} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.crt \
--tls-private-key-file=/srv/kubernetes/server.key
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
------------------------------------------------------------------------------------------
k8s kube-controller-manager [MASTER]
P.S. Must be installed on every master.
------------------------------------------------------------------------------------------
cat <<EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
User=root
ExecStart=/opt/kubernetes/server/bin/kube-controller-manager \
--master=127.0.0.1:8080 \
--root-ca-file=/srv/kubernetes/ca.crt \
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
-------------------------------------------------------------------------------------------
kube-scheduler [MASTER]
P.S. Must be installed on every master.
--------------------------------------------------------------------------------------------
cat <<EOF | sudo tee /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
User=root
ExecStart=/opt/kubernetes/server/bin/kube-scheduler \
--logtostderr=true \
--master=127.0.0.1:8080
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
---------------------------------------------------------------------------------------------
flanneld
P.S. Must be installed on every master and node.
1. Modify config: /etc/systemd/system/flanneld.service
---------------------------------------------------------------------------------------------
cat <<EOF | sudo tee /etc/systemd/system/flanneld.service
[Unit]
Description=Flanneld
Documentation=https://github.com/coreos/flannel
After=network.target
Before=docker.service
[Service]
User=root
ExecStart=/opt/flannel/flanneld \
--etcd-endpoints="http://${ETCD_1_IP}:2379,http://${ETCD_2_IP}:2379" \
--iface=${MY_ETH0_IP} \
--ip-masq
ExecStartPost=/bin/bash /opt/flannel/update_docker.sh
Restart=always
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
---------------------------------------------------------------------------------------------
2. Link the systemd service into the multi-user target
ln -s /etc/systemd/system/flanneld.service /etc/systemd/system/multi-user.target.wants/flanneld.service
3. Add /opt/flannel/update_docker.sh
#!/bin/bash
# Rewrite docker's ExecStart to use the subnet and MTU flannel leased for this host
source /run/flannel/subnet.env
sed -i "s|ExecStart=.*|ExecStart=/usr/bin/dockerd -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}|g" /lib/systemd/system/docker.service
systemctl daemon-reload
4. Restart Flannel
systemctl daemon-reload
systemctl restart flanneld
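Once flanneld is up it writes the subnet it leased into /run/flannel/subnet.env, which is exactly what update_docker.sh reads; check that it looks sane:
-----------------------------------------------------------------------------------
cat /run/flannel/subnet.env
# e.g. FLANNEL_SUBNET=172.17.25.1/24, FLANNEL_MTU=1450 (values differ per host)
-----------------------------------------------------------------------------------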
BTW:
If you use docker 1.13.x, modify /lib/systemd/system/docker.service and
add ExecStartPost=/sbin/iptables -P FORWARD ACCEPT under the [Service] section:
---------------------------------------------------------------------------------------------------------------
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket firewalld.service
Requires=docker.socket
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
#ExecStart=/usr/bin/dockerd -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=172.17.25.1/24 --mtu=1450
ExecStart=/usr/bin/dockerd -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=172.17.25.1/24 --mtu=1450
ExecStartPost=/sbin/iptables -P FORWARD ACCEPT
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
[Install]
WantedBy=multi-user.target
----------------------------------------------------------------------------------------------------------------
systemctl daemon-reload
systemctl restart docker
Configure and initialize Flannel
Official docs: https://coreos.com/flannel/docs/latest/flannel-config.html
Flannel's default backend type is udp; since we want vxlan as the backend, we have to push the config into etcd ourselves for flannel to read.
// run on the master only
sudo /opt/etcd/bin/etcdctl set /coreos.com/network/config '{"Network":"'${FLANNEL_NET}'", "Backend": {"Type": "vxlan"}}'
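Read the key back to confirm flannel will see the vxlan config:
-----------------------------------------------------------------------------------
sudo /opt/etcd/bin/etcdctl get /coreos.com/network/config
# {"Network":"172.17.0.0/16", "Backend": {"Type": "vxlan"}}
-----------------------------------------------------------------------------------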
Enable the services
P.S. Run on every master.
sudo systemctl daemon-reload
sudo systemctl enable kube-apiserver
sudo systemctl enable kube-controller-manager
sudo systemctl enable kube-scheduler
sudo systemctl enable flanneld   # on every node as well
Start the services
sudo systemctl start kube-apiserver
sudo systemctl start kube-controller-manager
sudo systemctl start kube-scheduler
sudo systemctl start flanneld   # on every node as well
P.S. To inspect systemd errors, use:
journalctl -xe or systemctl status [service name]
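A quick health pass once everything on a master is up (plain HTTP, since the insecure port is enabled above):
-----------------------------------------------------------------------------------
curl -s http://127.0.0.1:8080/healthz    # expect: ok
/opt/kubernetes/server/bin/kubectl -s http://127.0.0.1:8080 get componentstatuses
-----------------------------------------------------------------------------------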
------------------------------------------------------------------------------------------------
[optional: for troubleshooting]
Configure docker to use flannel
Get the subnet flannel assigned to this host.
P.S. Needed on every master and node.
sudo su
docker_support_flannel.sh
-------------------------------------------------------------------------------------------------
#!/bin/bash
# Point docker at the subnet and MTU flannel leased for this host
source /run/flannel/subnet.env
sed -i "s|ExecStart=.*|ExecStart=/usr/bin/dockerd -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}|g" /lib/systemd/system/docker.service
# Remove any existing docker0 bridge so docker recreates it with the new --bip
rc=0
ip link show docker0 >/dev/null 2>&1 || rc="$?"
if [[ "$rc" -eq "0" ]]; then
  ip link set dev docker0 down
  ip link delete docker0
fi
-------------------------------------------------------------------------------------------------
P.S. In the sed command you can append any extra flags docker needs at startup, e.g. --insecure-registry=xx.xx.xx.xx:5000
Restart docker
sudo systemctl daemon-reload
sudo systemctl enable docker
sudo systemctl restart docker
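Confirm docker really picked up the flannel subnet: docker0's address should sit inside FLANNEL_SUBNET rather than the stock 172.17.0.1/16:
-----------------------------------------------------------------------------------
ip addr show docker0 | grep 'inet '
source /run/flannel/subnet.env && echo "expected: ${FLANNEL_SUBNET}"
-----------------------------------------------------------------------------------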
---------------------------------------------------------------------------------------------------
Deploy the k8s nodes
Configure the k8s kubelet
P.S. Must be installed on every node.
---------------------------------------------------------------------------------------------------
cat <<EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
ExecStart=/opt/kubernetes/server/bin/kubelet \
--hostname-override=${MY_ETH0_IP} \
--api-servers=http://${HA_PROXY_IP}:8080 \
--logtostderr=true
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
--------------------------------------------------------------------------------------------------
Start kubelet
sudo systemctl daemon-reload
sudo systemctl enable kubelet
sudo systemctl start kubelet
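The node should register itself within a few seconds; check through the ha-proxy endpoint the kubelet was pointed at:
-----------------------------------------------------------------------------------
/opt/kubernetes/server/bin/kubectl -s http://10.10.1.210:8080 get nodes
# the node's IP (e.g. 10.10.1.219) should appear with STATUS Ready
-----------------------------------------------------------------------------------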
Configure k8s kube-proxy
P.S. Must be installed on every node.
--------------------------------------------------------------------------------------------------
cat <<EOF | sudo tee /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
ExecStart=/opt/kubernetes/server/bin/kube-proxy \
--hostname-override=${MY_ETH0_IP} \
--master=http://${HA_PROXY_IP}:8080 \
--logtostderr=true
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
-----------------------------------------------------------------------------------------------
Start kube-proxy
sudo systemctl daemon-reload
sudo systemctl enable kube-proxy
sudo systemctl start kube-proxy
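kube-proxy in this version programs iptables NAT rules for services; a rough check that it is active (the KUBE-* chain names are what the 1.3-era iptables proxier creates):
-----------------------------------------------------------------------------------
sudo iptables -t nat -L -n | grep KUBE
systemctl status kube-proxy
-----------------------------------------------------------------------------------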
----------------------------------------------------------------------------------------------
Testing
Deployment complete.
Prepare the config kubectl needs
P.S. Needed on every client node.
vim kubectl.config
----------------------------------------------------------------------------------------------
KUBE_USER=""
KUBE_PASSWORD=""
DEFAULT_KUBECONFIG="${HOME}/.kube/config"
KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
HA_PROXY_IP=10.10.1.210
-----------------------------------------------------------------------------------------------
Set up ~/.kube/config
mkdir -p $(dirname "${KUBECONFIG}")
touch "${KUBECONFIG}"
export CONTEXT=ubuntu
KUBECONFIG="${KUBECONFIG}" /opt/kubernetes/server/bin/kubectl config set-cluster "${CONTEXT}" --server=http://${HA_PROXY_IP}:8080 --insecure-skip-tls-verify=true
KUBECONFIG="${KUBECONFIG}" /opt/kubernetes/server/bin/kubectl config set-credentials "${CONTEXT}" --username=${KUBE_USER} --password=${KUBE_PASSWORD}
KUBECONFIG="${KUBECONFIG}" /opt/kubernetes/server/bin/kubectl config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"
KUBECONFIG="${KUBECONFIG}" /opt/kubernetes/server/bin/kubectl
----------------------------------------------------------------------------------------------------------------------------------------
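With the context in place, kubectl no longer needs the -s flag; for example:
-----------------------------------------------------------------------------------
/opt/kubernetes/server/bin/kubectl config current-context    # ubuntu
/opt/kubernetes/server/bin/kubectl get nodes
-----------------------------------------------------------------------------------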
Install Add-ons
kubedns (a.k.a. skydns)
vim kubedns_config
----------------------------------------------------------------------
DNS_DOMAIN="k8s.kidscrape"
DNS_REPLICAS=1
KUBE_APISERVER_URL=http://${HA_PROXY_IP}:8080
-------------------------------------------------------------------
Download the latest skydns yaml from upstream
curl -OL 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/skydns-rc.yaml.base'
P.S. If it is not there, get it from kubernetes.tar.gz.
Apply the settings
sed -i "s/__PILLAR__DNS__REPLICAS__/${DNS_REPLICAS}/g" skydns-rc.yaml.base
sed -i "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" skydns-rc.yaml.base
Start skydns
/opt/kubernetes/server/bin/kubectl create -f skydns-rc.yaml.base
-------------------------------------------------------------------------------------------------------------
The skydns service yaml
curl -OL 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/skydns-svc.yaml.base'
P.S. If it is not there, get it from kubernetes.tar.gz.
Apply the settings
sed -i "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" skydns-svc.yaml.base
Create the skydns service
/opt/kubernetes/server/bin/kubectl create -f skydns-svc.yaml.base
---------------------------------------------------------------------------------------------------------------
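To verify DNS from inside the cluster, one common smoke test is a throwaway busybox pod resolving the kubernetes service (the name below assumes the k8s.kidscrape domain set earlier):
-----------------------------------------------------------------------------------
cat <<EOF | /opt/kubernetes/server/bin/kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["sleep", "3600"]
EOF
/opt/kubernetes/server/bin/kubectl exec busybox -- nslookup kubernetes.default.svc.k8s.kidscrape
-----------------------------------------------------------------------------------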
Install kubernetes-dashboard
Download the latest kubernetes-dashboard yaml from upstream
curl -OL 'https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml'
Apply the settings
sed -i "s~# - --apiserver-host=http://my-address:port~- --apiserver-host=http://${HA_PROXY_IP}:8080~g" kubernetes-dashboard.yaml
Start kubernetes-dashboard
/opt/kubernetes/server/bin/kubectl create -f kubernetes-dashboard.yaml
----------------------------------------------------------------------------------------------------------------
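With the insecure API port open, the dashboard is reachable through the API server proxy; for dashboards of this era the short /ui path should redirect to the right place (an assumption; adjust if your build serves a different path):
-----------------------------------------------------------------------------------
# open in a browser, or:
curl -s -I http://10.10.1.210:8080/ui
-----------------------------------------------------------------------------------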
Test kubernetes
/opt/kubernetes/server/bin/kubectl get nodes
---------------------------------------------------------------------------------------------------
Useful commands
iptables -P FORWARD ACCEPT
kubectl cluster-info
kubectl -s http://kub-master:8080 get componentstatuses
kubectl -s http://kub-master:8080 get nodes
-------------------------------------------------------------------------------------------------------
Install heapster 1.2.0
cd heapster-1.2.0/deploy/kube-config/influxdb
vim heapster-controller.yaml
-------------------------------------------------------------------------------------------------------
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    k8s-app: heapster
    name: heapster
    version: v6
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  selector:
    k8s-app: heapster
    version: v6
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v6
    spec:
      containers:
      - name: heapster
        image: kubernetes/heapster:canary
        imagePullPolicy: Always
        command:
        - /heapster
        - --source=kubernetes:http://10.10.1.210:8080?inClusterConfig=false
        - --sink=influxdb:http://monitoring-influxdb:8086
-----------------------------------------------------------------------------------------------------------------------------------------
kubectl create -f ./
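Give the pods a minute to pull images, then confirm heapster and influxdb are running:
-----------------------------------------------------------------------------------
kubectl -s http://kub-master:8080 get pods --namespace=kube-system
kubectl -s http://kub-master:8080 get svc --namespace=kube-system
-----------------------------------------------------------------------------------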