
Common k8s Commands


kubectl get daemonset -n kube-system
kubectl describe  pod node-problem-detector-2qhhq  -n kube-system

[root@fpNet-web-38 ~]# kubectl edit pod node-problem-detector-2qhhq  -n kube-system 
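
kubectl edit opens the live object in $EDITOR and applies the change on save. For a scripted one-off change, kubectl patch is an alternative; a minimal sketch (the label key/value below are made up for illustration):

kubectl patch pod node-problem-detector-2qhhq -n kube-system --type merge -p '{"metadata":{"labels":{"debug":"true"}}}'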

[root@fpNet-web-38 ~]# kubectl get secret -n kube-system
NAME                                  TYPE                                  DATA   AGE
calico-etcd-secrets                   Opaque                                3      3y
calico-kube-controllers-token-rr8zr   kubernetes.io/service-account-token   3      3y
calico-node-token-vw27m               kubernetes.io/service-account-token   3      3y
coredns-token-fbflr                   kubernetes.io/service-account-token   3      3y
dashboard-admin-token-bx8bm           kubernetes.io/service-account-token   3      3y
default-token-4nttt                   kubernetes.io/service-account-token   3      3y
flannel-token-bddfh                   kubernetes.io/service-account-token   3      122d
heapster-token-wmwln                  kubernetes.io/service-account-token   3      3y
jenkins-admin-token-8c2nj             kubernetes.io/service-account-token   3      2y
kube-dns-autoscaler-token-vz9xv       kubernetes.io/service-account-token   3      3y
kubernetes-dashboard-certs            Opaque                                0      3y
kubernetes-dashboard-key-holder       Opaque                                2      3y
kubernetes-dashboard-token-xnpwx      kubernetes.io/service-account-token   3      3y
kubesystem-harborsecretkey            kubernetes.io/dockerconfigjson        1      65d
serving-ca                            kubernetes.io/tls                     2      1y
tiller-token-4ndtb                    kubernetes.io/service-account-token   3      1y
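
Secret data fields are base64-encoded. A sketch for reading one value (the token key is standard for service-account-token secrets; check with -o yaml first if unsure):

kubectl get secret default-token-4nttt -n kube-system -o jsonpath='{.data.token}' | base64 -d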


[root@fpNet-web-38 ~]# kubectl get pod -n kube-system -o wide
[root@fpNet-web-38 ~]# kubectl exec -ti  node-problem-detector-cdtmc  -n kube-system -- /bin/sh
[root@fpNet-web-38 ~]# kubectl get pod -n default
[root@fpNet-web-38 ~]# kubectl label nodes ht3.node qt=m-quetion
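
Once applied, the label can filter node listings or serve as a nodeSelector, e.g.:

kubectl get nodes -l qt=m-quetion
kubectl get nodes --show-labels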

[root@fpNet-web-38 ~]# kubectl describe pod hts-node-0
Name:           hts-node-0
Namespace:      default
Node:           ht3.node/10.128.51.13
Start Time:     Fri, 17 Dec 2021 01:00:24 +0800
Labels:         app=hts
                controller-revision-hash=hts-node-7db478ff95
                name=hts-node
                statefulset.kubernetes.io/pod-name=hts-node-0
Annotations:    cattle.io/timestamp=2020-11-13T08:00:36Z
                field.cattle.io/ports=[[{"containerPort":8088,"dnsName":"hts-node","kind":"ClusterIP","name":"8088tcp2","protocol":"TCP"}]]
Status:         Running
IP:             172.17.68.253
Controlled By:  StatefulSet/hts-node
Containers:
  hts-node:
    Container ID:   docker://d5c53b9366e0e7047f2c1b235240232e5fb7d4a6a39e80c3986c42fbdf1b4b51
    Image:          registry.zgshfp.com.cn/hts/test/hts:1.0.2-SNAPSHOT
    Image ID:       docker-pullable://registry.zgshfp.com.cn/hts/test/hts@sha256:82bea07d55a8a558c6ce9ac7783b83b171e4fc0e96104b8faa4b2ab2ade93661
    Port:           8088/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Fri, 17 Dec 2021 23:42:54 +0800
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Fri, 17 Dec 2021 23:41:12 +0800
      Finished:     Fri, 17 Dec 2021 23:41:33 +0800
    Ready:          True
    Restart Count:  149
    Limits:
      cpu:     2
      memory:  6Gi
    Requests:
      cpu:     500m
      memory:  1Gi
    Environment:
      MY_ZONE:  http://eureka-server-node-0.eureka-server:8761/eureka/
      MY_HOST:  hts-node-0 (v1:metadata.name)
    Mounts:
      /bill from guabill (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-lgcgp (ro)
Conditions:
  Type           Status
  Initialized    True
  Ready          True
  PodScheduled   True
Volumes:
  guabill:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    10.129.56.90
    Path:      /nfs/bill
    ReadOnly:  false
  default-token-lgcgp:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-lgcgp
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  role=ht
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:          <none>
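
A Restart Count of 149 together with Last State: Terminated (Exit Code 1) means this container has been crash-looping; the logs of the previous instance usually show why:

kubectl logs hts-node-0 --previous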



[root@fpNet-web-38 ~]# kubectl get pod -n default -o wide
NAME                   READY   STATUS    RESTARTS   AGE   IP              NODE
bnz-node-0             1/1     Running   0          50m   172.17.157.63   ht2.node
cfs-node-0             1/1     Running   230        38d   172.17.157.42   ht2.node
cfsqz-node-0           1/1     Running   458        39d   172.17.9.152    ht4.node
config-server-node-0   1/1     Running   0          37d   172.17.157.27   ht2.node
eai-node-0             1/1     Running   0          9d    172.17.157.49   ht2.node
ets-node-0             1/1     Running   230        38d   172.17.157.47   ht2.node
ets-node-1             1/1     Running   457        39d   172.17.9.151    ht4.node
eureka-server-node-0   1/1     Running   0          3d    172.17.157.9    ht2.node
gas-node-0             1/1     Running   186        38d   172.17.68.203   ht3.node



[root@fpNet-web-38 ~]# kubectl logs madp-ccy-biz-76dd48cd8d-4kjnw -n ccy --previous
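
Other standard log flags that are often useful here:

kubectl logs -f madp-ccy-biz-76dd48cd8d-4kjnw -n ccy           # follow the stream
kubectl logs --tail=100 madp-ccy-biz-76dd48cd8d-4kjnw -n ccy   # only the last 100 lines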

[root@fpNet-web-38 ~]# kubectl get pods -n kube-system -o wide
NAME                                     READY   STATUS     RESTARTS   AGE    IP              NODE
calico-kube-controllers-98989846-4d7kb   1/1     Running    0          68d    10.129.55.111   k1.master
calico-node-684tf                        1/1     Running    10         2y     10.128.51.11    ht1.node
calico-node-7pgxg                        1/1     Running    22         2y     10.128.51.13    ht3.node
calico-node-8gwnb                        1/1     Running    1          51d    10.129.55.113   k3.master
calico-node-bpvhz                        1/1     Running    7          120d   10.129.55.121   ht7.node
calico-node-dxt68                        1/1     Running    4          76d    10.128.51.14    ht4.node
calico-node-hh5gr                        1/1     Running    7          2y     10.129.55.111   k1.master
calico-node-ld4pd                        1/1     Running    1          122d   10.129.55.130   ht5.node
calico-node-nfnh8                        1/1     NodeLost   0          57d    10.129.55.120   ht10.node
calico-node-p6lnr                        1/1     Running    4          75d    10.128.51.12    ht2.node
calico-node-qpmgd                        1/1     Running    10         2y     10.129.55.112   k2.master
calico-node-rfns5                        1/1     Running    1          122d   10.129.55.131   ht6.node
calico-node-vxckw                        1/1     Running    0          55d    10.129.52.191   ht11.node



[root@fpNet-web-38 ~]#  kubectl get pod --all-namespaces | grep 'cattle-node-agent'


[root@fpNet-web-38 ~]#  kubectl label nodes ht10.node role=ht 
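
The inverse operation removes a label by suffixing the key with a minus sign:

kubectl label nodes ht10.node role-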

[root@fpNet-web-38 ~]# kubectl get node -o wide
NAME        STATUS                        ROLES        AGE    VERSION    EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION                CONTAINER-RUNTIME
ht1.node    Ready                         k8s-node     2y     v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64         docker://18.3.0
ht10.node   NotReady,SchedulingDisabled   k8s-node     57d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-1160.45.1.el7.x86_64   docker://18.3.0
ht11.node   Ready,SchedulingDisabled      k8s-node     55d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64         docker://18.3.0
ht12.node   Ready,SchedulingDisabled      k8s-node     55d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-327.el7.x86_64         docker://18.3.0
ht13.node   Ready,SchedulingDisabled      k8s-node     55d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-327.el7.x86_64         docker://18.3.0
ht2.node    Ready                         k8s-node     75d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64         docker://18.3.0
ht20.node   Ready                         k8s-node     4d     v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-1160.45.1.el7.x86_64   docker://18.3.0
ht3.node    Ready                         k8s-node     2y     v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64         docker://18.3.0
ht4.node    Ready,SchedulingDisabled      k8s-node     76d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64         docker://18.3.0
ht5.node    Ready,SchedulingDisabled      k8s-node     122d   v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-1160.42.2.el7.x86_64   docker://18.3.0
ht6.node    Ready,SchedulingDisabled      k8s-node     122d   v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-1160.42.2.el7.x86_64   docker://18.3.0
ht7.node    Ready,SchedulingDisabled      k8s-node     120d   v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-327.el7.x86_64         docker://18.3.0
ht8.node    Ready,SchedulingDisabled      k8s-node     58d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-1160.45.1.el7.x86_64   docker://18.3.0
k1.master   Ready                         k8s-master   2y     v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-862.11.6.el7.x86_64    docker://18.6.1
k2.master   Ready                         k8s-master   2y     v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-327.el7.x86_64         docker://18.3.0
k3.master   Ready,SchedulingDisabled      k8s-master   51d    v1.10.13   <none>        CentOS Linux 7 (Core)   3.10.0-327.el7.x86_64         docker://18.3.0

[root@fpNet-web-38 ~]# kubectl get node
NAME        STATUS                        ROLES        AGE    VERSION
ht1.node    Ready                         k8s-node     2y     v1.10.13
ht10.node   NotReady,SchedulingDisabled   k8s-node     57d    v1.10.13
ht11.node   Ready,SchedulingDisabled      k8s-node     55d    v1.10.13
ht12.node   Ready,SchedulingDisabled      k8s-node     55d    v1.10.13
ht13.node   Ready,SchedulingDisabled      k8s-node     55d    v1.10.13
ht2.node    Ready                         k8s-node     75d    v1.10.13
ht20.node   Ready                         k8s-node     4d     v1.10.13
ht3.node    Ready                         k8s-node     2y     v1.10.13
ht4.node    Ready,SchedulingDisabled      k8s-node     76d    v1.10.13
ht5.node    Ready,SchedulingDisabled      k8s-node     122d   v1.10.13
ht6.node    Ready,SchedulingDisabled      k8s-node     122d   v1.10.13
ht7.node    Ready,SchedulingDisabled      k8s-node     120d   v1.10.13
ht8.node    Ready,SchedulingDisabled      k8s-node     58d    v1.10.13
k1.master   Ready                         k8s-master   2y     v1.10.13
k2.master   Ready                         k8s-master   2y     v1.10.13
k3.master   Ready,SchedulingDisabled      k8s-master   51d    v1.10.13
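
SchedulingDisabled in the STATUS column means the node has been cordoned. Cordoning is toggled with the commands below (drain additionally evicts running pods):

kubectl cordon ht4.node
kubectl uncordon ht4.node
kubectl drain ht4.node --ignore-daemonsets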



[root@fpNet-web-38 ~]# kubectl describe pod yyht-node-0
Name:           yyht-node-0
Namespace:      default
Node:           ht2.node/10.128.51.12
Start Time:     Tue, 18 Jan 2022 19:03:01 +0800
Labels:         app=yyht
                controller-revision-hash=yyht-node-7f59998888
                name=yyht-node
                statefulset.kubernetes.io/pod-name=yyht-node-0
Annotations:    <none>
Status:         Running
IP:             172.17.157.13
Controlled By:  StatefulSet/yyht-node
Containers:
  yyht-node:
    Container ID:   docker://d7880a81fd5bad35104cdcca56fd0c0fcf2ee1e1d5111114af8bb9b607d7fd37
    Image:          registry.zgshfp.com.cn/yyht/test/yyht:v01
    Image ID:       docker-pullable://registry.zgshfp.com.cn/yyht/test/yyht@sha256:453e197c464817ffe60a56ea5553b67a5ca1961bc1e48663554a4e81aa77e248
    Port:           8280/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Tue, 18 Jan 2022 19:03:57 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  6Gi
    Requests:
      cpu:     2
      memory:  6Gi
    Environment:
      MY_ZONE:  http://yyht-server-node-0.yyht-server:8280/yyht/
      MY_HOST:  yyht-node-0 (v1:metadata.name)
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-lgcgp (ro)
Conditions:
  Type           Status
  Initialized    True
  Ready          True
  PodScheduled   True
Volumes:
  default-token-lgcgp:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-lgcgp
    Optional:    false
QoS Class:       Guaranteed
Node-Selectors:  role=ht
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:          <none>


 

[root@fpNet-web-38 ~]# kubectl get pod -o wide
NAME                   READY   STATUS    RESTARTS   AGE   IP              NODE
bnz-node-0             1/1     Running   0          48m   172.17.157.63   ht2.node
cfs-node-0             1/1     Running   230        38d   172.17.157.42   ht2.node
cfsqz-node-0           1/1     Running   458        39d   172.17.9.152    ht4.node
config-server-node-0   1/1     Running   0          37d   172.17.157.27   ht2.node
eai-node-0             1/1     Running   0          9d    172.17.157.49   ht2.node
ets-node-0             1/1     Running   230        38d   172.17.157.47   ht2.node
ets-node-1             1/1     Running   457        39d   172.17.9.151    ht4.node
eureka-server-node-0   1/1     Running   0          3d    172.17.157.9    ht2.node

[root@fpNet-web-38 ~]# kubectl exec -ti yyht-node-0 -- /bin/sh
# exit

 

[root@fpNet-web-38 ~]# kubectl top node
NAME        CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
ht20.node   60m          0%     1257Mi          0%
k1.master   255m         3%     10601Mi         73%
k2.master   187m         2%     10328Mi         71%
k3.master   88m          1%     2818Mi          19%

 

[root@fpNet-web-38 ~]# kubectl top pods
NAME                            CPU(cores)   MEMORY(bytes)
mycat-server-69bc466898-7xsf5   0m           11Mi
mynginx-5dcdc64d96-tpx87        0m           4Mi
tas-node-0                      8m           4535Mi
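
kubectl top is served by the cluster's metrics pipeline (heapster, judging by the kube-system pod list above). Per-container figures are available as well:

kubectl top pod tas-node-0 --containers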

 

[root@fpNet-web-38 ~]# kubectl top pod --all-namespaces
NAMESPACE           NAME                                                      CPU(cores)   MEMORY(bytes)
cattle-prometheus   exporter-kube-state-cluster-monitoring-75d858d74b-tgd94   2m           51Mi
cattle-prometheus   exporter-node-cluster-monitoring-kgswg                    0m           19Mi
cattle-prometheus   exporter-node-cluster-monitoring-nhfql                    0m           24Mi
cattle-prometheus   exporter-node-cluster-monitoring-qhqtj                    1m           26Mi
cattle-prometheus   exporter-node-cluster-monitoring-zzmxh                    0m           23Mi
cattle-prometheus   grafana-cluster-monitoring-6469fccc5b-f5x7z               5m           64Mi
cattle-prometheus   prometheus-operator-monitoring-operator-fdc87c4f9-wqx7r   2m           33Mi
cattle-system       cattle-cluster-agent-6bb5d65fbb-6hqp2                     4m           130Mi
cattle-system       cattle-node-agent-kkjs9                                   0m           34Mi
cattle-system       cattle-node-agent-p44fb                                   0m           43Mi
cattle-system       cattle-node-agent-pg5sl                                   0m           38Mi
cattle-system       cattle-node-agent-z4n9k                                   0m           37Mi
cattle-system       rancher-587657dd88-bjtdk                                  6m           377Mi
cattle-system       rancher-587657dd88-btzmx                                  12m          360Mi
cattle-system       rancher-587657dd88-d6v9l                                  8m           325Mi
ccy                 open-api-77d75f49d-cbprm                                  8m           1373Mi
ccy                 open-gateway-758dc9ccb-52s8n                              0m           483Mi
code-server         code-server-node-6b7ff6f5ff-rf9mf                         0m           58Mi
default             mycat-server-69bc466898-7xsf5                             0m           11Mi
default             mynginx-5dcdc64d96-tpx87                                  0m           4Mi
default             tas-node-0                                                9m           4535Mi
gxfp                gx-fp-admin-677958b797-x9s9x                              0m           804Mi
ingress-nginx       nginx-ingress-controller-d9b4dd7fb-59g2d                  6m           190Mi
kayak               nginx-h5-6d8f965bd7-sbqqr                                 0m           42Mi
kayak               nginx-kibana-594b6f4db5-scxzw                             0m           9Mi
kayak               nginx-web-6bfd7dd77b-qx5n8                                0m           42Mi
kube-ops            node-exporter-cjg82                                       0m           3Mi
kube-ops            node-exporter-jfvnp                                       0m           10Mi
kube-ops            node-exporter-rqqzn                                       0m           6Mi
kube-system         calico-kube-controllers-98989846-4d7kb                    1m           30Mi
kube-system         calico-node-8gwnb                                         0m           6Mi
kube-system         calico-node-hh5gr                                         0m           5Mi
kube-system         calico-node-qpmgd                                         0m           12Mi
kube-system         calico-node-z8cg8                                         0m           11Mi
kube-system         coredns-5847d5f4b5-4n9ll                                  1m           27Mi
kube-system         coredns-5847d5f4b5-jq7b5                                  2m           24Mi
kube-system         heapster-564cbf588d-rqn94                                 5m           103Mi
kube-system         kubernetes-dashboard-5bd858cfc8-pqhfg                     0m           114Mi
kube-system         node-exporter-gdxht                                       0m           5Mi
kube-system         node-problem-detector-5fdb4                               0m           18Mi
kube-system         node-problem-detector-7dfzr                               1m           23Mi
kube-system         node-problem-detector-bdwv9                               3m           14Mi

 

Dynamic scaling

Scaling acts on the controller (a StatefulSet here), not on individual pods:

kubectl scale sts ets-node -n default --replicas=3
kubectl scale sts yyht-node -n default --replicas=3
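
After scaling, progress can be checked via the pod label (name=yyht-node, as seen in the describe output above); rollout status works for StatefulSets with the RollingUpdate strategy:

kubectl get pod -l name=yyht-node -n default -o wide
kubectl rollout status sts/yyht-node -n default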

Delete a pod (one owned by a StatefulSet is recreated automatically)

kubectl delete pod mbs-node-1 -n default

Overwrite an existing label

kubectl label nodes ht3.node role=no --overwrite

Force delete (skips the grace period; use with care)

kubectl delete pod mbs-node-0 -n default --grace-period=0 --force
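
A force delete only removes the API object immediately; the container may still be running, so it is worth verifying (a sketch; docker ps must be run on the node that hosted the pod):

kubectl get pod mbs-node-0 -n default
docker ps | grep mbs-node-0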

Describe a specific pod

kubectl describe pod hts-node-0

List all pods in a namespace

kubectl get pod -o wide -n ccy

Exec into a pod

kubectl exec -it open-oauth-5f7bdd5f7f-nbwwm -n ccy -- /bin/sh

List all services (the default namespace is used when -n is omitted)

[root@fpNet-web-38 ~]# kubectl get svc
NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                         AGE
bnz-server           ClusterIP   None             <none>        8098/TCP                        287d
cfs-server           ClusterIP   None             <none>        8086/TCP                        2y
cfsqz-server         ClusterIP   None             <none>        8095/TCP                        2y
config-server        ClusterIP   None             <none>        7001/TCP                        3y
config-server-np     NodePort    172.17.193.179   <none>        7001:42130/TCP                  37d
eai-server           ClusterIP   None             <none>        8081/TCP                        3y
ets-server           ClusterIP   None             <none>        8066/TCP                        2y
eureka-server        ClusterIP   None             <none>        8761/TCP                        3y
eureka-server-np     NodePort    172.17.63.68     <none>        8761:38761/TCP                  3y
gas-server           ClusterIP   None             <none>        9030/TCP                        3y
gas-server-np        NodePort    172.17.239.137   <none>        9030:39030/TCP                  3y
grd-api-server       ClusterIP   None             <none>        8089/TCP                        3y
grd-server           ClusterIP   None             <none>        8089/TCP                        3y
hts-server           ClusterIP   None             <none>        8088/TCP                        3y
kubernetes           ClusterIP   172.17.0.1       <none>        443/TCP                         3y
lss-server           ClusterIP   None             <none>        8089/TCP                        2y
mbs-server           ClusterIP   None             <none>        8083/TCP                        3y
monitor-server       ClusterIP   None             <none>        9009/TCP                        3y
monitor-server-np    NodePort    172.17.87.148    <none>        9009:39009/TCP                  3y
mts-server           ClusterIP   None             <none>        8082/TCP                        3y
mycat-server         ClusterIP   172.17.198.18    <none>        8066/TCP                        81d
openapi-server       ClusterIP   None             <none>        8093/TCP                        1y
ops-server           ClusterIP   None             <none>        8090/TCP                        2y
postgres-sonar       ClusterIP   None             <none>        5432/TCP                        1y
pps-server           ClusterIP   None             <none>        8087/TCP                        2y
pts-server           ClusterIP   None             <none>        8080/TCP                        2y
pts-server-np        NodePort    172.17.99.166    <none>        8080:38180/TCP                  2y
pts2-server          ClusterIP   None             <none>        8080/TCP                        3y
rabbitmq             ClusterIP   None             <none>        5672/TCP,4369/TCP,25672/TCP     38d
rmq-cluster          ClusterIP   172.17.120.12    <none>        9876/TCP                        3y
rmq-cluster-svc      ClusterIP   None             <none>        9876/TCP                        3y
rmq-cluster-svc-np   NodePort    172.17.145.254   <none>        9876:39876/TCP,8080:38080/TCP   3y
rts-server           ClusterIP   None             <none>        8077/TCP                        2y
sonarqube            NodePort    172.17.22.179    <none>        9000:30003/TCP                  1y
stp-server           ClusterIP   None             <none>        8087/TCP                        153d
stpyn-server         ClusterIP   None             <none>        8089/TCP                        6d
tas-server           ClusterIP   None             <none>        8085/TCP                        119d
tss-server           ClusterIP   None             <none>        8088/TCP                        3y
turbine-server       ClusterIP   None             <none>        8080/TCP                        3y
turbine-server-np    NodePort    172.17.119.73    <none>        9010:39010/TCP                  3y
uas-server           ClusterIP   None             <none>        8099/TCP                        3y
uss-server           ClusterIP   None             <none>        8091/TCP                        3y
uss-server-np        NodePort    172.17.132.101   <none>        8091:38091/TCP                  3y
uts-server           ClusterIP   None             <none>        8080/TCP                        3y
yyht-server          NodePort    172.17.161.127   <none>        8280:38280/TCP                  114d
zipkin-server        ClusterIP   None             <none>        9411/TCP                        3y
zipkin-server-np     NodePort    172.17.65.82     <none>        9411:39411/TCP                  3y
zk-cluster-svc       ClusterIP   None             <none>        2888/TCP,3888/TCP               3y
zk-cluster-svc-np    ClusterIP   172.17.168.92    <none>        2181/TCP                        3y
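
To see which pods back one of these services, or to test one locally, typical follow-ups are (service port-forwarding needs kubectl v1.10 or newer):

kubectl get endpoints eureka-server
kubectl describe svc eureka-server
kubectl port-forward svc/eureka-server 8761:8761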


Exec into a pod

[root@fpNet-web-38 ~]# kubectl exec -it oauth-node-0 -- /bin/sh
# exit


