Deploying Spinnaker on Kubernetes

Deploy the object storage component: MinIO

Prepare the image:

[root@hdss7-200 data]# docker pull minio/minio:latest

latest: Pulling from minio/minio

4167d3e14976: Pull complete

2a388c8277af: Pull complete

7c4d58c6c246: Pull complete

50805c9d05a7: Pull complete

Digest: sha256:66d05810fcc68d43605af7d114cb76dc10082ac2d7196f61fc8487c7181b2955

Status: Downloaded newer image for minio/minio:latest

docker.io/minio/minio:latest

[root@hdss7-200 data]# docker images|grep minio

minio/minio                                latest                     0f8054b7e1dc        2 days ago          57.6MB

[root@hdss7-200 data]# docker tag 0f8054b7e1dc harbor.od.com/armory/minio:latest

[root@hdss7-200 data]# docker push harbor.od.com/armory/minio:latest

The push refers to repository [harbor.od.com/armory/minio]

8be9bd175663: Pushed

4ccb289fbb64: Pushed

ca492e84b87e: Pushed

531743b7098c: Pushed

latest: digest: sha256:66d05810fcc68d43605af7d114cb76dc10082ac2d7196f61fc8487c7181b2955 size: 1157


Create the namespace and secret:

[root@hdss7-21 ~]# kubectl  create ns armory

namespace/armory created

[root@hdss7-21 ~]# kubectl get ns

NAME              STATUS   AGE

app               Active   14d

armory            Active   3s

default           Active   17d

infra             Active   14d

kube-node-lease   Active   17d

kube-public       Active   17d

kube-system       Active   17d

prod              Active   7d3h

test              Active   7d3h

[root@hdss7-21 ~]# kubectl create secret docker-registry harbor --docker-server=harbor.od.com --docker-username=admin --docker-password=Harbor12345 -n armory

secret/harbor created

Create the storage directory:

[root@hdss7-200 nfs-volume]# mkdir /data/nfs-volume/minio

Prepare the resource manifests:

[root@hdss7-200 minio]# mkdir /data/k8s-yaml/armory/minio

[root@hdss7-200 minio]# cat dp.yaml

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    name: minio

  name: minio

  namespace: armory

spec:

  progressDeadlineSeconds: 600

  replicas: 1

  revisionHistoryLimit: 7

  selector:

    matchLabels:

      name: minio

  template:

    metadata:

      labels:

        app: minio

        name: minio

    spec:

      containers:

      - name: minio

        image: harbor.od.com/armory/minio:latest

        imagePullPolicy: IfNotPresent

        ports:

        - containerPort: 9000

          protocol: TCP

        args:

        - server

        - /data

        env:

        - name: MINIO_ACCESS_KEY

          value: admin

        - name: MINIO_SECRET_KEY

          value: admin123

        readinessProbe:

          failureThreshold: 3

          httpGet:

            path: /minio/health/ready

            port: 9000

            scheme: HTTP

          initialDelaySeconds: 10

          periodSeconds: 10

          successThreshold: 1

          timeoutSeconds: 5

        volumeMounts:

        - mountPath: /data

          name: data

      imagePullSecrets:

      - name: harbor

      volumes:

      - nfs:

          server: hdss7-200

          path: /data/nfs-volume/minio

        name: data

[root@hdss7-200 minio]# cat svc.yaml

apiVersion: v1

kind: Service

metadata:

  name: minio

  namespace: armory

spec:

  ports:

  - port: 80

    protocol: TCP

    targetPort: 9000

  selector:

    app: minio

[root@hdss7-200 minio]# cat ingress.yaml

kind: Ingress

apiVersion: extensions/v1beta1

metadata:

  name: minio

  namespace: armory

spec:

  rules:

  - host: minio.od.com

    http:

      paths:

      - path: /

        backend:

          serviceName: minio

          servicePort: 80

Apply the resource manifests:

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/armory/minio/dp.yaml

deployment.extensions/minio created

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/armory/minio/svc.yaml

service/minio created

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/armory/minio/ingress.yaml

ingress.extensions/minio created

Access the MinIO console at http://minio.od.com and log in with admin / admin123.
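
To confirm MinIO is reachable before moving on, you can hit the same readiness endpoint the probe uses, this time through the ingress. A minimal check (host, port and path all come from the manifests above), assuming minio.od.com already resolves to the ingress node:

curl -i http://minio.od.com/minio/health/ready
# a ready instance answers HTTP/1.1 200 OK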

Deploy Redis:

[root@hdss7-200 minio]# docker pull redis:4.0.14

4.0.14: Pulling from library/redis

c499e6d256d6: Pull complete

bf1bc8a5a7e4: Pull complete

7564fb795604: Pull complete

056d97b2df12: Pull complete

1a0c6bde1a43: Pull complete

e40656a581e9: Pull complete

Digest: sha256:3f21afab41dabaa4f0cea09e6feb74fe0626dd148c2bcffe40f6f54c4778cac3

Status: Downloaded newer image for redis:4.0.14

docker.io/library/redis:4.0.14

[root@hdss7-200 minio]# docker images|grep redis

redis                                      4.0.14                     f54239c50400        12 days ago         89.2MB

goharbor/redis-photon                      v1.8.3                     cda8fa1932ec        6 months ago        109MB

[root@hdss7-200 minio]# docker tag f54239c50400 harbor.od.com/armory/redis:v4.0.14

[root@hdss7-200 minio]# docker push harbor.od.com/armory/redis:v4.0.14

The push refers to repository [harbor.od.com/armory/redis]

424ea102c723: Pushed

c6016794fbb4: Pushed

0a596e0c0d4f: Pushed

efdee1fc0ca1: Pushed

5327bb8ad385: Pushed

c3a984abe8a8: Pushed

v4.0.14: digest: sha256:951cddef0d9da0c5b002cddc36340833f5ffcc723889d52e6d1f75fb52923abb size: 1572

Prepare the resource manifests:

[root@hdss7-200 ~]# mkdir /data/k8s-yaml/armory/redis

[root@hdss7-200 redis]# cat dp.yaml

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    name: redis

  name: redis

  namespace: armory

spec:

  replicas: 1

  revisionHistoryLimit: 7

  selector:

    matchLabels:

      name: redis

  template:

    metadata:

      labels:

        app: redis

        name: redis

    spec:

      containers:

      - name: redis

        image: harbor.od.com/armory/redis:v4.0.14

        imagePullPolicy: IfNotPresent

        ports:

        - containerPort: 6379

          protocol: TCP

      imagePullSecrets:

      - name: harbor

[root@hdss7-200 redis]# cat svc.yaml

apiVersion: v1

kind: Service

metadata:

  name: redis

  namespace: armory

spec:

  ports:

  - port: 6379

    protocol: TCP

    targetPort: 6379

  selector:

    app: redis

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/armory/redis/dp.yaml

deployment.extensions/redis created

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/armory/redis/svc.yaml

service/redis created

[root@hdss7-21 ~]# kubectl get pod -n armory

NAME                     READY   STATUS    RESTARTS   AGE

minio-5ff9567d9d-gktbv   1/1     Running   0          17m

redis-77ff686585-f8kfw   1/1     Running   0          11s
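
To verify Redis is actually serving, a quick ping from inside the pod is enough (pod name taken from the listing above; redis-cli ships in the official redis:4.0.14 image):

kubectl -n armory exec -it redis-77ff686585-f8kfw -- redis-cli ping
# a healthy instance replies: PONG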

Deploy the cloud driver component: clouddriver

Prepare the MinIO credentials:

[root@hdss7-200 ~]# mkdir /data/k8s-yaml/armory/clouddriver

[root@hdss7-200 ~]# cd /data/k8s-yaml/armory/clouddriver

[root@hdss7-200 clouddriver]# cat credentials

[default]

aws_access_key_id=admin

aws_secret_access_key=admin123

[root@hdss7-21 ~]# wget http://k8s-yaml.od.com/armory/clouddriver/credentials

--2020-04-12 17:48:40--  http://k8s-yaml.od.com/armory/clouddriver/credentials

Resolving k8s-yaml.od.com (k8s-yaml.od.com)... 10.4.7.200

Connecting to k8s-yaml.od.com (k8s-yaml.od.com)|10.4.7.200|:80... connected.

HTTP request sent, awaiting response... 200 OK

Length: 65 [text/plain]

Saving to: 'credentials'


100%[========================================================================================================================================>] 65          --.-K/s   in 0s


2020-04-12 17:48:40 (8.94 MB/s) - 'credentials' saved [65/65]


[root@hdss7-21 ~]# kubectl create secret generic credentials --from-file=./credentials -n armory

secret/credentials created

Prepare the image:

[root@hdss7-200 minio]# docker pull docker.io/armory/spinnaker-clouddriver-slim:release-1.8.x-14c9664

release-1.8.x-14c9664: Pulling from armory/spinnaker-clouddriver-slim

8e3ba11ec2a2: Pull complete

311ad0da4533: Pull complete

391a6a6b3651: Pull complete

e06ac5cf1eba: Pull complete

310ea36c6b4a: Pull complete

46928b6e4f49: Pull complete

91d309cd3217: Pull complete

Digest: sha256:968fcae9ba2bd5456cb672a85a776c7fe535ed4b6e000ad457f2486245d51273

Status: Downloaded newer image for armory/spinnaker-clouddriver-slim:release-1.8.x-14c9664

docker.io/armory/spinnaker-clouddriver-slim:release-1.8.x-14c9664

[root@hdss7-200 minio]# docker images|grep cloud

armory/spinnaker-clouddriver-slim          release-1.8.x-14c9664      edb2507fdb62        21 months ago       662MB

[root@hdss7-200 minio]# docker tag edb2507fdb62 harbor.od.com/armory/clouddriver:v1.8.x

[root@hdss7-200 minio]# docker push harbor.od.com/armory/clouddriver:v1.8.x

The push refers to repository [harbor.od.com/armory/clouddriver]

2d512d83ffd6: Pushed

658a102be22d: Pushed

6852d72bcb09: Pushed

c77bb5c0e352: Pushed

8bc7bbcd76b6: Pushed

298c3bb2664f: Pushed

73046094a9b8: Pushed

v1.8.x: digest: sha256:2057c4f391ef7ff6ac640ebe4a5c943f632982db66cc25e4bcabdbe5cc16c01c size: 1792

[root@hdss7-200 certs]# cp client-csr.json admin-csr.json

[root@hdss7-200 certs]# vim admin-csr.json

[root@hdss7-200 certs]#

[root@hdss7-200 certs]#

[root@hdss7-200 certs]# cat admin-csr.json

{

    "CN": "cluster-admin",

    "hosts": [

    ],

    "key": {

        "algo": "rsa",

        "size": 2048

    },

    "names": [

        {

            "C": "CN",

            "ST": "beijing",

            "L": "beijing",

            "O": "od",

            "OU": "ops"

        }

    ]

}

[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client admin-csr.json |cfssl-json -bare admin

2020/04/12 17:57:37 [INFO] generate received request

2020/04/12 17:57:37 [INFO] received CSR

2020/04/12 17:57:37 [INFO] generating key: rsa-2048

2020/04/12 17:57:38 [INFO] encoded CSR

2020/04/12 17:57:38 [INFO] signed certificate with serial number 555523144584417255992220142124735533255511444821

2020/04/12 17:57:38 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for

websites. For more information see the Baseline Requirements for the Issuance and Management

of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);

specifically, section 10.2.3 ("Information Requirements").

[root@hdss7-200 certs]# ll admin*

-rw-r--r-- 1 root root 1001 Apr 12 17:57 admin.csr

-rw-r--r-- 1 root root  285 Apr 12 17:55 admin-csr.json

-rw------- 1 root root 1675 Apr 12 17:57 admin-key.pem

-rw-r--r-- 1 root root 1367 Apr 12 17:57 admin.pem

Prepare the k8s user config:

[root@hdss7-21 ~]# scp hdss7-200:/opt/certs/ca.pem .

[root@hdss7-21 ~]# scp hdss7-200:/opt/certs/admin.pem .

[root@hdss7-21 ~]# scp hdss7-200:/opt/certs/admin-key.pem .

Create the user account (kubeconfig):

[root@hdss7-21 ~]# kubectl config set-cluster myk8s --certificate-authority=./ca.pem --embed-certs=true --server=https://10.4.7.11:7443 --kubeconfig=config

Cluster "myk8s" set.

[root@hdss7-21 ~]# kubectl config set-credentials cluster-admin --client-certificate=./admin.pem --client-key=./admin-key.pem --embed-certs=true --kubeconfig=config

User "cluster-admin" set.

[root@hdss7-21 ~]# kubectl config set-context myk8s-context --cluster=myk8s --user=cluster-admin --kubeconfig=config

Context "myk8s-context" created.

[root@hdss7-21 ~]# kubectl config use-context myk8s-context --kubeconfig=config

Switched to context "myk8s-context".

[root@hdss7-21 ~]# kubectl create clusterrolebinding myk8s-admin --clusterrole=cluster-admin --user=cluster-admin

clusterrolebinding.rbac.authorization.k8s.io/myk8s-admin created

Verify cluster-admin:

[root@hdss7-200 ~]# mkdir /root/.kube/

[root@hdss7-200 ~]# scp hdss7-21:/root/config .

config                                                100% 6197     1.9MB/s   00:00

[root@hdss7-200 ~]# ll

total 12

-rw-r--r-- 1 root root  964 Apr  8 10:32 check.py

-rw------- 1 root root 6197 Apr 12 18:25 config

-rw------- 1 root root    0 Apr  3 17:17 nohup.out

[root@hdss7-200 ~]# mv config .kube/

[root@hdss7-200 ~]# scp hdss7-21:/usr/bin/kubectl .

kubectl                                                                                                                                         100%   41MB  30.9MB/s   00:01

[root@hdss7-200 ~]# mv kubectl  /usr/bin/

[root@hdss7-200 ~]# kubectl get pod -n infra

NAME                             READY   STATUS    RESTARTS   AGE

dubbo-monitor-6676dd74cc-fccl4   1/1     Running   0          6d6h

kafka-manager-6449545865-q5kbm   1/1     Running   0          5d20h

prometheus-6767456ffb-5fzfb      1/1     Running   0          6d4h

Create the ConfigMap:

[root@hdss7-21 ~]# mv config  default-kubeconfig

[root@hdss7-21 ~]# kubectl create cm default-kubeconfig --from-file=default-kubeconfig -n armory

configmap/default-kubeconfig created

Prepare the resource manifests:

[root@hdss7-200 clouddriver]# cat custom-config.yaml

kind: ConfigMap

apiVersion: v1

metadata:

  name: custom-config

  namespace: armory

data:

  clouddriver-local.yml: |

    kubernetes:

      enabled: true

      accounts:

        - name: cluster-admin

          serviceAccount: false

          dockerRegistries:

            - accountName: harbor

              namespaces: []

          namespaces:

            - test

            - prod

          kubeconfigFile: /opt/spinnaker/credentials/custom/default-kubeconfig

      primaryAccount: cluster-admin

    dockerRegistry:

      enabled: true

      accounts:

        - name: harbor

          requiredGroupMembership: []

          providerVersion: V1

          insecureRegistry: true

          address: http://harbor.od.com

          username: admin

          password: Harbor12345

      primaryAccount: harbor

    artifacts:

      s3:

        enabled: true

        accounts:

        - name: armory-config-s3-account

          apiEndpoint: http://minio

          apiRegion: us-east-1

      gcs:

        enabled: false

        accounts:

        - name: armory-config-gcs-account

  custom-config.json: ""

  echo-configurator.yml: |

    diagnostics:

      enabled: true

  front50-local.yml: |

    spinnaker:

      s3:

        endpoint: http://minio

  igor-local.yml: |

    jenkins:

      enabled: true

      masters:

        - name: jenkins-admin

          address: http://jenkins.od.com

          username: admin

          password: admin123

      primaryAccount: jenkins-admin

  nginx.conf: |

    gzip on;

    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml image/x-icon;


    server {

           listen 80;


           location / {

                proxy_pass http://armory-deck/;

           }


           location /api/ {

                proxy_pass http://armory-gate:8084/;

           }


           rewrite ^/login(.*)$ /api/login$1 last;

           rewrite ^/auth(.*)$ /api/auth$1 last;

    }

  spinnaker-local.yml: |

    services:

      igor:

        enabled: true

[root@hdss7-200 clouddriver]# cat dp.yaml

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    app: armory-clouddriver

  name: armory-clouddriver

  namespace: armory

spec:

  replicas: 1

  revisionHistoryLimit: 7

  selector:

    matchLabels:

      app: armory-clouddriver

  template:

    metadata:

      annotations:

        artifact.spinnaker.io/location: '"armory"'

        artifact.spinnaker.io/name: '"armory-clouddriver"'

        artifact.spinnaker.io/type: '"kubernetes/deployment"'

        moniker.spinnaker.io/application: '"armory"'

        moniker.spinnaker.io/cluster: '"clouddriver"'

      labels:

        app: armory-clouddriver

    spec:

      containers:

      - name: armory-clouddriver

        image: harbor.od.com/armory/clouddriver:v1.8.x

        imagePullPolicy: IfNotPresent

        command:

        - bash

        - -c

        args:

        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config

          && /opt/clouddriver/bin/clouddriver

        ports:

        - containerPort: 7002

          protocol: TCP

        env:

        - name: JAVA_OPTS

          value: -Xmx512M

        envFrom:

        - configMapRef:

            name: init-env

        livenessProbe:

          failureThreshold: 5

          httpGet:

            path: /health

            port: 7002

            scheme: HTTP

          initialDelaySeconds: 600

          periodSeconds: 3

          successThreshold: 1

          timeoutSeconds: 1

        readinessProbe:

          failureThreshold: 5

          httpGet:

            path: /health

            port: 7002

            scheme: HTTP

          initialDelaySeconds: 180

          periodSeconds: 3

          successThreshold: 5

          timeoutSeconds: 1

        securityContext:

          runAsUser: 0

        volumeMounts:

        - mountPath: /etc/podinfo

          name: podinfo

        - mountPath: /home/spinnaker/.aws

          name: credentials

        - mountPath: /opt/spinnaker/credentials/custom

          name: default-kubeconfig

        - mountPath: /opt/spinnaker/config/default

          name: default-config

        - mountPath: /opt/spinnaker/config/custom

          name: custom-config

      imagePullSecrets:

      - name: harbor

      volumes:

      - configMap:

          defaultMode: 420

          name: default-kubeconfig

        name: default-kubeconfig

      - configMap:

          defaultMode: 420

          name: custom-config

        name: custom-config

      - configMap:

          defaultMode: 420

          name: default-config

        name: default-config

      - name: credentials

        secret:

          defaultMode: 420

          secretName: credentials

      - downwardAPI:

          defaultMode: 420

          items:

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.labels

            path: labels

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.annotations

            path: annotations

        name: podinfo

[root@hdss7-200 clouddriver]# cat init-env.yaml

kind: ConfigMap

apiVersion: v1

metadata:

  name: init-env

  namespace: armory

data:

  API_HOST: http://spinnaker.od.com/api

  ARMORY_ID: c02f0781-92f5-4e80-86db-0ba8fe7b8544

  ARMORYSPINNAKER_CONF_STORE_BUCKET: armory-platform

  ARMORYSPINNAKER_CONF_STORE_PREFIX: front50

  ARMORYSPINNAKER_GCS_ENABLED: "false"

  ARMORYSPINNAKER_S3_ENABLED: "true"

  AUTH_ENABLED: "false"

  AWS_REGION: us-east-1

  BASE_IP: 127.0.0.1

  CLOUDDRIVER_OPTS: -Dspring.profiles.active=armory,configurator,local

  CONFIGURATOR_ENABLED: "false"

  DECK_HOST: http://spinnaker.od.com

  ECHO_OPTS: -Dspring.profiles.active=armory,configurator,local

  GATE_OPTS: -Dspring.profiles.active=armory,configurator,local

  IGOR_OPTS: -Dspring.profiles.active=armory,configurator,local

  PLATFORM_ARCHITECTURE: k8s

  REDIS_HOST: redis://redis:6379

  SERVER_ADDRESS: 0.0.0.0

  SPINNAKER_AWS_DEFAULT_REGION: us-east-1

  SPINNAKER_AWS_ENABLED: "false"

  SPINNAKER_CONFIG_DIR: /home/spinnaker/config

  SPINNAKER_GOOGLE_PROJECT_CREDENTIALS_PATH: ""

  SPINNAKER_HOME: /home/spinnaker

  SPRING_PROFILES_ACTIVE: armory,configurator,local

[root@hdss7-200 clouddriver]# cat svc.yaml

apiVersion: v1

kind: Service

metadata:

  name: armory-clouddriver

  namespace: armory

spec:

  ports:

  - port: 7002

    protocol: TCP

    targetPort: 7002

  selector:

    app: armory-clouddriver

[root@hdss7-200 clouddriver]# kubectl apply -f init-env.yaml

configmap/init-env created

[root@hdss7-200 clouddriver]# kubectl apply -f default-config.yaml

configmap/default-config created

[root@hdss7-200 clouddriver]# kubectl apply -f custom-config.yaml

configmap/custom-config created

[root@hdss7-200 clouddriver]# kubectl apply -f  dp.yaml

deployment.extensions/armory-clouddriver created

[root@hdss7-200 clouddriver]# kubectl apply -f svc.yaml

service/armory-clouddriver created

[root@hdss7-200 clouddriver]# kubectl get pod -n armory

NAME                                  READY   STATUS    RESTARTS   AGE

armory-clouddriver-684644767d-9fxvk   0/1     Running   0          53s

minio-5ff9567d9d-gktbv                1/1     Running   0          99m

redis-77ff686585-f8kfw                1/1     Running   0          82m

Check from inside the minio container that clouddriver started successfully:
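
A sketch of that check (pod name from the earlier listing; port 7002 and the /health path match the clouddriver Service and probes defined above). If curl is not available inside the minio image, run the same curl from any pod or node that can reach the Service:

kubectl -n armory exec -it minio-5ff9567d9d-gktbv -- curl -s http://armory-clouddriver:7002/health
# a healthy clouddriver returns JSON containing "status":"UP"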

Deploy the data persistence component: front50

[root@hdss7-200 ~]# docker pull docker.io/armory/spinnaker-front50-slim:release-1.8.x-93febf2

release-1.8.x-93febf2: Pulling from armory/spinnaker-front50-slim

Digest: sha256:92309ff0c8d676b7dafbeb09bb78babbba669dffd7ed8878438f91d53cfb02f6

Status: Image is up to date for armory/spinnaker-front50-slim:release-1.8.x-93febf2

docker.io/armory/spinnaker-front50-slim:release-1.8.x-93febf2

[root@hdss7-200 ~]# docker images|grep front

armory/spinnaker-front50-slim              release-1.8.x-93febf2      0d353788f4f2        19 months ago       273MB

[root@hdss7-200 ~]# docker tag 0d353788f4f2 harbor.od.com/armory/front50:v1.8.x

[root@hdss7-200 ~]# dockerk push harbor.od.com/armory/front50:v1.8.x

-bash: dockerk: command not found

[root@hdss7-200 ~]# docker push harbor.od.com/armory/front50:v1.8.x

The push refers to repository [harbor.od.com/armory/front50]

dfaf560918e4: Pushed

44956a013f38: Pushed

78bd58e6921a: Pushed

12c374f8270a: Pushed

0c3170905795: Pushed

df64d3292fd6: Mounted from public/traefik

v1.8.x: digest: sha256:b2da7cfd07d831f0399a253541453fbf9a374fb9de9ecfcc5bf2a2fd97839bba size: 1579

Prepare the resource manifests:

[root@hdss7-200 ~]# mkdir /data/k8s-yaml/armory/front50

[root@hdss7-200 ~]# cd /data/k8s-yaml/armory/front50

[root@hdss7-200 front50]# cat dp.yaml

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    app: armory-front50

  name: armory-front50

  namespace: armory

spec:

  replicas: 1

  revisionHistoryLimit: 7

  selector:

    matchLabels:

      app: armory-front50

  template:

    metadata:

      annotations:

        artifact.spinnaker.io/location: '"armory"'

        artifact.spinnaker.io/name: '"armory-front50"'

        artifact.spinnaker.io/type: '"kubernetes/deployment"'

        moniker.spinnaker.io/application: '"armory"'

        moniker.spinnaker.io/cluster: '"front50"'

      labels:

        app: armory-front50

    spec:

      containers:

      - name: armory-front50

        image: harbor.od.com/armory/front50:v1.8.x

        imagePullPolicy: IfNotPresent

        command:

        - bash

        - -c

        args:

        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config

          && /opt/front50/bin/front50

        ports:

        - containerPort: 8080

          protocol: TCP

        env:

        - name: JAVA_OPTS

          value: -javaagent:/opt/front50/lib/jamm-0.2.5.jar -Xmx1000M

        envFrom:

        - configMapRef:

            name: init-env

        livenessProbe:

          failureThreshold: 3

          httpGet:

            path: /health

            port: 8080

            scheme: HTTP

          initialDelaySeconds: 600

          periodSeconds: 3

          successThreshold: 1

          timeoutSeconds: 1

        readinessProbe:

          failureThreshold: 3

          httpGet:

            path: /health

            port: 8080

            scheme: HTTP

          initialDelaySeconds: 180

          periodSeconds: 5

          successThreshold: 8

          timeoutSeconds: 1

        volumeMounts:

        - mountPath: /etc/podinfo

          name: podinfo

        - mountPath: /home/spinnaker/.aws

          name: credentials

        - mountPath: /opt/spinnaker/config/default

          name: default-config

        - mountPath: /opt/spinnaker/config/custom

          name: custom-config

      imagePullSecrets:

      - name: harbor

      volumes:

      - configMap:

          defaultMode: 420

          name: custom-config

        name: custom-config

      - configMap:

          defaultMode: 420

          name: default-config

        name: default-config

      - name: credentials

        secret:

          defaultMode: 420

          secretName: credentials

      - downwardAPI:

          defaultMode: 420

          items:

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.labels

            path: labels

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.annotations

            path: annotations

        name: podinfo

[root@hdss7-200 front50]# cat svc.yaml

apiVersion: v1

kind: Service

metadata:

  name: armory-front50

  namespace: armory

spec:

  ports:

  - port: 8080

    protocol: TCP

    targetPort: 8080

  selector:

    app: armory-front50

[root@hdss7-200 front50]# kubectl apply -f dp.yaml

deployment.extensions/armory-front50 created

[root@hdss7-200 front50]# kubectl apply -f svc.yaml

service/armory-front50 created

[root@hdss7-200 front50]# kubectl get pod -n armory

NAME                                  READY   STATUS              RESTARTS   AGE

armory-clouddriver-684644767d-9fxvk   1/1     Running             0          16m

armory-front50-fc74f5794-2tjhl        0/1     ContainerCreating   0          15s
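
Once the front50 pod turns Running, a quick health probe plus a look at MinIO confirms it is wired up correctly (service name, port and bucket name come from the manifests and init-env above):

kubectl -n armory exec -it minio-5ff9567d9d-gktbv -- curl -s http://armory-front50:8080/health
# expect "status":"UP"; a bucket named armory-platform (ARMORYSPINNAKER_CONF_STORE_BUCKET) should also appear in the MinIO console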

Deploy the task orchestration component: orca

[root@hdss7-200 front50]# docker pull docker.io/armory/spinnaker-orca-slim:release-1.8.x-de4ab55

release-1.8.x-de4ab55: Pulling from armory/spinnaker-orca-slim

4fe2ade4980c: Already exists

6fc58a8d4ae4: Already exists

d3e6d7e9702a: Already exists

6c70af887bc7: Pull complete

c4b6e637d6e8: Pull complete

da01b2afaa26: Pull complete

[root@hdss7-200 front50]# docker images |grep orca

armory/spinnaker-orca-slim                 release-1.8.x-de4ab55      5103b1f73e04        19 months ago       141MB

[root@hdss7-200 front50]# docker tag 5103b1f73e04 harbor.od.com/armory/orca:v1.8.x

[root@hdss7-200 front50]# docker push harbor.od.com/armory/orca:v1.8.x

The push refers to repository [harbor.od.com/armory/orca]

fc691dbda20f: Pushed

df3bd4d73885: Pushed

c5165988c0bd: Pushed

12c374f8270a: Mounted from armory/front50

0c3170905795: Mounted from armory/front50

df64d3292fd6: Mounted from armory/front50

v1.8.x: digest: sha256:4be2da614968e0722d766c67d30e16701b93718950736785a1cd1664572ccd32 size: 1578

[root@hdss7-200 armory]# mkdir /data/k8s-yaml/armory/orca

[root@hdss7-200 armory]# cd /data/k8s-yaml/armory/orca

[root@hdss7-200 orca]# cat dp.yaml

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    app: armory-orca

  name: armory-orca

  namespace: armory

spec:

  replicas: 1

  revisionHistoryLimit: 7

  selector:

    matchLabels:

      app: armory-orca

  template:

    metadata:

      annotations:

        artifact.spinnaker.io/location: '"armory"'

        artifact.spinnaker.io/name: '"armory-orca"'

        artifact.spinnaker.io/type: '"kubernetes/deployment"'

        moniker.spinnaker.io/application: '"armory"'

        moniker.spinnaker.io/cluster: '"orca"'

      labels:

        app: armory-orca

    spec:

      containers:

      - name: armory-orca

        image: harbor.od.com/armory/orca:v1.8.x

        imagePullPolicy: IfNotPresent

        command:

        - bash

        - -c

        args:

        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config

          && /opt/orca/bin/orca

        ports:

        - containerPort: 8083

          protocol: TCP

        env:

        - name: JAVA_OPTS

          value: -Xmx1000M

        envFrom:

        - configMapRef:

            name: init-env

        livenessProbe:

          failureThreshold: 5

          httpGet:

            path: /health

            port: 8083

            scheme: HTTP

          initialDelaySeconds: 600

          periodSeconds: 5

          successThreshold: 1

          timeoutSeconds: 1

        readinessProbe:

          failureThreshold: 3

          httpGet:

            path: /health

            port: 8083

            scheme: HTTP

          initialDelaySeconds: 180

          periodSeconds: 3

          successThreshold: 5

          timeoutSeconds: 1

        volumeMounts:

        - mountPath: /etc/podinfo

          name: podinfo

        - mountPath: /opt/spinnaker/config/default

          name: default-config

        - mountPath: /opt/spinnaker/config/custom

          name: custom-config

      imagePullSecrets:

      - name: harbor

      volumes:

      - configMap:

          defaultMode: 420

          name: custom-config

        name: custom-config

      - configMap:

          defaultMode: 420

          name: default-config

        name: default-config

      - downwardAPI:

          defaultMode: 420

          items:

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.labels

            path: labels

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.annotations

            path: annotations

        name: podinfo

[root@hdss7-200 orca]# cat svc.yaml

apiVersion: v1

kind: Service

metadata:

  name: armory-orca

  namespace: armory

spec:

  ports:

  - port: 8083

    protocol: TCP

    targetPort: 8083

  selector:

    app: armory-orca

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/armory/orca/dp.yaml

deployment.extensions/armory-orca created

[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/armory/orca/svc.yaml

service/armory-orca created

[root@hdss7-21 ~]# kubectl get pod -n armory

NAME                                  READY   STATUS    RESTARTS   AGE

armory-clouddriver-684644767d-9fxvk   1/1     Running   0          31m

armory-front50-fc74f5794-2tjhl        1/1     Running   1          15m

armory-orca-679d659f59-lltph          0/1     Running   0          10s
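
The 0/1 READY state right after the apply is expected: the readinessProbe above only starts checking after initialDelaySeconds: 180. Watching the rollout is sufficient here:

kubectl -n armory rollout status deployment/armory-orca
# or simply watch the pods: kubectl -n armory get pod -w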

Deploy the message bus component: echo

[root@hdss7-200 orca]# docker pull docker.io/armory/echo-armory:c36d576-release-1.8.x-617c567

[root@hdss7-200 orca]# docker images|grep echo

armory/echo-armory                         c36d576-release-1.8.x-617c567               415efd46f474        21 months ago       287MB

[root@hdss7-200 orca]# docker tag 415efd46f474 harbor.od.com/armory/echo:v1.8.x

[root@hdss7-200 orca]# docker push harbor.od.com/armory/echo:v1.8.x

[root@hdss7-200 echo]# cat dp.yaml

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    app: armory-echo

  name: armory-echo

  namespace: armory

spec:

  replicas: 1

  revisionHistoryLimit: 7

  selector:

    matchLabels:

      app: armory-echo

  template:

    metadata:

      annotations:

        artifact.spinnaker.io/location: '"armory"'

        artifact.spinnaker.io/name: '"armory-echo"'

        artifact.spinnaker.io/type: '"kubernetes/deployment"'

        moniker.spinnaker.io/application: '"armory"'

        moniker.spinnaker.io/cluster: '"echo"'

      labels:

        app: armory-echo

    spec:

      containers:

      - name: armory-echo

        image: harbor.od.com/armory/echo:v1.8.x

        imagePullPolicy: IfNotPresent

        command:

        - bash

        - -c

        args:

        - bash /opt/spinnaker/config/default/fetch.sh && cd /home/spinnaker/config

          && /opt/echo/bin/echo

        ports:

        - containerPort: 8089

          protocol: TCP

        env:

        - name: JAVA_OPTS

          value: -javaagent:/opt/echo/lib/jamm-0.2.5.jar -Xmx512M

        envFrom:

        - configMapRef:

            name: init-env

        livenessProbe:

          failureThreshold: 3

          httpGet:

            path: /health

            port: 8089

            scheme: HTTP

          initialDelaySeconds: 600

          periodSeconds: 3

          successThreshold: 1

          timeoutSeconds: 1

        readinessProbe:

          failureThreshold: 3

          httpGet:

            path: /health

            port: 8089

            scheme: HTTP

          initialDelaySeconds: 180

          periodSeconds: 3

          successThreshold: 5

          timeoutSeconds: 1

        volumeMounts:

        - mountPath: /etc/podinfo

          name: podinfo

        - mountPath: /opt/spinnaker/config/default

          name: default-config

        - mountPath: /opt/spinnaker/config/custom

          name: custom-config

      imagePullSecrets:

      - name: harbor

      volumes:

      - configMap:

          defaultMode: 420

          name: custom-config

        name: custom-config

      - configMap:

          defaultMode: 420

          name: default-config

        name: default-config

      - downwardAPI:

          defaultMode: 420

          items:

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.labels

            path: labels

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.annotations

            path: annotations

        name: podinfo

[root@hdss7-200 echo]# cat svc.yaml

apiVersion: v1

kind: Service

metadata:

  name: armory-echo

  namespace: armory

spec:

  ports:

  - port: 8089

    protocol: TCP

    targetPort: 8089

  selector:

    app: armory-echo

[root@hdss7-200 echo]# kubectl apply -f dp.yaml

deployment.extensions/armory-echo created

[root@hdss7-200 echo]# kubectl apply -f svc.yaml

service/armory-echo created

[root@hdss7-200 echo]# kubectl get pod -n armory

NAME                                  READY   STATUS              RESTARTS   AGE

armory-clouddriver-684644767d-9fxvk   1/1     Running             0          49m

armory-echo-5659f7495c-ls8qg          0/1     ContainerCreating   0          16s
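
If echo (or any of these components) stays unready well past its probe delay, the container log usually shows why; a quick look, using the Deployment name from the apply output:

kubectl -n armory logs deploy/armory-echo --tail=50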


Deploy the pipeline interaction component: igor

[root@hdss7-200 orca]# docker pull docker.io/armory/spinnaker-igor-slim:release-1.8-x-new-install-healthy-ae2b329

[root@hdss7-200 orca]# docker images|grep igor

armory/spinnaker-igor-slim                 release-1.8-x-new-install-healthy-ae2b329   23984f5b43f6        21 months ago       135MB

[root@hdss7-200 orca]# docker tag 23984f5b43f6 harbor.od.com/armory/igor:v1.8.x

[root@hdss7-200 orca]# docker push harbor.od.com/armory/igor:v1.8.x

[root@hdss7-200 igor]# kubectl apply -f dp.yaml

deployment.extensions/armory-igor created

[root@hdss7-200 igor]# kubectl apply -f svc.yaml

service/armory-igor created

[root@hdss7-200 igor]# kubectl get pod -n armory

NAME                                  READY   STATUS              RESTARTS   AGE

armory-clouddriver-684644767d-9fxvk   1/1     Running             0          53m

armory-echo-5659f7495c-ls8qg          1/1     Running             0          4m21s

armory-front50-fc74f5794-2tjhl        1/1     Running             1          36m

armory-igor-7dfcf9bb94-25x47          0/1     ContainerCreating   0          13s
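
igor is the component that talks to Jenkins (configured via igor-local.yml in custom-config above). Once the pod is Running, its log should mention the configured jenkins-admin master; a quick way to look, assuming the Deployment name armory-igor from the apply output:

kubectl -n armory logs deploy/armory-igor | grep -i jenkins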

Deploy the API provider component: gate

[root@hdss7-200 igor]# docker pull docker.io/armory/gate-armory:dfafe73-release-1.8.x-5d505ca

[root@hdss7-200 igor]# docker images|grep gate

armory/gate-armory                         dfafe73-release-1.8.x-5d505ca               b092d4665301        21 months ago       179MB

[root@hdss7-200 igor]# docker tag b092d4665301 harbor.od.com/armory/gate:v1.8.x

[root@hdss7-200 igor]# docker push harbor.od.com/armory/gate:v1.8.x

[root@hdss7-200 gate]# kubectl apply -f dp.yaml

deployment.extensions/armory-gate created

[root@hdss7-200 gate]# kubectl apply -f svc.yaml

service/armory-gate created

[root@hdss7-200 gate]# kubectl get pod -n armory|grep gate

armory-gate-66787df75b-96tkk          0/1     Running   0          18s
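
gate listens on 8084 (the port nginx.conf above proxies /api/ to). Since its manifests are not shown here, a port-forward is a convenient way to probe it directly; a sketch, assuming the Deployment name armory-gate from the apply output:

kubectl -n armory port-forward deploy/armory-gate 8084:8084 &
curl -s http://127.0.0.1:8084/health
# expect JSON containing "status":"UP"; stop the port-forward afterwards (kill %1)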

Deploy the UI component: deck

[root@hdss7-200 igor]# docker images|grep deck

armory/deck-armory                         d4bf0cf-release-1.8.x-0a33f94               9a87ba3b319f        21 months ago       518MB

[root@hdss7-200 igor]# docker tag 9a87ba3b319f harbor.od.com/armory/deck:v1.8.x

[root@hdss7-200 igor]# docker push harbor.od.com/armory/deck:v1.8.x

[root@hdss7-200 deck]# kubectl apply -f dp.yaml

deployment.extensions/armory-deck created

[root@hdss7-200 deck]# kubectl apply -f svc.yaml

service/armory-deck created

[root@hdss7-200 deck]# kubectl get pod -n armory

NAME                                  READY   STATUS              RESTARTS   AGE

armory-clouddriver-684644767d-9fxvk   1/1     Running             0          67m

armory-deck-759754bc45-xrb87          0/1     ContainerCreating   0          19s
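
Before deploying the nginx front end, make sure the two Services its nginx.conf proxies to (armory-deck and armory-gate) both exist and have endpoints:

kubectl -n armory get svc armory-deck armory-gate
kubectl -n armory get ep armory-deck armory-gate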


Deploy nginx:

[root@hdss7-200 igor]# docker pull nginx:1.12.2

(Then tag and push it to harbor.od.com/armory/nginx:v1.12.2, as with the other components; the dp.yaml below references that tag.)

[root@hdss7-200 ~]# mkdir /data/k8s-yaml/armory/nginx

[root@hdss7-200 ~]# cd /data/k8s-yaml/armory/nginx

[root@hdss7-200 nginx]# cat dp.yaml

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  labels:

    app: armory-nginx

  name: armory-nginx

  namespace: armory

spec:

  replicas: 1

  revisionHistoryLimit: 7

  selector:

    matchLabels:

      app: armory-nginx

  template:

    metadata:

      annotations:

        artifact.spinnaker.io/location: '"armory"'

        artifact.spinnaker.io/name: '"armory-nginx"'

        artifact.spinnaker.io/type: '"kubernetes/deployment"'

        moniker.spinnaker.io/application: '"armory"'

        moniker.spinnaker.io/cluster: '"nginx"'

      labels:

        app: armory-nginx

    spec:

      containers:

      - name: armory-nginx

        image: harbor.od.com/armory/nginx:v1.12.2

        imagePullPolicy: Always

        command:

        - bash

        - -c

        args:

        - bash /opt/spinnaker/config/default/fetch.sh nginx && nginx -g 'daemon off;'

        ports:

        - containerPort: 80

          name: http

          protocol: TCP

        - containerPort: 443

          name: https

          protocol: TCP

        - containerPort: 8085

          name: api

          protocol: TCP

        livenessProbe:

          failureThreshold: 3

          httpGet:

            path: /

            port: 80

            scheme: HTTP

          initialDelaySeconds: 180

          periodSeconds: 3

          successThreshold: 1

          timeoutSeconds: 1

        readinessProbe:

          failureThreshold: 3

          httpGet:

            path: /

            port: 80

            scheme: HTTP

          initialDelaySeconds: 30

          periodSeconds: 3

          successThreshold: 5

          timeoutSeconds: 1

        volumeMounts:

        - mountPath: /opt/spinnaker/config/default

          name: default-config

        - mountPath: /etc/nginx/conf.d

          name: custom-config

      imagePullSecrets:

      - name: harbor

      volumes:

      - configMap:

          defaultMode: 420

          name: custom-config

        name: custom-config

      - configMap:

          defaultMode: 420

          name: default-config

        name: default-config

[root@hdss7-200 nginx]# cat svc.yaml

apiVersion: v1

kind: Service

metadata:

  name: armory-nginx

  namespace: armory

spec:

  ports:

  - name: http

    port: 80

    protocol: TCP

    targetPort: 80

  - name: https

    port: 443

    protocol: TCP

    targetPort: 443

  - name: api

    port: 8085

    protocol: TCP

    targetPort: 8085

  selector:

    app: armory-nginx

[root@hdss7-200 nginx]# cat ingress.yaml

apiVersion: extensions/v1beta1

kind: Ingress

metadata:

  labels:

    app: spinnaker

    web: spinnaker.od.com

  name: armory-nginx

  namespace: armory

spec:

  rules:

  - host: spinnaker.od.com

    http:

      paths:

      - backend:

          serviceName: armory-nginx

          servicePort: 80

[root@hdss7-200 nginx]# kubectl apply -f dp.yaml

deployment.extensions/armory-nginx created

[root@hdss7-200 nginx]# kubectl apply -f svc.yaml

service/armory-nginx created

[root@hdss7-200 nginx]# kubectl apply -f ingress.yaml

ingress.extensions/armory-nginx created
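
Final check: once every pod in the armory namespace reports READY 1/1, the UI should be reachable at http://spinnaker.od.com (the hostname must resolve to the ingress controller, just like minio.od.com earlier):

kubectl -n armory get pod
# then browse to http://spinnaker.od.com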

Source: https://blog.51cto.com/13520772/2486707