- deploy Client
variables:
DOCKER_TLS_CERTDIR: ""
DOCKER_HOST: "tcp://localhost:2375"
DOCKER_DRIVER: overlay2
HELM_EXPERIMENTAL_OCI: "1"
CICHART_IMAGE: "$CI_REGISTRY_IMAGE/cichart:latest"
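  # DOCKER_TLS_CERTDIR="" and DOCKER_HOST point the docker CLI at a
  # docker-in-docker service over plain TCP (port 2375, no TLS), presumably
  # provided by the included docker_base.yaml; HELM_EXPERIMENTAL_OCI enables
  # Helm 3's experimental OCI registry support.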
- local: "/helpers/docker_base.yaml"
- local: "/helpers/helm_base.yaml"
build ci:
stage: .pre
extends: .docker_based_job
  script:
    - docker build --tag $CICHART_IMAGE .
    - docker push $CICHART_IMAGE
rules:
# no need to build if another project triggered us
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
- changes:
- cichart/Dockerfile
when: always
- if: $BUILD_CI_IMAGE
when: always
build sidecar rabbit init:
extends: .build_image
variables:
IMAGE_NAME: cspp-geo-rabbit-init
IMAGE_DIR: sidecars/cspp-geo-rabbit-init
rules:
# no need to build if another project triggered us
    - if: $CI_PIPELINE_SOURCE == "pipeline"
      when: never
- changes:
- sidecars/cspp-geo-rabbit-init/Dockerfile
- sidecars/cspp-geo-rabbit-init/declare_exchange.py
when: always
create geotiff storage:
extends: .helm_based_job
stage: create storage
script:
- ns=$(./helpers/get_namespace.sh)
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- ./helpers/create_pvc.sh "$ns" "ci_geosphere-test/geotiff-pvc.yaml" "cspp-geo-geo2grid" "$kubeconfig"
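    # get_namespace.sh presumably derives the target cluster namespace from the
    # pipeline context; create_pvc.sh is assumed to apply the listed PVC
    # manifest under the given claim name using the copied kubeconfig.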
# this job doesn't actually need any artifacts from previous jobs
dependencies: []
  rules:
    - changes:
        - ci_geosphere-test/geotiff-pvc.yaml
      when: always
    - if: $CREATE_STORAGE
      when: always
create shapefile storage:
extends: .helm_based_job
stage: create storage
script:
- ns=$(./helpers/get_namespace.sh)
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- ./helpers/create_pvc.sh "$ns" "ci_geosphere-test/shapefiles-pvc.yaml" "geosphere-tile-gen-shapefiles" "$kubeconfig"
# this job doesn't actually need any artifacts from previous jobs
dependencies: []
rules:
- changes:
- ci_geosphere-test/shapefiles-pvc.yaml
when: always
- if: $CREATE_STORAGE
when: always
.get_chart_tmpl:
  variables:
SUBCOMP_REPOS_BASE: "https://gitlab.ssec.wisc.edu/cspp_geo/geosphere"
# required:
SUBCOMP_REPOS: ""
SUBCOMP_CHART_DIR: ""
SUBCOMP_BRANCH: "master"
script:
- repos_url="${SUBCOMP_REPOS_BASE}/${SUBCOMP_REPOS}"
- git clone --depth 1 --branch ${SUBCOMP_BRANCH} $repos_url
- cd ${SUBCOMP_REPOS}
# most recent docker image should be tagged with most recent SHA
- docker_tag=$(git rev-parse --short=8 HEAD)
- echo $docker_tag
- cd chart
# make the docker tag available in later stages
- echo export docker_tag=$docker_tag >${SUBCOMP_CHART_DIR}/cibuild.env
- |-
sed -i "s/^appVersion: .*\$/appVersion: ${docker_tag}/g" ${SUBCOMP_CHART_DIR}/Chart.yaml
# debug:
- helm template ${SUBCOMP_CHART_DIR} ${SUBCOMP_CHART_DIR}
  artifacts:
    paths:
      - ${SUBCOMP_REPOS}/chart/${SUBCOMP_CHART_DIR}
  # XXX: Do we always want to clone the subcomponent?
# rules:
# - if: '$GEOSPHERE_SUBCOMP == ""'
# when: always
# - if: '$GEOSPHERE_SUBCOMP == $SUBCOMP_NAME'
# when: always
# - when: never
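# Each get_chart_* job below clones one subcomponent repository, stamps its
# chart's appVersion with that repo's latest commit SHA, and passes the chart
# directory (including cibuild.env) on to the deploy stages as an artifact.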
get_chart_grb:
extends: .get_chart_tmpl
  variables:
    SUBCOMP_REPOS: "geosphere-grb"
    SUBCOMP_CHART_DIR: "cspp-geo-grb"
get_chart_geo2grid:
extends: .get_chart_tmpl
variables:
SUBCOMP_REPOS: "geosphere-geo2grid"
SUBCOMP_CHART_DIR: "cspp-geo-geo2grid"
get_chart_tilegen:
extends: .get_chart_tmpl
variables:
SUBCOMP_REPOS: "geosphere-tile-gen"
SUBCOMP_CHART_DIR: "geosphere-tile-gen"
get_chart_mapserver:
extends: .get_chart_tmpl
variables:
SUBCOMP_REPOS: "geosphere-mapserver"
SUBCOMP_CHART_DIR: "geosphere-mapserver"
get_chart_mapcache:
extends: .get_chart_tmpl
variables:
SUBCOMP_REPOS: "geosphere-mapcache"
SUBCOMP_CHART_DIR: "geosphere-mapcache"
get_chart_client:
extends: .get_chart_tmpl
variables:
SUBCOMP_REPOS: "geosphere-client"
SUBCOMP_CHART_DIR: "geosphere-client"
get_chart_client_test:
extends: .get_chart_tmpl
variables:
SUBCOMP_REPOS: "geosphere-client"
SUBCOMP_CHART_DIR: "geosphere-client"
SUBCOMP_BRANCH: "develop"
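  # Same chart as get_chart_client, but tracking the develop branch so the
  # test environment deploys the in-development client.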
deploy_rabbit:
  environment:
    name: geosphere-test
    url: http://geosphere-test.ssec.wisc.edu
  extends: .helm_based_job
  script:
    - ns=$(./helpers/get_namespace.sh)
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
# get password from any previous installation
# if we don't do this the password will get out of sync
- sec_info=$(kubectl --kubeconfig $kubeconfig get secret --namespace geosphere-test geosphere-rabbit-rabbitmq || echo "")
- auth_sec="geosphere-rabbit-rabbitmq"
- if [ "$sec_info" != "" ]; then
pw=$(kubectl --kubeconfig $kubeconfig get secret --namespace $ns $auth_sec -o jsonpath="{.data.rabbitmq-password}" | base64 -d);
ec=$(kubectl --kubeconfig $kubeconfig get secret --namespace $ns $auth_sec -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 -d);
EXTRA_ARGS="--set auth.password=$pw --set auth.erlangCookie=$ec";
fi
# install third-party rabbitmq server
- helm repo add bitnami "https://charts.bitnami.com/bitnami"
- helm upgrade -v 2 --install --kubeconfig $kubeconfig -f ci_geosphere-test/values-geosphere-rabbit.yaml $EXTRA_ARGS --namespace $ns geosphere-rabbit bitnami/rabbitmq
# do a little waiting for the rabbitmq pod to be ready so future stages
# don't fail to communicate with it
- kubectl wait -n geosphere-test pod/geosphere-rabbit-rabbitmq-0 --timeout 60s --for=condition=Ready
# this job doesn't actually need any artifacts from previous jobs
dependencies: []
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
# no need to build if another project triggered us
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- changes:
- ci_geosphere-test/values-geosphere-rabbit.yaml
when: always
- if: $DEPLOY_RABBIT
when: always
deploy_grb_g16:
  environment:
    name: geosphere-test
    url: http://geosphere-test.ssec.wisc.edu
  extends: .helm_based_job
  script:
    - ns=$(./helpers/get_namespace.sh)
- cd geosphere-grb/chart
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- echo "Deploying version $docker_tag to cluster namespace $ns"
# copy extra values files to the local directory (where helm has access via docker mount)
- cp ../../ci_geosphere-test/values-grb-g16.yaml .
- helm upgrade -v 2 --install --kubeconfig $kubeconfig --namespace $ns --set persistence.enabled=true --set persistence.storageClass=longhorn -f values-grb-g16.yaml cspp-geo-grb cspp-geo-grb/
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
- when: on_success
deploy_geo2grid_g16_radf:
environment:
name: geosphere-test
url: http://geosphere-test.ssec.wisc.edu
  extends: .helm_based_job
  script:
    - ns=$(./helpers/get_namespace.sh)
- cd geosphere-geo2grid/chart
- source cspp-geo-geo2grid/cibuild.env
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- echo "Deploying version $docker_tag to cluster namespace $ns"
# copy extra values files to the local directory (where helm has access via docker mount)
- cp ../../ci_geosphere-test/values-geo2grid-g16-radf.yaml .
# namespace names are the same as domain names
- helm upgrade -v 2 --install --kubeconfig $kubeconfig --namespace $ns -f values-geo2grid-g16-radf.yaml cspp-geo-geo2grid cspp-geo-geo2grid/
dependencies:
- get_chart_geo2grid
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
- when: on_success
.deploy_tile_gen:
  # environment:
  #   name: geosphere-test
  #   url: http://geosphere-test.ssec.wisc.edu
variables:
DEPLOY_SUFFIX: ""
extends: .helm_based_job
stage: deploy tile gen
script:
- ./helpers/deploy_tile_gen.sh "$DEPLOY_SUFFIX"
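    # deploy_tile_gen.sh is assumed to wrap the same copy-kubeconfig and
    # "helm upgrade --install" pattern used by the other deploy jobs,
    # parameterized by DEPLOY_SUFFIX (e.g. "-g16-radf", "-g16-radc").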
dependencies:
- get_chart_tilegen
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
- when: on_success
deploy_tile_gen_g16_radf:
  extends: .deploy_tile_gen
variables:
DEPLOY_SUFFIX: "-g16-radf"
deploy_tile_gen_g16_radc:
  extends: .deploy_tile_gen
variables:
DEPLOY_SUFFIX: "-g16-radc"
#deploy_tile_gen_g16_radm1:
# extends: .deploy_tile_gen
# variables:
# DEPLOY_SUFFIX: "-g16-radm1"
#
#deploy_tile_gen_g16_radm2:
# extends: .deploy_tile_gen
# variables:
# DEPLOY_SUFFIX: "-g16-radm2"
deploy_mapserver:
  environment:
    name: geosphere-test
    url: http://geosphere-test.ssec.wisc.edu
  extends: .helm_based_job
script:
- ns=$(./helpers/get_namespace.sh)
- cd geosphere-mapserver/chart
- source geosphere-mapserver/cibuild.env
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- echo "Deploying version $docker_tag to cluster namespace $ns"
# copy extra values files to the local directory (where helm has access via docker mount)
- cp ../../ci_geosphere-test/values-mapserver.yaml .
# namespace names are the same as domain names
- helm upgrade -v 2 --install --kubeconfig $kubeconfig --namespace $ns -f values-mapserver.yaml geosphere-mapserver geosphere-mapserver/
dependencies:
- get_chart_mapserver
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
- when: on_success
deploy_mapcache:
  environment:
    name: geosphere-test
    url: http://geosphere-test.ssec.wisc.edu
extends: .helm_based_job
stage: deploy WMTS
script:
- ns=$(./helpers/get_namespace.sh)
- cd geosphere-mapcache/chart
- source geosphere-mapcache/cibuild.env
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- echo "Deploying version $docker_tag to cluster namespace $ns"
# copy extra values files to the local directory (where helm has access via docker mount)
- cp ../../ci_geosphere-test/values-mapcache.yaml .
# namespace names are the same as domain names
- helm upgrade -v 2 --install --kubeconfig $kubeconfig --namespace $ns -f values-mapcache.yaml geosphere-mapcache geosphere-mapcache/
dependencies:
- get_chart_mapcache
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
- when: on_success
deploy_client_test:
environment:
name: geosphere-test
url: http://geosphere-test.ssec.wisc.edu
extends: .helm_based_job
stage: deploy Client
script:
    - ns=$(./helpers/get_namespace.sh)
- cd geosphere-client/chart
- source geosphere-client/cibuild.env
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- echo "Deploying version $docker_tag to cluster namespace $ns"
# copy extra values files to the local directory (where helm has access via docker mount)
- cp ../../ci_geosphere-test/values-client.yaml .
# namespace names are the same as domain names
- helm upgrade -v 2 --install --kubeconfig $kubeconfig --namespace $ns -f values-client.yaml geosphere-client geosphere-client/
dependencies:
- get_chart_client_test
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
- when: on_success
deploy_client_production:
environment:
name: geosphere
url: http://geosphere.ssec.wisc.edu
extends: .helm_based_job
stage: deploy Client
script:
- ns="geosphere"
- cd geosphere-client/chart
- source geosphere-client/cibuild.env
# copy secret kubeconfig to the mounted (pwd) directory
- cp $kubekorner_k3s_config .
- kubeconfig=$(basename $kubekorner_k3s_config)
- echo "Deploying version $docker_tag to cluster namespace $ns"
# copy extra values files to the local directory (where helm has access via docker mount)
- cp ../../ci_geosphere/values-client.yaml .
# namespace names are the same as domain names
- helm upgrade -v 2 --install --kubeconfig $kubeconfig --namespace $ns -f values-client.yaml geosphere-client geosphere-client/
dependencies:
- get_chart_client
rules:
- if: '$kubekorner_k3s_config == ""'
when: never
- when: on_success