diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 35085b0f33139ba2a0da383b276c987008ba1ced..2f827538c4a6a62ac401c1c995e37d71e9cef22a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -19,7 +19,6 @@ variables: CICHART_IMAGE: "$CI_REGISTRY_IMAGE/cichart:latest" GEOSPHERE_TAG_REGEX: "^r[0-9]+_[0-9]+" GEOSPHERE_TEST_TAG_REGEX: "^test-r[0-9]+_[0-9]+" - GCP_TAG_REGEX: "^gcp-r[0-9]+_[0-9]+" # TESTS_BRANCH_REGEX: "^master" # only run tests for branches in the main repository TESTS_PROJECT_NAMESPACE: "cspp_geo/geosphere" @@ -38,6 +37,7 @@ include: - local: "/ci_geosphere/gitlab-ci.yaml" - local: "/ci_geosphere-test/gitlab-ci.yaml" - local: "/ci_gcp/gitlab-ci.yaml" + - local: "/ci_aws/gitlab-ci.yaml" build ci: diff --git a/README.md b/README.md index ce8780acf12d146c90e6c9730308ee9ed5770ce9..5d193f0463c9fdb0d98f6787a9a7d8aa073ec33f 100644 --- a/README.md +++ b/README.md @@ -102,6 +102,9 @@ and the locations of the other repositories related to GeoSphere. * **admin/**: Kubernetes resources that should be manually installed to define cluster resources available to the main geosphere application(s). These are **not** run as part of continuous integration. +* **ci_aws/**: Helm chart "values" YAML files to configure GeoSphere for + deployment on Amazon Web Services (AWS). This directory also contains + the GitLab CI configurations specific to this deployment. * **ci_gcp/**: Helm chart "values" YAML files to configure GeoSphere for deployment on Google Cloud Platform (GCP). This directory also contains the GitLab CI configurations specific to this deployment. diff --git a/admin/AWS_README.md b/admin/AWS_README.md new file mode 100644 index 0000000000000000000000000000000000000000..b2a6982539d589cfdd3019cd447ae27b29a8a229 --- /dev/null +++ b/admin/AWS_README.md @@ -0,0 +1,104 @@ +# Amazon Web Services - Elastic Kubernetes Service + +This document describes administration procedures for creating and using a +Kubernetes cluster on Amazon Web Services. 
+ +Disclaimer: This document is *NOT* a substitute for the AWS documentation. +Please read their documentation for updated and accurate information. + +## Accounts and Projects + +Accounts related to SSEC work or other University of Wisconsin work must be +created by DoIT. See https://it.wisc.edu/services/amazon-web-services/ for +more information. By working with DoIT you should get access to an AWS account +for your "project". You can then log in to the AWS Console by going to the URL +near the top of this page: https://kb.wisc.edu/public-cloud/page.php?id=65490. + +NOTE: Once logged in, be careful not to create resources on AWS unless you +know what you are doing, even in tutorials/examples. Otherwise you may end up +accidentally charging to your business account. + +## Service Accounts and Kubectl + +It is recommended that you make a separate "Identity and Access Management" +(IAM) account for controlling access to specific parts of your project. This +is useful for automating access to your future Kubernetes (EKS) clusters or +other services from Continuous Integration (CI) jobs. Note that there is a +difference between generic AWS IAM accounts and the service accounts used +inside a Kubernetes cluster. + +To create a new service/utility account, go to the AWS Console, click on +"Services" in the top left, and find "IAM" under +"Security, Identity, & Compliance". On the left of the IAM page choose "Users" +and then "Add users". Follow the prompts to create a new user **and** new +group (ex. "geosphere-ci" in an "automated-admins" group). For EKS management +make sure to add the `AmazonEKSClusterPolicy` to the group. + +When you are done creating the user make sure to record the Access Key ID and +Access Key Secret as you'll need that to use the account from CI. 
You will +likely want to assign these values with `aws configure` and/or the following +environment variables: + +```bash +export AWS_ACCESS_KEY_ID=<access key id> +export AWS_SECRET_ACCESS_KEY=<access secret> +export AWS_DEFAULT_REGION="us-east-2" +``` + +## Creating a cluster + +From the AWS Console, choose the Elastic Kubernetes Service. Enter a unique +descriptive name for the cluster. + +For "Cluster Service Role", switch to the IAM console using the provided link. +See the bottom of this page for information on how to create the role: +https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html + +After setting the role, configure the rest of the cluster settings (defaults +are typically a good start), and create your cluster. + +On the cluster Configuration page, go to "Compute" and create a new node group. +A node group will also need its own role. See the bottom of this page for more +information: +https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html + +You will then get to choose what type of systems will be in this node group. +There are a lot of choices so research will be needed to get the best machine +for your use case. Consider adding Kubernetes labels to distinguish this group +of nodes from others you'll create. For example, "geosphere-node=database" for +nodes that should have databases created on them due to fast network interfaces +and other system resources. + +## Connecting to the cluster remotely + +By default, AWS EKS only gives full permissions to a cluster to the account +that created it. If you created the cluster as described above with the AWS +console then this was likely your NetID account. To allow a service account +to have access to the cluster resources (ex. `kubectl get pods`) you'll need +to edit the `aws-auth` ConfigMap and add your service user. 
See this page for +more information: +https://aws.amazon.com/premiumsupport/knowledge-center/eks-kubernetes-object-access-error/ + +For full control over the cluster use `system:masters` as one of the groups +assigned to your user. + +You can update your kubectl to easily access the AWS cluster by running: + +```bash +aws eks update-kubeconfig --region $AWS_DEFAULT_REGION --name <aws cluster name> +``` + +## Block Storage + +By default your AWS cluster likely has a single storage class called `gp2`. +You can verify this by running: + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +gp2 (default) kubernetes.io/aws-ebs Delete WaitForFirstConsumer false 40h +``` + +If you want to access other types of storage you'll need to create your own +storage class using the "aws-ebs" provisioner: +https://kubernetes.io/docs/concepts/storage/storage-classes/#aws-ebs diff --git a/ci_aws/geotiff-pvc.yaml b/ci_aws/geotiff-pvc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6dba61334840615e361d348bbd8dd5420d56b785 --- /dev/null +++ b/ci_aws/geotiff-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: cspp-geo-geo2grid + labels: {} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Ti + storageClassName: "gp2" diff --git a/ci_aws/gitlab-ci.yaml b/ci_aws/gitlab-ci.yaml new file mode 100644 index 0000000000000000000000000000000000000000..452656579e287c5c9330c7a1197e6965b5dfcb73 --- /dev/null +++ b/ci_aws/gitlab-ci.yaml @@ -0,0 +1,230 @@ +# This file is included as part of the main repository .gitlab-ci.yml file + +test aws connection: + extends: .helm_based_job + stage: test + # don't need any artifacts for this to run + dependencies: [] + script: + - aws ec2 describe-instances + - if [ "${GEOSPHERE_DEPLOY_AWS_CLUSTER_NAME}" != "" ]; then + kubectl get all; + fi + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - 
when: on_success + +aws create geotiff storage: + extends: .helm_based_job + stage: create storage + script: + - ns=$(./helpers/get_namespace.sh) + - ./helpers/create_pvc.sh "$ns" "ci_aws/geotiff-pvc.yaml" "cspp-geo-geo2grid" + # this job doesn't actually need any artifacts from previous jobs + dependencies: [] + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + # this will always be true for tags + - changes: + - ci_aws/geotiff-pvc.yaml + - if: $CREATE_STORAGE + +#aws create postgres storage: +# extends: .helm_based_job +# stage: create storage +# script: +# - ns=$(./helpers/get_namespace.sh) +# - ./helpers/create_pvc.sh "$ns" "ci_aws/postgres-pvc.yaml" "geosphere-postgis" +# # this job doesn't actually need any artifacts from previous jobs +# dependencies: [] +# rules: +# - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ +# when: never +# - changes: +# - ci_aws/postgres-pvc.yaml +# - if: $CREATE_STORAGE + +aws create shapefile storage: + extends: .helm_based_job + stage: create storage + script: + - ns=$(./helpers/get_namespace.sh) + - ./helpers/create_pvc.sh "$ns" "ci_aws/shapefiles-pvc.yaml" "geosphere-tile-gen-shapefiles" + # this job doesn't actually need any artifacts from previous jobs + dependencies: [] + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + # this will always be true for tags + - changes: + - ci_aws/shapefiles-pvc.yaml + - if: $CREATE_STORAGE + +aws deploy rabbit: + extends: .helm_based_job + stage: deploy infrastructure + script: + - ./helpers/deploy_rabbitmq.sh ci_aws + # this job doesn't actually need any artifacts from previous jobs + dependencies: [] + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + # no need to build if another project triggered us + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - changes: + - ci_aws/values-geosphere-rabbit.yaml + - if: $DEPLOY_RABBIT + +aws deploy postgres: + extends: .helm_based_job + stage: deploy infrastructure + script: + - 
./helpers/deploy_postgis.sh ci_aws + # this job doesn't actually need any artifacts from previous jobs + dependencies: [] + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + # no need to build if another project triggered us + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - changes: + - ci_aws/values-postgis.yaml + - if: $DEPLOY_POSTGIS + +aws deploy grb: + extends: .helm_based_job + stage: deploy GRB + script: + - ns=$(./helpers/get_namespace.sh) + - cd geosphere-grb/chart + - source cspp-geo-grb/cibuild.env + # copy private ssh key to the chart for inclusion in the secret + - cp $GRB_PROXY_SSH_KEY cspp-geo-grb/secrets/grb_ssh_proxy_rsa + - echo "Deploying version $docker_tag to cluster namespace $ns" + - helm upgrade -v 2 --install --namespace $ns -f ../../ci_aws/values-grb-g16.yaml cspp-geo-grb cspp-geo-grb/ + dependencies: + - get_chart_grb + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy geo2grid g16 radf: + extends: .deploy_geo2grid + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radf" + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy geo2grid g16 radc: + extends: .deploy_geo2grid + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radc" + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy geo2grid g16 radm1: + extends: .deploy_geo2grid + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radm1" + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy geo2grid g16 radm2: + extends: .deploy_geo2grid + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radm2" + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy tile gen g16 radf: + extends: .deploy_tile_gen + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radf" + rules: + - if: 
$CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy tile gen g16 radc: + extends: .deploy_tile_gen + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radc" + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy tile gen g16 radm1: + extends: .deploy_tile_gen + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radm1" + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy tile gen g16 radm2: + extends: .deploy_tile_gen + variables: + VALUES_DIR: "ci_aws" + DEPLOY_SUFFIX: "-g16-radm2" + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy mapserver: + variables: + VALUES_DIR: "ci_aws" + extends: .deploy_mapserver + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy mapcache: + variables: + VALUES_DIR: "ci_aws" + extends: .deploy_mapcache + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + +aws deploy client: + extends: .helm_based_job + stage: deploy Client + script: + - ns=$(./helpers/get_namespace.sh) + - cd geosphere-client/chart + - source geosphere-client/cibuild.env + - echo "Deploying version $docker_tag to cluster namespace $ns" + - helm upgrade -v 2 --kubeconfig $HOME/.kube/config --install --namespace $ns -f ../../ci_aws/values-client.yaml geosphere-client geosphere-client/ + dependencies: + - get_chart_client_test + rules: + - if: $CI_COMMIT_TAG !~ /^aws-r[0-9]+_[0-9]+/ + when: never + - when: on_success + diff --git a/ci_aws/postgres-pvc.yaml b/ci_aws/postgres-pvc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1dd74aa73eb33bcceb295a77d4d674ecb982c6cd --- /dev/null +++ b/ci_aws/postgres-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: geosphere-postgis + labels: {} +spec: + accessModes: + - 
ReadWriteOnce + resources: + requests: + storage: 8Gi + storageClassName: "gp2" diff --git a/ci_aws/shapefiles-pvc.yaml b/ci_aws/shapefiles-pvc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee055f15baabe15863a37c26c2bdf238200faac5 --- /dev/null +++ b/ci_aws/shapefiles-pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: geosphere-tile-gen-shapefiles + labels: {} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: "gp2" diff --git a/ci_aws/values-client.yaml b/ci_aws/values-client.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e2e364aa7cc8c6c0e567119ba18dd27b707ba36 --- /dev/null +++ b/ci_aws/values-client.yaml @@ -0,0 +1,17 @@ +service: + type: LoadBalancer +ingress: + enabled: true +# annotations: +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# ingress.kubernetes.io/ssl-redirect: "true" + hosts: + - host: "" + paths: ["/"] +# tls: +# - hosts: +# - "geosphere.ssec.wisc.edu" +# secretName: "geosphere-tls-certs" + +infoServer: "http://35.232.126.151" +tileServer: "http://35.184.188.7/mapcache/wmts" diff --git a/ci_aws/values-geo2grid-g16-radc.yaml b/ci_aws/values-geo2grid-g16-radc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8772dc1446006261bacde7f8aa8738bf5194a094 --- /dev/null +++ b/ci_aws/values-geo2grid-g16-radc.yaml @@ -0,0 +1,24 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radc.l1b.netcdf.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: +# s3Endpoint: "http://geosphere-minio:9000" + existingClaim: "cspp-geo-grb" +destination: + persistence: + existingClaim: "cspp-geo-geo2grid" + # radf will take care of cleaning up this shared claim + cleanup: + enabled: false +# s3Endpoint: "http://geosphere-minio:9000" 
+ s3Secret: "geosphere-minio" + s3AccessKey: "accesskey" + s3SecretKey: "secretkey" +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-geo2grid-g16-radf.yaml b/ci_aws/values-geo2grid-g16-radf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec0fbbd07b86b38214aea44540c25d2644d4285e --- /dev/null +++ b/ci_aws/values-geo2grid-g16-radf.yaml @@ -0,0 +1,23 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radf.l1b.netcdf.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: +# s3Endpoint: "http://geosphere-minio:9000" + existingClaim: "cspp-geo-grb" +destination: + persistence: + existingClaim: "cspp-geo-geo2grid" + cleanup: + age: "+1" +# s3Endpoint: "http://geosphere-minio:9000" + s3Secret: "geosphere-minio" + s3AccessKey: "accesskey" + s3SecretKey: "secretkey" +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-geo2grid-g16-radm1.yaml b/ci_aws/values-geo2grid-g16-radm1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1a360af39ae41d6e0db139ddba981fdc5bc413ee --- /dev/null +++ b/ci_aws/values-geo2grid-g16-radm1.yaml @@ -0,0 +1,24 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radm1.l1b.netcdf.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: +# s3Endpoint: "http://geosphere-minio:9000" + existingClaim: "cspp-geo-grb" +destination: + persistence: + existingClaim: "cspp-geo-geo2grid" + # radf will take care of cleaning up this shared claim + cleanup: + enabled: false +# s3Endpoint: "http://geosphere-minio:9000" + s3Secret: "geosphere-minio" + s3AccessKey: "accesskey" + s3SecretKey: "secretkey" +nodeSelector: + geosphere-node: compute diff 
--git a/ci_aws/values-geo2grid-g16-radm2.yaml b/ci_aws/values-geo2grid-g16-radm2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f09c6deba51e020bd743fdf7b76e9d26ce462593 --- /dev/null +++ b/ci_aws/values-geo2grid-g16-radm2.yaml @@ -0,0 +1,24 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radm2.l1b.netcdf.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: +# s3Endpoint: "http://geosphere-minio:9000" + existingClaim: "cspp-geo-grb" +destination: + persistence: + existingClaim: "cspp-geo-geo2grid" + # radf will take care of cleaning up this shared claim + cleanup: + enabled: false +# s3Endpoint: "http://geosphere-minio:9000" + s3Secret: "geosphere-minio" + s3AccessKey: "accesskey" + s3SecretKey: "secretkey" +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-geosphere-rabbit.yaml b/ci_aws/values-geosphere-rabbit.yaml new file mode 100644 index 0000000000000000000000000000000000000000..099686a9be4adfeb0f5063388edf019f4231e7d2 --- /dev/null +++ b/ci_aws/values-geosphere-rabbit.yaml @@ -0,0 +1,14 @@ +#rabbitmq: +# configuration: |- +# ## Clustering +# cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s +# cluster_formation.k8s.host = kubernetes.default.svc.cluster.local +# cluster_formation.node_cleanup.interval = 10 +# cluster_formation.node_cleanup.only_log_warning = true +# cluster_partition_handling = autoheal +# # queue master locator +# queue_master_locator=min-masters +# # enable guest user +# loopback_users.guest = false +#rbac: +# create: false diff --git a/ci_aws/values-grb-g16.yaml b/ci_aws/values-grb-g16.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3a624c6ceef720e3b412afddf9db664431e55da --- /dev/null +++ b/ci_aws/values-grb-g16.yaml @@ -0,0 +1,22 @@ +rabbitOut: + host: 
"geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +apidFilter: "GOES-16-ABI-Only.xml" +fanoutServer: "fanout1" +sshProxy: "ash.ssec.wisc.edu" +sshUser: "davidh" +sshPrivateKeyFile: "secrets/grb_ssh_proxy_rsa" +leftPort: 50060 +rightPort: 50070 +uploadDst: "/dst" +persistence: + enabled: true + size: 500Gi + storageClass: "standard" +#rabbitOut: +# host: "geosphere-rabbit-rabbitmq" +# username: "user" +# passwordSecret: "geosphere-rabbit-rabbitmq" +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-mapcache.yaml b/ci_aws/values-mapcache.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f74c985a59d5c037fda3377a4651026c2991fbba --- /dev/null +++ b/ci_aws/values-mapcache.yaml @@ -0,0 +1,34 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.*.l1b.tiles.*.complete" +wms: + host: "geosphere-mapserver" +cache: + persistence: + enabled: true + storageClass: "gp2" + size: 350Gi + cleanup: + schedule: "0 */6 * * *" + age: "+2" +database: + postgresHost: "geosphere-postgis-postgresql" + postgresPort: 5432 + postgresDatabaseName: "postgres" + postgresUser: "postgres" + postgresPasswordSecret: "geosphere-postgis-postgresql" +seed: + images: false + overlays: true + seedArgs: "-z 0,3" +service: + type: LoadBalancer +ingress: + enabled: true + hosts: + - host: "" + paths: ["/mapcache"] +nodeSelector: + geosphere-node: cache diff --git a/ci_aws/values-mapserver.yaml b/ci_aws/values-mapserver.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6549ca5a48281393cb5701a95b6b5421ff9b3e83 --- /dev/null +++ b/ci_aws/values-mapserver.yaml @@ -0,0 +1,23 @@ +imageSource: + # s3Endpoint: "http://geosphere-minio:9000" + existingClaim: "cspp-geo-geo2grid" +tileSource: + existingClaim: "geosphere-tile-gen-shapefiles" +database: + postgresHost: "geosphere-postgis-postgresql" + postgresPort: 5432 + 
postgresDatabaseName: "postgres" + postgresUser: "postgres" + postgresPasswordSecret: "geosphere-postgis-postgresql" +service: + type: LoadBalancer +ingress: + enabled: true + hosts: + - host: "" + paths: ["/wms", "/wms_times"] + # the test site uses the production WMS server +# - host: geosphere-test.ssec.wisc.edu +# paths: ["/wms", "/wms_times"] +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-postgis.yaml b/ci_aws/values-postgis.yaml new file mode 100644 index 0000000000000000000000000000000000000000..61ff6dd9da067a09aba1a08306ed05fcdf0075a6 --- /dev/null +++ b/ci_aws/values-postgis.yaml @@ -0,0 +1,12 @@ +postgresqlDatabase: "postgres" +postgresqlExtendedConf: + sharedBuffers: "512MB" +persistence: + enabled: false +# existingClaim: "geosphere-postgis" +metrics: + enabled: false +primary: + nodeSelector: + geosphere-node: database + diff --git a/ci_aws/values-tile-gen-g16-radc.yaml b/ci_aws/values-tile-gen-g16-radc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2abc33626af9016fdacfa6bc5b2b01169c9c93f --- /dev/null +++ b/ci_aws/values-tile-gen-g16-radc.yaml @@ -0,0 +1,25 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radc.l1b.geotiff.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: + existingClaim: "cspp-geo-geo2grid" +destination: + # s3Endpoint: "http://geosphere-minio:9000" + persistence: + enabled: true + storageClass: "gp2" + existingClaim: "geosphere-tile-gen-shapefiles" +database: + postgresHost: "geosphere-postgis-postgresql" + postgresPort: 5432 + postgresDatabaseName: "postgres" + postgresUser: "postgres" + postgresPasswordSecret: "geosphere-postgis-postgresql" +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-tile-gen-g16-radf.yaml b/ci_aws/values-tile-gen-g16-radf.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..4ed260db2a4857a246e775602cac16322adfed71 --- /dev/null +++ b/ci_aws/values-tile-gen-g16-radf.yaml @@ -0,0 +1,25 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radf.l1b.geotiff.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: + existingClaim: "cspp-geo-geo2grid" +destination: + # s3Endpoint: "http://geosphere-minio:9000" + persistence: + enabled: true + storageClass: "gp2" + existingClaim: "geosphere-tile-gen-shapefiles" +database: + postgresHost: "geosphere-postgis-postgresql" + postgresPort: 5432 + postgresDatabaseName: "postgres" + postgresUser: "postgres" + postgresPasswordSecret: "geosphere-postgis-postgresql" +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-tile-gen-g16-radm1.yaml b/ci_aws/values-tile-gen-g16-radm1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ef8e35253b46116a3bf9e06ad2f4ffe42ce3c451 --- /dev/null +++ b/ci_aws/values-tile-gen-g16-radm1.yaml @@ -0,0 +1,25 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radm1.l1b.geotiff.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: + existingClaim: "cspp-geo-geo2grid" +destination: + # s3Endpoint: "http://geosphere-minio:9000" + persistence: + enabled: true + storageClass: "gp2" + existingClaim: "geosphere-tile-gen-shapefiles" +database: + postgresHost: "geosphere-postgis-postgresql" + postgresPort: 5432 + postgresDatabaseName: "postgres" + postgresUser: "postgres" + postgresPasswordSecret: "geosphere-postgis-postgresql" +nodeSelector: + geosphere-node: compute diff --git a/ci_aws/values-tile-gen-g16-radm2.yaml b/ci_aws/values-tile-gen-g16-radm2.yaml new file 
mode 100644 index 0000000000000000000000000000000000000000..6368876f17f1d94c295725768ca8ffbebc94decd --- /dev/null +++ b/ci_aws/values-tile-gen-g16-radm2.yaml @@ -0,0 +1,25 @@ +rabbitIn: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" + topic: "data.goes.g16.abi.radm2.l1b.geotiff.all.complete" +rabbitOut: + host: "geosphere-rabbit-rabbitmq" + username: "user" + passwordSecret: "geosphere-rabbit-rabbitmq" +source: + existingClaim: "cspp-geo-geo2grid" +destination: + # s3Endpoint: "http://geosphere-minio:9000" + persistence: + enabled: true + storageClass: "gp2" + existingClaim: "geosphere-tile-gen-shapefiles" +database: + postgresHost: "geosphere-postgis-postgresql" + postgresPort: 5432 + postgresDatabaseName: "postgres" + postgresUser: "postgres" + postgresPasswordSecret: "geosphere-postgis-postgresql" +nodeSelector: + geosphere-node: compute diff --git a/ci_tests/gitlab-ci.yaml b/ci_tests/gitlab-ci.yaml index 80fc5b8863eb1903e7c9ffeb2453c0f06bdbeb19..4f711e0b624336e46a0b26fab7f138da41dcb1ae 100644 --- a/ci_tests/gitlab-ci.yaml +++ b/ci_tests/gitlab-ci.yaml @@ -24,6 +24,8 @@ test_basic_grb: rules: - if: $CI_COMMIT_TAG =~ /^gcp-r[0-9]+_[0-9]+/ when: never + - if: $CI_COMMIT_TAG =~ /^aws-r[0-9]+_[0-9]+/ + when: never - when: on_success test_basic_pg: @@ -43,4 +45,6 @@ test_basic_pg: rules: - if: $CI_COMMIT_TAG =~ /^gcp-r[0-9]+_[0-9]+/ when: never + - if: $CI_COMMIT_TAG =~ /^aws-r[0-9]+_[0-9]+/ + when: never - when: on_success diff --git a/helpers/build_image.yaml b/helpers/build_image.yaml index 89fe09cc68f341292de177371043f49e3d98481a..2674e79dbd6248eb037f1cad0521692fd8aa1a35 100644 --- a/helpers/build_image.yaml +++ b/helpers/build_image.yaml @@ -14,7 +14,7 @@ # IMAGE_DIR: "cspp_geo_grb" .build_image: extends: .docker_based_job - image: gitlab.ssec.wisc.edu:5555/cspp_geo/geosphere/gcloud-kubectl-helm/gcloud-kubectl-helm:6d3e308b + image: 
gitlab.ssec.wisc.edu:5555/cspp_geo/geosphere/gcloud-kubectl-helm/gcloud-kubectl-helm:f850fd5c variables: IMAGE_NAME: "" IMAGE_DIR: "" diff --git a/helpers/helm_base.yaml b/helpers/helm_base.yaml index 0e2ace4ed620a3615c42528d3c244fbfb58d7210..9d347e34ca05964c0577a301683826bea14b46ca 100644 --- a/helpers/helm_base.yaml +++ b/helpers/helm_base.yaml @@ -1,7 +1,6 @@ .helm_based_job: extends: .docker_based_job -# image: gitlab.ssec.wisc.edu:5555/cspp_geo/geosphere/geosphere-deploy/cichart:latest - image: gitlab.ssec.wisc.edu:5555/cspp_geo/geosphere/gcloud-kubectl-helm/gcloud-kubectl-helm:6d3e308b + image: gitlab.ssec.wisc.edu:5555/cspp_geo/geosphere/gcloud-kubectl-helm/gcloud-kubectl-helm:f850fd5c before_script: - export KUBECONFIG="$kubekorner_k3s_config" - export HELM_EXPERIMENTAL_OCI="1" @@ -17,4 +16,19 @@ gcloud --verbosity=debug config set project "${GEOSPHERE_DEPLOY_GCP_PROJECT_NAME}"; gcloud --verbosity=debug container clusters get-credentials "${GEOSPHERE_DEPLOY_GCP_CLUSTER_NAME}" --zone "${GEOSPHERE_DEPLOY_GCP_ZONE_NAME}"; fi + - if [[ $CI_COMMIT_TAG =~ ^aws-r[0-9]+_[0-9]+ ]]; then + export KUBECONFIG="/root/.kube/config"; + export AWS_ACCESS_KEY_ID=$GEOSPHERE_DEPLOY_AWS_ACCESS_KEY_ID; + export AWS_SECRET_ACCESS_KEY=$GEOSPHERE_DEPLOY_AWS_SECRET_ACCESS_KEY; + export AWS_DEFAULT_REGION=$GEOSPHERE_DEPLOY_AWS_DEFAULT_REGION; + aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID"; + aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY"; + aws configure set default.region "$AWS_DEFAULT_REGION"; + aws sts get-caller-identity; + if [ "${GEOSPHERE_DEPLOY_AWS_CLUSTER_NAME}" != "" ]; then + aws eks --region $GEOSPHERE_DEPLOY_AWS_DEFAULT_REGION update-kubeconfig --name $GEOSPHERE_DEPLOY_AWS_CLUSTER_NAME; + kubectl config set-context $(kubectl config current-context) --namespace=default; + kubectl config view --minify; + fi; + fi - helm registry login -u ${CI_REGISTRY_USER} -p ${CI_JOB_TOKEN} ${CI_REGISTRY} diff --git a/tag_release.sh b/tag_release.sh index 
8880c264937789c665a8de7402aaea3b6c4d2676..26e27ac9110d8953b8413eedfd2f2b66d5a163ee 100755 --- a/tag_release.sh +++ b/tag_release.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash if [[ $# -ne 1 ]]; then - echo "Usage: ./tag_and_release.sh geosphere|geosphere-test|gcp" + echo "Usage: ./tag_and_release.sh geosphere|geosphere-test|gcp|aws" exit 1 fi @@ -12,8 +12,10 @@ elif [[ $deploy_id == "geosphere-test" ]]; then tag_prefix="test-" elif [[ $deploy_id == "gcp" ]]; then tag_prefix="gcp-" +elif [[ $deploy_id == "aws" ]]; then + tag_prefix="aws-" else - echo "Unrecognized deploy target: $deploy_id. Should be one of 'geosphere, geosphere-test, gcp'" + echo "Unrecognized deploy target: $deploy_id. Should be one of 'geosphere, geosphere-test, gcp, aws'" exit 2 fi