diff --git a/bin/start_kube.sh b/bin/start_kube.sh
index 254ae7f1..680fce1a 100755
--- a/bin/start_kube.sh
+++ b/bin/start_kube.sh
@@ -146,14 +146,18 @@ sleep 5
 #
 # TODO: Integrate this into the mainline along with logic to work with/without #80
 #
-# Hack to deploy our secret....
+# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not
+# deploying Plus (and does not have a JWT), we create a placeholder credential that is used to create a secret. That
+# secret is not a valid secret, but it is created to keep the logic easier to read and code.
+#
 if [[ -s "${script_dir}/../extras/jwt.token" ]]; then
   JWT=$(cat ${script_dir}/../extras/jwt.token)
   echo "Loading JWT into nginx-ingress/regcred"
   ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml > ${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
 else
   # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81
-  echo "No JWT found; this will likely fail"
+  echo "No JWT found; writing placeholder manifest"
+  ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml > ${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
 fi

 # Check for stack info....
diff --git a/docs/getting_started.md b/docs/getting_started.md
index fd7e1835..ebf3347d 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -85,8 +85,8 @@ you are not building from source, you do not need to install `make`.
 By default, Docker is required because the Ingress Controller is a Docker image and needs Docker to generate the image.

-**NOTE**: The kubeconfig deployment option currently requires that a JWT token be used to pull the NGINX plus image from
-the nginx.com repository. This will be updated in a future release.
+**NOTE**: The kubeconfig deployment option currently only allows you to deploy from a registry. This allows you to
+deploy the NGINX IC or the NGINX Plus IC (with a JWT from your F5 account).

 #### Kubernetes

diff --git a/docs/status-and-issues.md b/docs/status-and-issues.md
index ed4fe73b..43b2ec00 100644
--- a/docs/status-and-issues.md
+++ b/docs/status-and-issues.md
@@ -20,21 +20,23 @@ includes the following:

 All of these configurations use Pulumi code within Python as the Infrastructure as Code (IaC) manager.

-| K8 Provider     | Tested | Infrastructure Support      | IC Options              | FQDN/IP         | Notes                                            |
-|-----------------|--------|-----------------------------|-------------------------|-----------------|--------------------------------------------------|
-| AWS EKS         | Yes    | Full Infrastructure Standup | Build, Pull (uses ECR)  | Provided        |                                                  |
-| Azure AKS       | Yes    | Kubeconfig Only (3)         | NGINX Plus (w/ JWT) (1) | Manual FQDN (2) |                                                  |
-| Google GKE      | Yes    | Kubeconfig Only (3)         | NGINX Plus (w/ JWT) (1) | Manual FQDN (2) |                                                  |
-| MicroK8s        | Yes    | Kubeconfig Only (3)         | NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Storage, DNS, and Metallb need to be Enabled (4) |
-| Harvester/RKE2  | Yes    | Kubeconfig Only (3)         | NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
-| K3S             | Yes    | Kubeconfig Only (3)         | NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
-| Rancher Desktop | No     | Kubeconfig Only (3)         | NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
-| Minikube        | No     | Kubeconfig Only (3)         | NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
+| K8 Provider     | Tested | Infrastructure Support      | IC Options                      | FQDN/IP         | Notes                                            |
+|-----------------|--------|-----------------------------|---------------------------------|-----------------|--------------------------------------------------|
+| AWS EKS         | Yes    | Full Infrastructure Standup | Build, Pull (uses ECR)          | Provided        |                                                  |
+| Azure AKS       | Yes    | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) |                                                  |
+| Google GKE      | Yes    | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) |                                                  |
+| MicroK8s        | Yes    | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Storage, DNS, and Metallb need to be Enabled (4) |
+| Harvester/RKE2  | Yes    | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
+| K3S             | Yes    | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
+| Rancher Desktop | No     | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
+| Minikube        | No     | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |

 ### Notes:

 1. The NGINX IC build/deploy process is currently under active development and support for IC will be standardized
 across all providers. Follow [#81](https://github.com/nginxinc/kic-reference-architectures/issues/81) and
-[#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for details.
+[#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for details. Currently, for all non-AWS environments
+you have the option to specify either NGINX or NGINX Plus as your IC. The latter requires an active subscription and a
+JWT to be included at build time. Please see the documentation for more details.
 2. The process via which the IP and FQDN are created and used is currently under active development, and will be
 streamlined and standardized for all providers. Follow [#82](https://github.com/nginxinc/kic-reference-architectures/issues/82) for details.
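As context for the note above, here is a minimal sketch of how a kubeconfig-based (non-AWS) deployment would opt into the NGINX Plus IC under this change. It assumes the `kic-helm` configuration keys read by `ingress-controller-repo-only/__main__.py` (shown later in this diff), and that the values are set with `pulumi config set` from the relevant project directory with a stack selected; editing your `Pulumi.<stackname>.yaml` directly is equivalent. The JWT source path and the image tag are illustrative.

```bash
# Place the JWT from your F5/NGINX subscription where start_kube.sh looks for it
# (the source path here is illustrative). Without this file a placeholder secret is
# written and only the OSS image can be pulled.
cp /path/to/nginx-repo.jwt extras/jwt.token

# Switch from the OSS default (nginx/nginx-ingress) to the Plus image.
pulumi config set kic-helm:nginx_plus_flag true
pulumi config set kic-helm:nginx_repository private-registry.nginx.com/nginx-ic/nginx-plus-ingress
pulumi config set kic-helm:nginx_tag 2.1.0
```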
diff --git a/pulumi/python/README.md b/pulumi/python/README.md
index 90841741..2256dec4 100644
--- a/pulumi/python/README.md
+++ b/pulumi/python/README.md
@@ -1,6 +1,6 @@
 # MARA: Pulumi / Python

-This directory contains the
+This directory contains the Pulumi/Python implementation of MARA.
 This project illustrates the end to end stand up of an AWS VPC cluster, Elastic Kubernetes Service (EKS), NGINX
 Kubernetes Ingress Controller (KIC), and a sample application using [Pulumi](https://www.pulumi.com/). The project is
@@ -17,8 +17,9 @@ For instructions on running the project, refer to the
 ## Project Structure

 ### Top Level
-There are several directories located at the root of the project which are used; these are at the project root
-because they are intended to be outside the specific IaC providers (ie, for example to be used for a port to Terraform).
+
+There are several directories located at the root of the project; these are at the project root because
+they are intended to be outside the specific IaC providers (e.g., to be used for a port to Terraform).

 ```
 ├── bin
 ├── config
 │   ├── pulumi
 ├── docker
 ├── docs
 │   └── diagrams
 └── extras
 ```

-- The [`bin`](../../bin) directory contains all the binaries and scripts that are used to start/stop the project, as well
-as perform capabilities testing and deployment of extra functionality.
+- The [`bin`](../../bin) directory contains all the binaries and scripts that are used to start/stop the project, as
+  well as perform capabilities testing and deployment of extra functionality.
 - The [`config`](../../config) directory holds the `requirements.txt` for the venv needed for this project.
 - The [`config/pulumi`](../../config/pulumi) directory holds the configuration files for deployments, as well as a
-reference configuration that illustrates the available configuration options and their defaults.
-- The [`docker`](../../docker) directory contains Dockerfiles and a script to build a docker-based deployment image that
-contains all the tooling necessary to deploy MARA.
+  reference configuration that illustrates the available configuration options and their defaults.
+- The [`docker`](../../docker) directory contains Dockerfiles and a script to build a docker-based deployment image that
+  contains all the tooling necessary to deploy MARA.
 - The [`docs`](../../docs) directory contains all documentation relevant to the overall project.
 - The [`extras`](../../extras) directory contains additional scripts, notes, and configurations.

 ### Pulumi/Python Level
+
 This directory contains all the Pulumi/Python based logic, which currently consists of the following:

 ```
@@ -74,97 +76,100 @@ This directory contains all the Pulumi/Python based logic, which currently consi
 └── src
 ```
-
-- The [`config`](./config) directory contains files used by Pulumi to manage the configuration for this project. Note that
-this directory is essentially a redirect to the project-wide [`config`](../../config/pulumi) directory.
-- The [`infrastructure`](./infrastructure) directory contains files used to stand up Kubernetes as well as to provide a
-common project for all of the infrastructure and kubeconfig based clusters.
-- The [`kubernetes`](./kubernetes) directory contains all of the kubernetes based deployments; there are two key subdirectories
-in this directory:
-  - The [`nginx`](./kubernetes/nginx) directory contains all NGNIX products.
-  - The [`applications`](./kubernetes/applications) directory contains all applications that have been tested for deployment with MARA.
-- The [`tools`](./tools) directory contains projects that are used with the `kubernetes-extras.sh` script found in the bin
-directory.
-- The [`utility`](./utility) directory contains the code used to build/pull/push the NGNIX Ingress Controller, and other
-projects used to support the environment.
+- The [`config`](./config) directory contains files used by Pulumi to manage the configuration for this project. Note
+  that this directory is essentially a redirect to the project-wide [`config`](../../config/pulumi) directory.
+- The [`infrastructure`](./infrastructure) directory contains files used to stand up Kubernetes as well as to provide a
+  common project for all of the infrastructure and kubeconfig based clusters.
+- The [`kubernetes`](./kubernetes) directory contains all of the kubernetes based deployments; there are two key
+  subdirectories in this directory:
+  - The [`nginx`](./kubernetes/nginx) directory contains all NGINX products.
+  - The [`applications`](./kubernetes/applications) directory contains all applications that have been tested for
+    deployment with MARA.
+- The [`tools`](./tools) directory contains projects that are used with the `kubernetes-extras.sh` script found in the
+  bin directory.
+- The [`utility`](./utility) directory contains the code used to build/pull/push the NGINX Ingress Controller, and other
+  projects used to support the environment.
 - The [`venv/bin`](./venv/bin) directory contains the virtual environment for Python along with some key utilities, such
-as `pulumi`, `kubectl`, and `node`.
+  as `pulumi`, `kubectl`, and `node`.

 ## Configuration

-The Pulumi configuration files are in the [`config`](../../config/pulumi) directory. Pulumi's configuration files use the following
-naming convention:
+The Pulumi configuration files are in the [`config`](../../config/pulumi) directory. Pulumi's configuration files use
+the following naming convention:
 `Pulumi..yaml`. To create a new configuration file for your Pulumi stack, create a new file with a name that
-include the stack name. Then, refer to the sample [configuration file](../../config/pulumi/Pulumi.stackname.yaml.example)
+includes the stack name. Then, refer to the
+sample [configuration file](../../config/pulumi/Pulumi.stackname.yaml.example)
 for configuration entries that you want to customize and copy over the entries that you want to modify from their
 defaults.

 ### AWS
+
 The following directories are specific to AWS.

 #### VPC

-Contained within the [`vpc`](./infrastructure/aws/vpc) directory is the first Pulumi project which is responsible for setting up the VPC
-and subnets used by EKS. The project is built such that it will attempt to create a subnet for each availability zone
-within the running region. You may want to customize this behavior, or the IP addressing scheme used.
+Contained within the [`vpc`](./infrastructure/aws/vpc) directory is the first Pulumi project which is responsible for
+setting up the VPC and subnets used by EKS. The project is built such that it will attempt to create a subnet for each
+availability zone within the running region. You may want to customize this behavior, or the IP addressing scheme used.

 #### Elastic Kubernetes Service (EKS)

-Located within the [`eks`](./infrastructure/aws/eks) directory is a project used to stand up a new EKS cluster on AWS. This project reads
-data from the previously executed VPC project using its vpc id and subnets. In this project you may want to customize
-the `instance_type`, `min_size`, or `max_size` parameters provided to the cluster.
+Located within the [`eks`](./infrastructure/aws/eks) directory is a project used to stand up a new EKS cluster on AWS.
+This project reads data from the previously executed VPC project using its VPC ID and subnets. In this project you may
+want to customize the `instance_type`, `min_size`, or `max_size` parameters provided to the cluster.

 #### Elastic Container Registry (ECR)

-The [`ecr`](./infrastructure/aws/ecr) project is responsible for installing and configuring ECR for use with the previously created EKS
-cluster.
+The [`ecr`](./infrastructure/aws/ecr) project is responsible for installing and configuring ECR for use with the
+previously created EKS cluster.

 ### NGINX Ingress Controller Docker Image Build

-Within the [`kic-image-build`](./utility/kic-image-build) directory, there is a Pulumi project that will allow you to build a
-new NGINX Kubernetes Ingress Controller from source. Download of source, compilation, and image creation are fully
-automated. This project can be customized to build different flavors of KIC.
+Within the [`kic-image-build`](./utility/kic-image-build) directory, there is a Pulumi project that will allow you to
+build a new NGINX Kubernetes Ingress Controller from source. Download of source, compilation, and image creation are
+fully automated. This project can be customized to build different flavors of KIC.

 ### NGINX Ingress Controller Docker Image Push

-Within the [`kic-image-push`](./utility/kic-image-push) directory, there is a Pulumi project that will allow you to push the
-previously created KIC Docker image to ECR in a fully automated manner.
+Within the [`kic-image-push`](./utility/kic-image-push) directory, there is a Pulumi project that will allow you to push
+the previously created KIC Docker image to ECR in a fully automated manner.

 ### NGINX Ingress Controller Helm Chart

-In the [`ingress-contoller`](./kubernetes/nginx/ingress-controller) directory, you will find the Pulumi project responsible for installing the
-NGINX Ingress Controller. You may want to customize this project to allow for deploying different versions of KIC. This
-chart is only used for AWS deployments. All other deployments use the [`ingress-controller-repo-only`](./kubernetes/nginx/ingress-controller-repo-only)
-directory, which at this time **only allows the use of deployments from the NGINX repo with a JWT**.
+In the [`ingress-controller`](./kubernetes/nginx/ingress-controller) directory, you will find the Pulumi project
+responsible for installing the NGINX Ingress Controller. You may want to customize this project to allow for deploying
+different versions of KIC. This chart is only used for AWS deployments. All other deployments use
+the [`ingress-controller-repo-only`](./kubernetes/nginx/ingress-controller-repo-only)
+directory, which at this time **only allows the use of deployments from the NGINX repo - either NGINX IC or NGINX Plus
+IC (with a JWT)**.

 A sample config-map is provided in the Pulumi deployment code; this code will adjust the logging format to approximate
 the upstream NGINX KIC project which will allow for easier ingestion into log storage and processing systems.

-Note that this deployment uses the GA Ingress APIs; this has been tested with helm chart version 0.11.1 and NGINX KIC 2.0.2.
-Older versions of the KIC and helm charts can be used, but care should be taken to ensure that the helm chart version used
-is compatible with the KIC version. This information can be found in the
+Note that this deployment uses the GA Ingress APIs; this has been tested with helm chart version 0.11.1 and NGINX KIC
+2.0.2. Older versions of the KIC and helm charts can be used, but care should be taken to ensure that the helm chart
+version used is compatible with the KIC version. This information can be found in the
 [NGINX KIC Release Notes](https://docs.nginx.com/nginx-ingress-controller/releases/) for each release.

 #### Ingress API Versions and NGINX KIC

-Starting with Kubernetes version 1.22, support for the Ingress Beta API `networking.k8s.io/v1beta` will be dropped requiring
-use of the GA Ingress API `networking.k8s.io/v1`. However, Kubernetes versions 1.19 through 1.21 allows these two API versions
-to coexist and maintains compatibility for consumers of the API, meaning that the API will respond correctly to calls to either
-the `v1beta` and/or `v1` routes.
+Starting with Kubernetes version 1.22, support for the Ingress Beta API `networking.k8s.io/v1beta` will be dropped,
+requiring use of the GA Ingress API `networking.k8s.io/v1`. However, Kubernetes versions 1.19 through 1.21 allow these
+two API versions to coexist and maintain compatibility for consumers of the API, meaning that the API will respond
+correctly to calls to either the `v1beta` and/or `v1` routes.

-This project uses the NGINX KIC v2.x releases which includes full support for the GA APIs.
-do not use the
+This project uses the NGINX KIC v2.x releases, which include full support for the GA APIs.

 ### Log Store

-In the [`logstore`](./kubernetes/logstore) directory, you will find the Pulumi project responsible for installing your log store.
-The current solution deploys
+In the [`logstore`](./kubernetes/logstore) directory, you will find the Pulumi project responsible for installing your
+log store. The current solution deploys
 [Elasticsearch and Kibana](https://www.elastic.co/elastic-stack) using the
 [Bitnami Elasticsearch](https://bitnami.com/stack/elasticsearch/helm) chart. This solution can be swapped for other
 options as desired. This application is deployed to the `logstore`
-namespace. There are several configuration options available in the configuration file for the project in order to better
-tailor this deployment to the size of the cluster being used.
+namespace. There are several configuration options available in the configuration file for the project in order to
+better tailor this deployment to the size of the cluster being used.

 #### Notes
@@ -206,50 +211,52 @@ deployment.
 ### Prometheus

-Prometheus is deployed and configured to enable the collection of metrics for all components that have
-a defined service monitor. At installation time, the deployment will instantiate:
+Prometheus is deployed and configured to enable the collection of metrics for all components that have a defined service
+monitor. At installation time, the deployment will instantiate:
+
 - Node Exporters
 - Kubernetes Service Monitors
 - Grafana preloaded with dashboards and datasources for Kubernetes management
 - The NGINX Ingress Controller
 - Statsd receiver

-The former behavior of using the `prometheus.io:scrape: true` property set in the annotations
-indicating pods where metrics should be scraped has been deprecated, and these annotations will
-be removed in the near future.
+The former behavior of using the `prometheus.io:scrape: true` property set in the annotations indicating pods where +metrics should be scraped has been deprecated, and these annotations will be removed in the near future. + +Also, the standalone Grafana deployment has been removed from the standard deployment scripts, as it is installed as +part of this project. -Also, the standalone Grafana deployment has been removed from the standard deployment scripts, as it is installed -as part of this project. +Finally, this namespace will hold service monitors created by other projects, for example the Bank of Sirius deployment +currently deploys a service monitor for each of the postgres monitors that are deployed. -Finally, this namespace will hold service monitors created by other projects, for example the Bank of Sirius -deployment currently deploys a service monitor for each of the postgres monitors that are deployed. +Notes: -Notes: 1. The NGINX IC needs to be configured to expose prometheus metrics; this is currently done by default. -2. The default address binding of the `kube-proxy` component is set to `127.0.0.1` and as such will cause errors when the -canned prometheus scrape configurations are run. The fix is to set this address to `0.0.0.0`. An example manifest -has been provided in [prometheus/extras](./kubernetes/prometheus/extras) that can be applied against your installation with -`kubectl apply -f ./filename`. Please only apply this change once you have verified that it will work with your -version of Kubernetes. +2. The default address binding of the `kube-proxy` component is set to `127.0.0.1` and as such will cause errors when + the canned prometheus scrape configurations are run. The fix is to set this address to `0.0.0.0`. An example manifest + has been provided in [prometheus/extras](./kubernetes/prometheus/extras) that can be applied against your + installation with + `kubectl apply -f ./filename`. Please only apply this change once you have verified that it will work with your + version of Kubernetes. 3. The _grafana_ namespace has been maintained in the configuration file to be used by the prometheus operator deployed -version of Grafana. This version only accepts a password; you can still specify a username for the admin account but it -will be silently ignored. This will be changed in the future. - + version of Grafana. This version only accepts a password; you can still specify a username for the admin account but + it will be silently ignored. This will be changed in the future. ### Observability We deploy the [OTEL Collector Operator](https://github.com/open-telemetry/opentelemetry-collector) along with a simple -collector. There are several other configurations in the [observability/otel-objects](./kubernetes/observability/otel-objects) -directory. See the [README.md](./kubernetes/observability/otel-objects/README.md) file in the -[observability/otel-objects](./kubernetes/observability/otel-objects) for more information, including an explanation of the -default configuration. +collector. There are several other configurations in +the [observability/otel-objects](./kubernetes/observability/otel-objects) +directory. See the [README.md](./kubernetes/observability/otel-objects/README.md) file in the +[observability/otel-objects](./kubernetes/observability/otel-objects) for more information, including an explanation of +the default configuration. 
 ### Demo Application

 A forked version of the Google [_Bank of Anthos_](https://github.com/GoogleCloudPlatform/bank-of-anthos)
-application is contained in the [`sirius`](./kubernetes/applications/sirius) directory. The github repository for this for is at [_Bank of
-Sirius_](https://github.com/nginxinc/bank-of-sirius).
+application is contained in the [`sirius`](./kubernetes/applications/sirius) directory. The GitHub repository for this
+fork is at [_Bank of Sirius_](https://github.com/nginxinc/bank-of-sirius).

 Normally, the `frontend` microservice is exposed via a load balancer for traffic management. This deployment has been
 modified to use the NGINX or NGINX Plus KIC to manage traffic to the `frontend` microservice. The NGINX or NGINX Plus
@@ -265,20 +272,20 @@ As part of the Bank of Sirius deployment, we deploy a cluster-wide
 [self-signed](https://cert-manager.io/docs/configuration/selfsigned/) issuer using the cert-manager deployed above.
 This is then used by the Ingress object created to enable TLS access to the application. Note that this Issuer can be
 changed out by the user, for example to use the
-[ACME](https://cert-manager.io/docs/configuration/acme/) issuer. The use of the ACME issuer has been tested and works
-without issues, provided the FQDN meets the length requirements. As of this writing the AWS ELB hostname is too long
-to work with the ACME server. Additional work in this area will be undertaken to provide dynamic DNS record creation
-as part of this process so legitimate certificates can be issued.
+[ACME](https://cert-manager.io/docs/configuration/acme/) issuer. The use of the ACME issuer has been tested and works
+without issues, provided the FQDN meets the length requirements. As of this writing the AWS ELB hostname is too long to
+work with the ACME server. Additional work in this area will be undertaken to provide dynamic DNS record creation as
+part of this process so legitimate certificates can be issued.

 In order to provide visibility into the Postgres databases that are running as part of the application, the Prometheus
 Postgres data exporter will be deployed into the same namespace as the application and will be configured to be scraped
 by the prometheus server installed earlier.

 **Note** Due to the way that Pulumi currently handles secrets, the [sirius](./kubernetes/applications/sirius)
-directory contains its own configuration directory [sirius/config](./kubernetes/applications/sirius/config). This directory contains an example
-configuration file that can be copied over and used. The user will be prompted to add passwords to the configuration
-file at the first run of the [start.sh](../../bin/start_all.sh) script. This is a work-around that will be retired as Pulumi
-provides better tools for hierarchical configuration files.
+directory contains its own configuration directory [sirius/config](./kubernetes/applications/sirius/config). This
+directory contains an example configuration file that can be copied over and used. The user will be prompted to add
+passwords to the configuration file at the first run of the [start_all.sh](../../bin/start_all.sh) script. This is a
+work-around that will be retired as Pulumi provides better tools for hierarchical configuration files.

## Simple Load Testing diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py index 8890584b..cda0cf22 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py @@ -9,6 +9,10 @@ from kic_util import pulumi_config +# +# We default to the OSS IC; if the user wants Plus they need to enable it in the config file +# along with the Plus flag, and the addition of a JWT. +# config = pulumi.Config('kic-helm') chart_name = config.get('chart_name') if not chart_name: @@ -24,13 +28,13 @@ helm_repo_url = 'https://helm.nginx.com/stable' nginx_repository = config.get('nginx_repository') if not nginx_repository: - nginx_repository = "private-registry.nginx.com/nginx-ic/nginx-plus-ingress" + nginx_repository = "nginx/nginx-ingress" nginx_tag = config.get('nginx_tag') if not nginx_tag: nginx_tag = "2.1.0" nginx_plus_flag = config.get_bool('nginx_plus_flag') if not nginx_plus_flag: - nginx_plus_flag = True + nginx_plus_flag = False # # Allow the user to set timeout per helm chart; otherwise @@ -132,6 +136,7 @@ def k8_manifest_location(): k8s_provider = k8s.Provider(resource_name=f'ingress-controller-repo-only', kubeconfig=kubeconfig) +# This is required for the service monitor from the Prometheus namespace ns = k8s.core.v1.Namespace(resource_name='nginx-ingress', metadata={'name': 'nginx-ingress', 'labels': { @@ -155,7 +160,7 @@ def k8_manifest_location(): # are available. Set this to true to skip waiting on resources being # available. skip_await=False, - # If we fail, clean up + # If we fail, clean up cleanup_on_fail=True, # Provide a name for our release name="kic",
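A quick way to sanity-check the placeholder behaviour introduced in `start_kube.sh` above — a sketch, assuming the script has already generated the manifest at the path it redirects to, and a GNU `base64` that accepts `-d`:

```bash
# Decode the .dockerconfigjson written by the kubectl --dry-run above. A "placeholder"
# username means no JWT was found in extras/jwt.token, so only the OSS image
# (nginx/nginx-ingress) can be pulled; the NGINX Plus IC needs a valid JWT.
MANIFEST=pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
grep 'dockerconfigjson:' "${MANIFEST}" | awk '{print $2}' | base64 -d
```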