From 06fce09493b8c49fc6d569489e3c5df34997c21f Mon Sep 17 00:00:00 2001 From: Mike Oliverio Date: Tue, 5 Jul 2022 15:25:48 -0400 Subject: [PATCH] Creating public repository --- Dockerfile | 17 + LICENSE | 21 + OPSDOC.md | 65 ++++ README.md | 114 +++++- SECURITY.md | 80 ++++ aws-single-instance-resiliency/Chart.yaml | 24 ++ aws-single-instance-resiliency/LICENSE | 21 + aws-single-instance-resiliency/README.md | 101 +++++ .../templates/NOTES.txt | 1 + .../templates/deployment.yaml | 120 ++++++ .../templates/fluent-bit.yaml | 360 ++++++++++++++++++ .../templates/ingress.yaml | 41 ++ .../templates/namespaces.yaml | 10 + .../nxrm-logback-tasklogfile-override.yaml | 21 + .../templates/pv.yaml | 28 ++ .../templates/pvc.yaml | 12 + .../templates/secret.yaml | 38 ++ .../templates/serviceaccount.yaml | 7 + .../templates/services.yaml | 32 ++ .../templates/storageclass.yaml | 7 + aws-single-instance-resiliency/values.yaml | 77 ++++ header.txt | 3 + single-inst-oss-pro-kubernetes/.DS_Store | Bin 0 -> 6148 bytes single-inst-oss-pro-kubernetes/Chart.yaml | 40 ++ single-inst-oss-pro-kubernetes/LICENSE | 13 + single-inst-oss-pro-kubernetes/README.md | 188 +++++++++ .../templates/.DS_Store | Bin 0 -> 6148 bytes .../templates/NOTES.txt | 27 ++ .../templates/_helpers.tpl | 63 +++ .../templates/configmap-properties.yaml | 17 + .../templates/configmap.yaml | 15 + .../templates/deployment.yaml | 163 ++++++++ .../templates/ingress.yaml | 82 ++++ .../templates/proxy-route.yaml | 23 ++ .../templates/pv.yaml | 26 ++ .../templates/pvc.yaml | 30 ++ .../templates/route.yaml | 27 ++ .../templates/secret.yaml | 15 + .../templates/service.yaml | 66 ++++ .../templates/serviceaccount.yaml | 15 + .../templates/test/test-check-logs.yaml | 25 ++ .../templates/test/test-connection.yaml | 15 + .../tests/deployment_test.yaml | 85 +++++ .../tests/ingress_test.yaml | 144 +++++++ single-inst-oss-pro-kubernetes/values.yaml | 178 +++++++++ 45 files changed, 2456 insertions(+), 1 deletion(-) create mode 100644 
Dockerfile create mode 100644 LICENSE create mode 100644 OPSDOC.md create mode 100644 SECURITY.md create mode 100644 aws-single-instance-resiliency/Chart.yaml create mode 100644 aws-single-instance-resiliency/LICENSE create mode 100644 aws-single-instance-resiliency/README.md create mode 100644 aws-single-instance-resiliency/templates/NOTES.txt create mode 100644 aws-single-instance-resiliency/templates/deployment.yaml create mode 100644 aws-single-instance-resiliency/templates/fluent-bit.yaml create mode 100644 aws-single-instance-resiliency/templates/ingress.yaml create mode 100644 aws-single-instance-resiliency/templates/namespaces.yaml create mode 100644 aws-single-instance-resiliency/templates/nxrm-logback-tasklogfile-override.yaml create mode 100644 aws-single-instance-resiliency/templates/pv.yaml create mode 100644 aws-single-instance-resiliency/templates/pvc.yaml create mode 100644 aws-single-instance-resiliency/templates/secret.yaml create mode 100644 aws-single-instance-resiliency/templates/serviceaccount.yaml create mode 100644 aws-single-instance-resiliency/templates/services.yaml create mode 100644 aws-single-instance-resiliency/templates/storageclass.yaml create mode 100644 aws-single-instance-resiliency/values.yaml create mode 100644 header.txt create mode 100644 single-inst-oss-pro-kubernetes/.DS_Store create mode 100644 single-inst-oss-pro-kubernetes/Chart.yaml create mode 100644 single-inst-oss-pro-kubernetes/LICENSE create mode 100644 single-inst-oss-pro-kubernetes/README.md create mode 100644 single-inst-oss-pro-kubernetes/templates/.DS_Store create mode 100644 single-inst-oss-pro-kubernetes/templates/NOTES.txt create mode 100644 single-inst-oss-pro-kubernetes/templates/_helpers.tpl create mode 100644 single-inst-oss-pro-kubernetes/templates/configmap-properties.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/configmap.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/deployment.yaml create mode 100644 
single-inst-oss-pro-kubernetes/templates/ingress.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/proxy-route.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/pv.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/pvc.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/route.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/secret.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/service.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/serviceaccount.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/test/test-check-logs.yaml create mode 100644 single-inst-oss-pro-kubernetes/templates/test/test-connection.yaml create mode 100644 single-inst-oss-pro-kubernetes/tests/deployment_test.yaml create mode 100644 single-inst-oss-pro-kubernetes/tests/ingress_test.yaml create mode 100644 single-inst-oss-pro-kubernetes/values.yaml diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..19bdabc --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +# +# Copyright (c) 2019-present Sonatype, Inc. All rights reserved. +# Includes the third-party code listed at http://links.sonatype.com/products/clm/attributions. +# "Sonatype" is a trademark of Sonatype, Inc. 
+# + +# FROM docker-all.repo.sonatype.com/alpine:latest +# LABEL maintainer="operations-group@sonatype.com" + +# RUN apk update + +# WORKDIR /app +# COPY ./src ./ + +# EXPOSE 8080 + +# CMD ["./runit"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..19c6e7d --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Sonatype + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/OPSDOC.md b/OPSDOC.md new file mode 100644 index 0000000..6bf4534 --- /dev/null +++ b/OPSDOC.md @@ -0,0 +1,65 @@ + + +## Overview +Overview of the service: what is it, why do we have it, who are the primary +contacts, how to report bugs, links to design docs and other relevant +information. + +### Public Facing Endpoints +The URLs (or IPs) and ports used by the service and what they are used for +(ALB? SSH? FTP?) and notes about any certificates and their location. + +## Monitoring + +Monitoring dashboards / logging / introspection & observability info. 
+ +### Runbooks + +A list of every alert your monitoring system may generate for this service and +a step-by-step "what to do when..." for each of them. + +### SLO +Service Level Objectives in a succinct format: a target value or range of +values for a service level that is measured by an SLI. A natural structure for +SLOs is thus SLI ≤ target, or lower bound ≤ SLI ≤ upper bound. For example, we +might decide that we will return Shakespeare search results "quickly," adopting +an SLO that our average search request latency should be less than 100 +milliseconds. + +For more detailed information, please check out the Service Level Objectives +doc. If you're still unsure of what your SLOs should be, please reach out to +the SREs at #ops-sre-chat. + +Optionally but recommended, have a section of monitoring and dashboards for SLO +tracking (see the auth-service OpsDoc for examples of dashboards). + +## Build + +How to build the software that makes the service. Where to download it from, +where the source code repository is, steps for building and making a package or +other distribution mechanisms. If it is software that you modify in any way +(open source project you contribute to or a local project) include instructions +for how a new developer gets started. Ideally the end result is a package that +can be copied to other machines for installation. + +## Deploy + +How to deploy the service. How to build something from scratch: RAM/disk +requirements, OS version and configuration, what packages to install, and so +on. If this is automated with a configuration management tool like ansible/etc, +then say so. + +## Common Tasks + +Step-by-step instructions for common things like provisioning +(add/change/delete), common problems and their solutions, and so on. + +## DR +Where are backups of data stored? What are disaster / data recovery +procedures? 
+ + diff --git a/README.md b/README.md index 821e427..69bb66e 100644 --- a/README.md +++ b/README.md @@ -1 +1,113 @@ -# nxrm3-helm-charts +![Lint and Test Charts](https://github.com/sonatype/helm3-charts/workflows/Lint%20and%20Test%20Charts/badge.svg) + +## Helm3 Charts for Sonatype Nexus Repository Manager (NXRM3) Products + +These charts are designed to work out of the box with minikube using both ingress and ingress dns addons. + +The current releases have been tested on minikube v1.12.3 running k8s v1.18.3 + +### User Documentation + +See docs/index.md which is also https://sonatype.github.io/nxrm-helm-repository/ + +### Contributing + +See the [contributing document](./CONTRIBUTING.md) for details. + +For Sonatypers, note that external contributors must sign the CLA and +the Dev-Ex team must verify this prior to accepting any PR. + +### Updating Charts + +Charts for NXRM can be updated in `sonatype//charts/` directories. +The most common updates will be to use new application images and to bump +chart versions for release. + +There should likely be no reason to update anything in `docs/` by hand. + +Test a chart in a local k8s cluster (like minikube) by installing the local copy +from within each charts directory: +``` +helm install --generate-name ./ +``` + +### Packaging and Indexing + +*Sonatype CI build will package, commit, and publish to the official helm repository.* + +Upon update of the `charts/`, run `build.sh` from here in the project root to +create `tgz` packages of the latest chart changes and regenerate the `index.yaml` +file to the `docs/` directory which is the root of the +[repo site](https://sonatype.github.io/nxrm-helm-repository/). + +The build process requires Helm 3. + +### Testing the Helm Charts +To test Helm Charts locally you will need to follow the next steps: + +1. Install docker, helm, kubectl, and [minikube](https://minikube.sigs.k8s.io/docs/start/), if you don't already have it on your local workstation. 
+ * You could also use docker with k8s enabled instead of minikube. You don't need both. +2. Start up minikube: `minikube start` +3. Confirm minikube is up and running: `minikube status` +4. List the existing pods in the cluster: `kubectl get pods` (There should not be anything listed at this point.) +5. Install the helm chart in any of these ways: + * From a copy of the source: `helm install iq {path/to/your/nxrm-helm-repository/charts/}/nexus-iq --wait` + * From our production online repo: Add our helm repo locally as instructed at https://sonatype.github.io/nxrm-helm-repository// +6. List installed servers with helm: helm list +7. Watch the server start in kubernetes by running: `kubectl get pods` +8. Use the pod name you get from last command to follow the console logs: `kubectl logs -f iq-nexus-iq-server-xxx` +9. Confirm expected version numbers in those logs. +10. Forward a localhost port to a port on the running pod: `kubectl port-forward iq-nexus-iq-server-xxx 8070` +11. Connect and check that your fresh new server is successfully running: `http://localhost:8070/` +12. Uninstall the server with helm: `helm delete iq` +13. Confirm it's gone: `helm list && kubectl get pods` +14. Shutdown minikube: `minikube stop` + +### Running Unit Tests +To unit test the helm charts you can follow the next steps: + +1. Install the unittest plugin for Helm: https://github.com/quintush/helm-unittest +2. Run the tests for each individual chart: + * `cd charts//nexus-repository-manager; helm unittest -3 -t junit -o test-output.xml .` + +### Running Integration Tests +You can run the integration tests for the helm charts by running the next commands. + +Before running the integration tests: +* Install docker, helm, kubectl, and [minikube](https://minikube.sigs.k8s.io/docs/start/), if you don't already have it on your local workstation. + * You could also use docker with k8s enabled instead of minikube. +* The integration tests will be executed on a running cluster. 
Each test will create a new POD that will connect to the server installed by our +helm chart. Check [this](https://helm.sh/docs/topics/chart_tests/) + + +Running integration tests for Nexus Repository Manager: +1. From source code: `helm install nxrm ./charts//nexus-repository-manager --wait` +2. Run the tests: `helm test nxrm` + +### Further Notes on Usage + +#### Resolver File and Ingress-DNS + +Get the default `values.yaml` for each chart. +- Nexus Repository: `helm show values nexus-repo sonatype/nxrm3-helm-repository > repo-values.yaml` + +Edit the values file you just downloaded to enable ingress support, and install the chart +with those values: + +- Nexus Repository: `helm install nexus-repo sonatype/nxrm3-helm-repository -f repo-values.yaml` + +If you want to use the custom values file for the demo environment that expose +the apps on a local domain of *.demo which is done by creating a resolver file. +On a Mac it's `/etc/resolver/minikube-minikube-demo` with the following entries: +``` +domain demo +nameserver 192.168.64.8 +search_order 1 +timeout 5 +``` + +You'll need to update the IP address to match the running instance's IP address. +Use `minikube ip` to get the address + +Docs for Ingress-dns are here +https://github.com/kubernetes/minikube/tree/master/deploy/addons/ingress-dns diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..829cd68 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,80 @@ + + +# Reporting Security Vulnerabilities + +## When to report + +First check +[Important advisories of known security vulnerabilities in Sonatype products](https://support.sonatype.com/hc/en-us/sections/203012668-Security-Advisories) +to see if this has been previously reported. + +## How to report + +Please email reports regarding security related issues you find to [security@sonatype.com](mailto:security@sonatype.com). + +Use our public key below to keep your message safe. 
+ +## What to include + +Please use a descriptive subject line in your email report. + +Your name and/or affiliation. + +A detailed technical description of the vulnerability, attack scenario and where +possible, how we can reproduce your findings. + +Provide us with a secure way to respond. + +## What to expect + +Your email will be acknowledged within 1 - 2 business days, and you'll receive a +more detailed response to your email within 7 business days. + +We ask that everyone please follow responsible disclosure practices and allow +time for us to release a fix prior to public release. + +Once an issue is reported, Sonatype uses the following disclosure process: + +When a report is received, we confirm the issue and determine its severity. + +If third-party services or software require mitigation before publication, those +projects will be notified. + +## Our public key + +```console +-----BEGIN PUBLIC KEY BLOCK----- +mQENBFF+a9ABCADQWSAAU7w9i71Zn3TQ6k7lT9x57cRdtX7V709oeN/c/1it+gCw +onmmCyf4ypor6XcPSOasp/x0s3hVuf6YfMbI0tSwJUWWihrmoPGIXtmiSOotQE0Q +Sav41xs3YyI9LzQB4ngZR/nhp4YhioD1dVorD6LGXk08rvl2ikoqHwTagbEXZJY7 +3VYhW6JHbZTLwCsfyg6uaSYF1qXfUxHPOiHYKNbhK/tM3giX+9ld/7xi+9f4zEFQ +eX9wcRTdgdDOAqDOK7MV30KXagSqvW0MgEYtKX6q4KjjRzBYjkiTdFW/yMXub/Bs +5UckxHTCuAmvpr5J0HIUeLtXi1QCkijyn8HJABEBAAG0KVNvbmF0eXBlIFNlY3Vy +aXR5IDxzZWN1cml0eUBzb25hdHlwZS5jb20+iQE4BBMBAgAiBQJRfmvQAhsDBgsJ +CAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAgkmxsNtgwfUzbCACLtCgieq1kJOqo +2i136ND5ZOj31zIzNENLn8dhSg5zQwTHOcntWAtS8uCNq4fSlslwvlbPYWTLD7fE +iJn1z7BCU8gBk+pkAJJFWEPweMVt+9bYQ4HfKceGbJeuwBBhS34SK9ZIp9gfxxfA +oTm0aGYwKR5wH3sqL/mrhwKhPt9wXR4qwlE635STEX8wzJ5SBqf3ArJUtCp1rzgR +Dx+DiZed5HE1pOI2Kyb6O80bm485WThPXxpvp3bfzTNYoGzeLi/F7WkmgggkXxsT +Pyd0sSx0B/MO4lJtQvEBlIHDFno9mXa30fKl+rzp2geG5UxNHJUjaC5JhfWLEXEX +wV0ErBsmuQENBFF+a9ABCADXj04+GLIz8VCaZH554nUHEhaKoiIXH3Tj7UiMZDqy +o4WIw2RFaCQNA8T0R5Q0yxINU146JQMbA2SN59AGcGYZcajyEvTR7tLG0meMO6S0 +JWpkX7s3xaC0s+5SJ/ba00oHGzW0aotgzG9BWA5OniNHK7zZKMVu7M80M/wB1RvK 
+x775hAeJ+8F9MDJ+ijydBtaOfDdkbg+0kU1xR6Io+vVLPk38ghlWU8QFP4/B0oWi +jK4xiDqK6cG7kyH9kC9nau+ckH8MrJ/RzEpsc4GRwqS4IEnvHWe7XbgydWS1bCp6 +8uP5ma3d02elQmSEa+PABIPKnZcAf1YKLr9O/+IzEdOhABEBAAGJAR8EGAECAAkF +AlF+a9ACGwwACgkQIJJsbDbYMH3WzAf/XOm4YQZFOgG2h9d03m8me8d1vrYico+0 +pBYU9iCozLgamM4er9Efb+XzfLvNVKuqyR0cgvGszukIPQYeX58DMrZ07C+E0wDZ +bG+ZAYXT5GqsHkSVnMCVIfyJNLjR4sbVzykyVtnccBL6bP3jxbCP1jJdT7bwiKre +1jQjvyoL0yIegdiN/oEdmx52Fqjt4NkQsp4sk625UBFTVISr22bnf60ZIGgrRbAP +DU1XMdIrmqmhEEQcXMp4CeflDMksOmaIeAUkZY7eddnXMwQDJTnz5ziCal+1r0R3 +dh0XISRG0NkiLEXeGkrs7Sn7BAAsTsaH/1zU6YbvoWlMlHYT6EarFQ== =sFGt +-----END PUBLIC KEY BLOCK----- +``` + diff --git a/aws-single-instance-resiliency/Chart.yaml b/aws-single-instance-resiliency/Chart.yaml new file mode 100644 index 0000000..82feece --- /dev/null +++ b/aws-single-instance-resiliency/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: nxrm-aws-resiliency +description: Helm chart for a Resilient Nexus Repository deployment in AWS + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 40.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. 
They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "3.40.1" diff --git a/aws-single-instance-resiliency/LICENSE b/aws-single-instance-resiliency/LICENSE new file mode 100644 index 0000000..19c6e7d --- /dev/null +++ b/aws-single-instance-resiliency/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Sonatype + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/aws-single-instance-resiliency/README.md b/aws-single-instance-resiliency/README.md new file mode 100644 index 0000000..9cf4fd9 --- /dev/null +++ b/aws-single-instance-resiliency/README.md @@ -0,0 +1,101 @@ +# Helm Chart for a Resilient Nexus Repository Deployment in AWS + +This Helm chart configures the Kubernetes resources that are needed for a resilient Nexus Repository deployment on AWS as described in our documented [single-node cloud resilient deployment example using AWS](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability/single-node-cloud-resilient-deployment-example-using-aws). + +Use the checklist below to determine if this Helm chart is suitable for your deployment needs. + +--- + +## When to Use This Helm Chart +Use this Helm chart if you are doing any of the following: +- Deploying Nexus Repository Pro to an AWS cloud environment with the desire for automatic failover across Availability Zones (AZs) within a single region +- Planning to configure a single Nexus Repository Pro instance within your Kubernetes/EKS cluster with two or more nodes spread across different AZs within an AWS region +- Using an external PostgreSQL database + +> **Note**: A Nexus Repository Pro license is required for our resilient deployment options. Your Nexus Repository Pro license file must be stored externally as mounted from AWS Secrets Manager (required). 
+ +--- + +## Prerequisites for This Chart +In order to set up an environment like the one illustrated above and described in this section, you will need the following: + +- Kubernetes 1.19+ +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [Helm 3](https://helm.sh/docs/intro/install/) +- A Nexus Repository Pro license +- An AWS account with permissions for accessing the following AWS services: + - Elastic Kubernetes Service (EKS) + - Relational Database Service (RDS) for PostgreSQL + - Application Load Balancer (ALB) + - CloudWatch + - Simple Storage Service (S3) + - Secrets Manager + +You will also need to complete the steps below. See the referenced AWS documentation for detailed configuration steps. Also see [our resiliency documentation](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability/single-node-cloud-resilient-deployment-example-using-aws) for more details about why these steps are necessary and how each AWS solution functions within a resilient deployment: +1. Configure an EKS cluster - [AWS documentation for managed nodes (i.e., EC2)](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html) +2. Create an Aurora database cluster - [AWS documentation for creating an Aurora database cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.CreateInstance.html) +3. Deploy the AWS Load Balancer Controller (LBC) to your EKS cluster - [AWS documentation for deploying the AWS LBC to your EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html) +4. Install AWS Secrets Store CSI drivers - You need to create an IAM service account using the ```eksctl create iamserviceaccount``` command. Before proceeding, read the points below as they contain important required steps to ensure this helm chart will work for you:
+ - **You must include two additional command parameters when running the command**: ```--role-only``` and ```--namespace <namespace>``` + - It is important to include the ```--role-only``` option in the ```eksctl create iamserviceaccount``` command so that the helm chart manages the Kubernetes service account.
+ - **The namespace you specify to the ```eksctl create iamserviceaccount``` command must be the same namespace into which you will deploy the Nexus Repository pod.**
+ - Although the namespace does not exist at this point, you must specify it as part of the command. **Do not create that namespace manually beforehand**; the helm chart will create and manage it. + - You should specify this same namespace as the value of ```nexusNs``` in your values.yaml.
+ - Follow the instructions provided in the [AWS Secrets Store CSI drivers documentation](https://github.com/aws/secrets-store-csi-driver-provider-aws/blob/main/README.md) to install the AWS Secrets Store CSI drivers; ensure that you follow the additional instructions in the bullets above when you reach the ```eksctl create iamserviceaccount``` command on that page. +5. Ensure that your EKS nodes are granted CloudWatchFullAccess and CloudWatchAgentServerPolicy IAM policies. This Helm chart will configure Fluentbit for log externalisation to CloudWatch. + - [AWS documentation for setting up Fluentbit](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-EKS.html) + +--- + +## Deployment +1. Pull the [nxrm-resiliency-aws-helmchart](https://github.com/sonatype/nxrm-resiliency-aws-helmchart). +2. Ensure you have updated your values.yaml with appropriate values for your environment. +3. Install the chart using the following: + +```helm install nxrm nexus/nxrm-aws-resiliency --values values.yaml``` + +4. Get the Nexus Repository link using the following: + +```kubectl get ingresses -n nexusrepo``` + +--- + +## Health Check +You can use the following commands to perform various health checks: + +See a list of releases: + + ```helm list``` + + Check pods using the following: + + ```kubectl get pods -n nexusrepo``` + +Check the Nexus Repository logs with the following: + + ```kubectl logs -n nexusrepo nxrm-app``` + +Check if the pod is OK by using the following; you shouldn't see any error/warning messages: + + ```kubectl describe pod -n nexusrepo``` + +Check if ingress is OK using the following: + + ```kubectl describe ingress -n nexusrepo``` + +Check that the Fluent Bit pod is sending events to CloudWatch using the following: + + ```kubectl logs -n amazon-cloudwatch <fluent-bit pod name>``` + +If the above returns without error, then check CloudWatch for the ```/aws/containerinsights/<cluster name>/nexus-logs``` log group, which should contain four log streams. 
+ +--- + +## Uninstall +To uninstall the deployment, use the following: + + ```helm uninstall nxrm``` + +After removing the deployment, ensure that the namespace is deleted and that Nexus Repository is not listed when using the following: + + ```helm list``` diff --git a/aws-single-instance-resiliency/templates/NOTES.txt b/aws-single-instance-resiliency/templates/NOTES.txt new file mode 100644 index 0000000..c5615c6 --- /dev/null +++ b/aws-single-instance-resiliency/templates/NOTES.txt @@ -0,0 +1 @@ +Thank you for installing {{ .Chart.Name }}. diff --git a/aws-single-instance-resiliency/templates/deployment.yaml b/aws-single-instance-resiliency/templates/deployment.yaml new file mode 100644 index 0000000..e17baa0 --- /dev/null +++ b/aws-single-instance-resiliency/templates/deployment.yaml @@ -0,0 +1,120 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }} + namespace: {{ .Values.namespaces.nexusNs }} + labels: + app: nxrm +spec: + replicas: 1 + selector: + matchLabels: + app: nxrm + template: + metadata: + labels: + app: nxrm + spec: + serviceAccountName: {{ .Values.serviceAccount.name }} + initContainers: + # chown nexus-data to 'nexus' user and init log directories/files for a new pod + # otherwise the side car containers will crash a couple of times and backoff whilst waiting + # for nxrm-app to start and this increases the total start up time. 
+ - name: chown-nexusdata-owner-to-nexus-and-init-log-dir + image: {{ .Values.deployment.initContainer.image.repository }}:{{ .Values.deployment.initContainer.image.tag }} + command: [/bin/sh] + args: + - -c + - >- + mkdir -p /nexus-data/etc/logback && + mkdir -p /nexus-data/log/tasks && + mkdir -p /nexus-data/log/audit && + touch -a /nexus-data/log/tasks/allTasks.log && + touch -a /nexus-data/log/audit/audit.log && + touch -a /nexus-data/log/request.log && + chown -R '200:200' /nexus-data + volumeMounts: + - name: nexusdata + mountPath: /nexus-data + containers: + - name: nxrm-app + image: {{ .Values.deployment.container.image.repository }}:{{ .Values.deployment.container.image.tag }} + securityContext: + runAsUser: 200 + imagePullPolicy: {{ .Values.deployment.container.pullPolicy }} + ports: + - containerPort: {{ .Values.deployment.container.containerPort }} + env: + - name: DB_NAME + value: "{{ .Values.deployment.container.env.nexusDBName }}" + - name: DB_PASSWORD + valueFrom: + secretKeyRef: + name: nxrm-db-secret + key: db-password + - name: DB_USER + valueFrom: + secretKeyRef: + name: nxrm-db-secret + key: db-user + - name: DB_HOST + valueFrom: + secretKeyRef: + name: nxrm-db-secret + key: db-host + - name: NEXUS_SECURITY_INITIAL_PASSWORD + valueFrom: + secretKeyRef: + name: nxrm-admin-secret + key: nexus-admin-password + - name: NEXUS_SECURITY_RANDOMPASSWORD + value: "false" + - name: INSTALL4J_ADD_VM_PARAMS + value: "-Xms2703m -Xmx2703m -XX:MaxDirectMemorySize=2703m -Dnexus.licenseFile=/nxrm-secrets/{{ .Values.secret.license.alias }} \ + -Dnexus.datastore.enabled=true -Djava.util.prefs.userRoot=${NEXUS_DATA}/javaprefs \ + -Dnexus.datastore.nexus.jdbcUrl=jdbc:postgresql://${DB_HOST}:{{ .Values.deployment.container.env.nexusDBPort }}/${DB_NAME} \ + -Dnexus.datastore.nexus.username=${DB_USER} \ + -Dnexus.datastore.nexus.password=${DB_PASSWORD}" + volumeMounts: + - mountPath: /nxrm-secrets + name: nxrm-secrets + - name: nexusdata + mountPath: /nexus-data + - 
name: logback-tasklogfile-override + mountPath: /nexus-data/etc/logback/logback-tasklogfile-appender-override.xml + subPath: logback-tasklogfile-appender-override.xml + - name: request-log + image: {{ .Values.deployment.requestLogContainer.image.repository }}:{{ .Values.deployment.requestLogContainer.image.tag }} + args: [/bin/sh, -c, 'tail -n+1 -F /nexus-data/log/request.log'] + volumeMounts: + - name: nexusdata + mountPath: /nexus-data + - name: audit-log + image: {{ .Values.deployment.auditLogContainer.image.repository }}:{{ .Values.deployment.auditLogContainer.image.tag }} + args: [/bin/sh, -c, 'tail -n+1 -F /nexus-data/log/audit/audit.log'] + volumeMounts: + - name: nexusdata + mountPath: /nexus-data + - name: tasks-log + image: {{ .Values.deployment.taskLogContainer.image.repository }}:{{ .Values.deployment.taskLogContainer.image.tag }} + args: [/bin/sh, -c, 'tail -n+1 -F /nexus-data/log/tasks/allTasks.log'] + volumeMounts: + - name: nexusdata + mountPath: /nexus-data + volumes: + - name: nexusdata + persistentVolumeClaim: + claimName: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-ebs-claim + - name: nxrm-secrets + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-secret + fsType: ext4 + - name: logback-tasklogfile-override + configMap: + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-logback-tasklogfile-override + items: + - key: logback-tasklogfile-appender-override.xml + path: logback-tasklogfile-appender-override.xml diff --git a/aws-single-instance-resiliency/templates/fluent-bit.yaml b/aws-single-instance-resiliency/templates/fluent-bit.yaml new file mode 100644 index 0000000..8556edf --- /dev/null +++ b/aws-single-instance-resiliency/templates/fluent-bit.yaml @@ -0,0 +1,360 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-fluent-bit + namespace: 
{{ .Values.namespaces.cloudwatchNs }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-fluent-bit-role +rules: + - nonResourceURLs: + - /metrics + verbs: + - get + - apiGroups: [""] + resources: + - namespaces + - pods + - pods/logs + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-fluent-bit-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-fluent-bit-role +subjects: + - kind: ServiceAccount + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-fluent-bit + namespace: {{ .Values.namespaces.cloudwatchNs }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: fluent-bit-cluster-info + namespace: {{ .Values.namespaces.cloudwatchNs }} +data: + cluster.name: {{ .Values.deployment.clusterName }} + http.server: "On" + http.port: "2020" + read.head: "Off" + read.tail: "On" + logs.region: {{ .Values.deployment.logsRegion }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-fluent-bit-config + namespace: {{ .Values.namespaces.cloudwatchNs }} + labels: + k8s-app: fluent-bit +data: + fluent-bit.conf: | + [SERVICE] + Flush 5 + Log_Level info + Daemon off + Parsers_File parsers.conf + HTTP_Server ${HTTP_SERVER} + HTTP_Listen 0.0.0.0 + HTTP_Port ${HTTP_PORT} + storage.path /var/fluent-bit/state/flb-storage/ + storage.sync normal + storage.checksum off + storage.backlog.mem_limit 5M + + @INCLUDE nexus-log.conf + @INCLUDE nexus-request-log.conf + @INCLUDE nexus-audit-log.conf + @INCLUDE nexus-tasks-log.conf + + nexus-log.conf: | + [INPUT] + Name tail + Tag nexus.nexus-log + Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name 
}}-nxrm.deployment-*-*_{{ .Values.namespaces.nexusNs }}_nxrm-app-*.log + Parser docker + DB /var/fluent-bit/state/flb_container.db + Mem_Buf_Limit 5MB + Skip_Long_Lines Off + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + + [FILTER] + Name kubernetes + Match nexus.nexus-log + Kube_URL https://kubernetes.default.svc:443 + Kube_Tag_Prefix application.var.log.containers. + Merge_Log On + Merge_Log_Key log_processed + K8S-Logging.Parser On + K8S-Logging.Exclude Off + Labels Off + Annotations Off + + [OUTPUT] + Name cloudwatch_logs + Match nexus.nexus-log + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/nexus-logs + log_stream_prefix ${HOST_NAME}-nexus.log- + auto_create_group true + extra_user_agent container-insights + + nexus-request-log.conf: | + [INPUT] + Name tail + Tag nexus.request-log + Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment-*-*_{{ .Values.namespaces.nexusNs }}_request-log-*.log + Parser docker + DB /var/fluent-bit/state/flb_container.db + Mem_Buf_Limit 5MB + Skip_Long_Lines Off + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + + [FILTER] + Name kubernetes + Match nexus.request-log + Kube_URL https://kubernetes.default.svc:443 + Kube_Tag_Prefix application.var.log.containers. 
+ Merge_Log On + Merge_Log_Key log_processed + K8S-Logging.Parser On + K8S-Logging.Exclude Off + Labels Off + Annotations Off + + [OUTPUT] + Name cloudwatch_logs + Match nexus.request-log + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/nexus-logs + log_stream_prefix ${HOST_NAME}-request.log- + auto_create_group true + extra_user_agent container-insights + + nexus-audit-log.conf: | + [INPUT] + Name tail + Tag nexus.audit-log + Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment-*-*_{{ .Values.namespaces.nexusNs }}_audit-log-*.log + Parser docker + DB /var/fluent-bit/state/flb_container.db + Mem_Buf_Limit 5MB + Skip_Long_Lines Off + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + + [FILTER] + Name kubernetes + Match nexus.audit-log + Kube_URL https://kubernetes.default.svc:443 + Kube_Tag_Prefix application.var.log.containers. + Merge_Log On + Merge_Log_Key log_processed + K8S-Logging.Parser On + K8S-Logging.Exclude Off + Labels Off + Annotations Off + + [OUTPUT] + Name cloudwatch_logs + Match nexus.audit-log + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/nexus-logs + log_stream_prefix ${HOST_NAME}-audit.log- + auto_create_group true + extra_user_agent container-insights + + nexus-tasks-log.conf: | + [INPUT] + Name tail + Tag nexus.tasks-log + Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment-*-*_{{ .Values.namespaces.nexusNs }}_tasks-log-*.log + Parser docker + DB /var/fluent-bit/state/flb_container.db + Mem_Buf_Limit 5MB + Skip_Long_Lines Off + Refresh_Interval 10 + Rotate_Wait 30 + storage.type filesystem + Read_from_Head ${READ_FROM_HEAD} + + [FILTER] + Name kubernetes + Match nexus.tasks-log + Kube_URL https://kubernetes.default.svc:443 + Kube_Tag_Prefix application.var.log.containers. 
+ Merge_Log On + Merge_Log_Key log_processed + K8S-Logging.Parser On + K8S-Logging.Exclude Off + Labels Off + Annotations Off + + [OUTPUT] + Name cloudwatch_logs + Match nexus.tasks-log + region ${AWS_REGION} + log_group_name /aws/containerinsights/${CLUSTER_NAME}/nexus-logs + log_stream_prefix ${HOST_NAME}-tasks.log- + auto_create_group true + extra_user_agent container-insights + + parsers.conf: | + [PARSER] + Name docker + Format json + Time_Key time + Time_Format %Y-%m-%dT%H:%M:%S.%LZ + + [PARSER] + Name syslog + Format regex + Regex ^(?