Compare commits

..

No commits in common. "main" and "41.1.3" have entirely different histories.
main ... 41.1.3

92 changed files with 365 additions and 345 deletions

View File

@ -11,7 +11,7 @@
# Eclipse Foundation. All other trademarks are the property of their respective owners.
#
FROM docker-all.repo.sonatype.com/alpine/helm:3.10.1
FROM docker-all.repo.sonatype.com/alpine/helm:3.9.3
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh

View File

@ -17,6 +17,16 @@ final jira = [
credentialId : 'jenkins-jira', autoRelease: true, failOnError: true
]
final jiraVersionMappings = [
'nexus-repository-manager': 'helm-nxrm',
'nxrm-aws-resiliency': 'helm-nxrm-aws-resiliency'
]
final chartLocation = [
'nexus-repository-manager': 'nexus-repository-manager',
'nxrm-aws-resiliency': 'nxrm-aws-resiliency'
]
properties([
parameters([
string(
@ -44,9 +54,8 @@ dockerizedBuildPipeline(
runSafely "git checkout ${gitBranch(env)}"
runSafely "./upgrade.sh ./nexus-repository-manager ${chartVersion} ${params.appVersion}"
runSafely "./upgrade.sh ./nxrm-aws-resiliency ${chartVersion} ${params.appVersion}"
runSafely './build.sh'
runSafely 'git add nxrm-aws-resiliency'
runSafely 'git add nexus-repository-manager'
runSafely './build.sh'
runSafely 'git add nxrm-aws-resiliency nexus-repository-manager'
},
skipVulnerabilityScan: true,
archiveArtifacts: 'docs/*',

View File

@ -12,12 +12,17 @@
Eclipse Foundation. All other trademarks are the property of their respective owners.
-->
# ⚠️ Archive Notice
As of October 24, 2023, we will no longer update or support the [Single-Instance OSS/Pro Helm Chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nexus-repository-manager).
Deploying Nexus Repository in containers with an embedded database has been known to corrupt the database under some circumstances. We strongly recommend that you use an external PostgreSQL database for Kubernetes deployments.
## Helm Charts for Sonatype Nexus Repository Manager 3
We now provide one [HA/Resiliency Helm Chart](https://github.com/sonatype/nxrm3-ha-repository/tree/main/nxrm-ha) that supports both high availability and resilient deployments in AWS, Azure, or on-premises in a Kubernetes cluster. This is our only supported Helm chart for deploying Sonatype Nexus Repository; it requires a PostgreSQL database.
We provide Helm charts for two different deployment scenarios:
See the [AWS Single-Instance Resiliency Chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nxrm-aws-resiliency) if you are doing the following:
* Deploying Nexus Repository Pro to an AWS cloud environment with the desire for automatic failover across Availability Zones (AZs) within a single region
* Planning to configure a single Nexus Repository Pro instance within your Kubernetes/EKS cluster with two or more nodes spread across different AZs within an AWS region
* Using an external PostgreSQL database (required)
See the [Single-Instance OSS/Pro Kubernetes Chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nexus-repository-manager) if you are doing the following:
* Using embedded OrientDB (required)
* Deploying either Nexus Repository Pro or OSS to an on-premises environment with bare metal/VM server (Node)
* Deploying a single Nexus Repository instance within a Kubernetes cluster that has a single Node configured

View File

@ -12,7 +12,7 @@
# Eclipse Foundation. All other trademarks are the property of their respective owners.
#
helm plugin install --version "0.2.11" https://github.com/quintush/helm-unittest
helm plugin install https://github.com/quintush/helm-unittest
set -e

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,24 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
*.tar

View File

@ -1,16 +1,14 @@
apiVersion: v2
name: nexus-repository-manager
# The nexus-repository-manager chart is deprecated and no longer maintained
deprecated: true
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 64.2.0
version: 41.1.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 3.64.0
appVersion: 3.41.1
description: DEPRECATED Sonatype Nexus Repository Manager - Universal Binary repository
description: Sonatype Nexus Repository Manager - Universal Binary repository
# A chart can be either an 'application' or a 'library' chart.
#
@ -37,3 +35,6 @@ home: https://www.sonatype.com/nexus-repository-oss
icon: https://sonatype.github.io/helm3-charts/NexusRepo_Vertical.svg
sources:
- https://github.com/sonatype/nexus-public
maintainers:
- email: support@sonatype.com
name: Sonatype

View File

@ -12,8 +12,192 @@
Eclipse Foundation. All other trademarks are the property of their respective owners.
-->
# ⚠️ Archive Notice
As of October 24, 2023, we will no longer update or support this Helm chart.
# Nexus Repository
We now provide one [HA/Resiliency Helm Chart](https://github.com/sonatype/nxrm3-ha-repository/tree/main/nxrm-ha) that supports both high availability and resilient deployments in AWS, Azure, or on-premises in a Kubernetes cluster. This is our only supported Helm chart for deploying Sonatype Nexus Repository; it requires a PostgreSQL database.
[Nexus Repository OSS](https://www.sonatype.com/nexus-repository-oss) provides universal support for all major build tools.
- Store and distribute Maven/Java, npm, NuGet, Helm, Docker, p2, OBR, APT, Go, R, Conan components and more.
- Manage components from dev through delivery: binaries, containers, assemblies, and finished goods.
- Support for the Java Virtual Machine (JVM) ecosystem, including Gradle, Ant, Maven, and Ivy.
- Compatible with popular tools like Eclipse, IntelliJ, Hudson, Jenkins, Puppet, Chef, Docker, and more.
*Efficiency and Flexibility to Empower Development Teams*
- Streamline productivity by sharing components internally.
- Gain insight into component security, license, and quality issues.
- Build off-line with remote package availability.
- Integrate with industry-leading build tools.
---
## Introduction
This chart installs a single Nexus Repository instance within a Kubernetes cluster that has a single node (server) configured. It is not appropriate for a resilient Nexus Repository deployment. Refer to our [resiliency documentation](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability) for information about resilient Nexus Repository deployment options.
Use the checklist below to determine if this Helm chart is suitable for your deployment needs.
### When to Use This Helm Chart
Use this Helm chart if you are doing any of the following:
- Deploying either Nexus Repository Pro or OSS to an on-premises environment with bare metal/VM server (Node)
- Deploying a single Nexus Repository instance within a Kubernetes cluster that has a single Node configured
> **Note**: If you are using Nexus Repository Pro, your license file and embedded database will reside on the node and be mounted on the container as a Persistent Volume (required).
### When Not to Use This Helm Chart
Do not use this Helm chart and, instead, refer to our [resiliency documentation](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability) if you are doing any of the following:
- Deploying Nexus Repository Pro to a cloud environment with the desire for automatic failover across Availability Zones (AZs) within a single region
- Planning to configure a single Nexus Repository Pro instance within your Kubernetes/EKS cluster with two or more nodes spread across different AZs within an AWS region
- Using an external PostgreSQL database
> **Note**: A Nexus Repository Pro license is required for our resilient deployment options. Your Nexus Repository Pro license file must be stored externally as either mounted from AWS Secrets/Azure Key Vault in AWS/Azure deployments or mounted using Kustomize for on-premises deployments (required).
> **Note**: We do not currently provide Helm charts for our resilient deployment options.
---
## Prerequisites for This Chart
- Kubernetes 1.19+
- PV provisioner support in the underlying infrastructure
- Helm 3
### With Open Docker Image
By default, this Chart uses Sonatype's Public Docker image. If you want to use a different image, run with the following: `--set nexus.imageName=<my>/<image>`.
### With Red Hat Certified container
If you're looking run our Certified Red Hat image in an OpenShift4 environment, there is a Certified Operator in OperatorHub.
---
## Adding the repo
To add as a Helm Repo, use the following:
```helm repo add sonatype https://sonatype.github.io/helm3-charts/```
---
## Testing the Chart
To test the chart, use the following:
```bash
$ helm install --dry-run --debug --generate-name ./
```
To test the chart with your own values, use the following:
```bash
$ helm install --dry-run --debug --generate-name -f myvalues.yaml ./
```
---
## Installing the Chart
To install the chart, use the following:
```bash
$ helm install nexus-rm sonatype/nexus-repository-manager [ --version v29.2.0 ]
```
The above command deploys Nexus Repository on the Kubernetes cluster in the default configuration.
You can pass custom configuration values as follows:
```bash
$ helm install -f myvalues.yaml sonatype-nexus ./
```
The default login is randomized and can be found in `/nexus-data/admin.password` or you can get the initial static passwords (admin/admin123)
by setting the environment variable `NEXUS_SECURITY_RANDOMPASSWORD` to `false` in your `values.yaml`.
---
## Uninstalling the Chart
To uninstall/delete the deployment, use the following:
```bash
$ helm list
NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
plinking-gopher default 1 2021-03-10 15:44:57.301847 -0800 PST deployed nexus-repository-manager-29.2.0 3.29.2
$ helm delete plinking-gopher
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
---
## Configuration
The following table lists the configurable parameters of the Nexus chart and their default values.
| Parameter | Description | Default |
|--------------------------------------------|----------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------|
| `deploymentStrategy` | Deployment Strategy | `Recreate` |
| `nexus.imagePullPolicy` | Nexus Repository image pull policy | `IfNotPresent` |
| `nexus.imagePullSecrets` | Secret to download Nexus Repository image from private registry | `nil` |
| `nexus.docker.enabled` | Enable/disable Docker support | `false` |
| `nexus.docker.registries` | Support multiple Docker registries | (see below) |
| `nexus.docker.registries[0].host` | Host for the Docker registry | `cluster.local` |
| `nexus.docker.registries[0].port` | Port for the Docker registry | `5000` |
| `nexus.docker.registries[0].secretName` | TLS Secret Name for the ingress | `registrySecret` |
| `nexus.env` | Nexus Repository environment variables | `[{INSTALL4J_ADD_VM_PARAMS: -Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap}]` |
| `nexus.resources` | Nexus Repository resource requests and limits | `{}` |
| `nexus.nexusPort` | Internal port for Nexus Repository service | `8081` |
| `nexus.securityContext` | Security Context (for enabling official image use `fsGroup: 2000`) | `{}` |
| `nexus.labels` | Service labels | `{}` |
| `nexus.podAnnotations` | Pod Annotations | `{}` |
| `nexus.livenessProbe.initialDelaySeconds` | LivenessProbe initial delay | 30 |
| `nexus.livenessProbe.periodSeconds` | Seconds between polls | 30 |
| `nexus.livenessProbe.failureThreshold` | Number of attempts before failure | 6 |
| `nexus.livenessProbe.timeoutSeconds` | Time in seconds after liveness probe times out | `nil` |
| `nexus.livenessProbe.path` | Path for LivenessProbe | / |
| `nexus.readinessProbe.initialDelaySeconds` | ReadinessProbe initial delay | 30 |
| `nexus.readinessProbe.periodSeconds` | Seconds between polls | 30 |
| `nexus.readinessProbe.failureThreshold` | Number of attempts before failure | 6 |
| `nexus.readinessProbe.timeoutSeconds` | Time in seconds after readiness probe times out | `nil` |
| `nexus.readinessProbe.path` | Path for ReadinessProbe | / |
| `nexus.hostAliases` | Aliases for IPs in /etc/hosts | [] |
| `nexus.properties.override` | Set to true to override default nexus.properties | `false` |
| `nexus.properties.data` | A map of custom nexus properties if `override` is set to true | `nexus.scripts.allowCreation: true` |
| `ingress.enabled` | Create an ingress for Nexus Repository | `true` |
| `ingress.annotations` | Annotations to enhance ingress configuration | `{kubernetes.io/ingress.class: nginx}` |
| `ingress.tls.secretName` | Name of the secret storing TLS cert, `false` to use the Ingress' default certificate | `nexus-tls` |
| `ingress.path` | Path for ingress rules. GCP users should set to `/*`. | `/` |
| `tolerations` | tolerations list | `[]` |
| `config.enabled` | Enable configmap | `false` |
| `config.mountPath` | Path to mount the config | `/sonatype-nexus-conf` |
| `config.data` | Configmap data | `nil` |
| `deployment.annotations` | Annotations to enhance deployment configuration | `{}` |
| `deployment.initContainers` | Init containers to run before main containers | `nil` |
| `deployment.postStart.command` | Command to run after starting the container | `nil` |
| `deployment.terminationGracePeriodSeconds` | Update termination grace period (in seconds) | 120s |
| `deployment.additionalContainers` | Add additional Container | `nil` |
| `deployment.additionalVolumes` | Add additional Volumes | `nil` |
| `deployment.additionalVolumeMounts` | Add additional Volume mounts | `nil` |
| `secret.enabled` | Enable secret | `false` |
| `secret.mountPath` | Path to mount the secret | `/etc/secret-volume` |
| `secret.readOnly` | Secret readonly state | `true` |
| `secret.data` | Secret data | `nil` |
| `service.enabled` | Enable additional service | `true` |
| `service.name` | Service name | `nexus3` |
| `service.labels` | Service labels | `nil` |
| `service.annotations` | Service annotations | `nil` |
| `service.type` | Service Type | `ClusterIP` |
| `route.enabled` | Set to true to create route for additional service | `false` |
| `route.name` | Name of route | `docker` |
| `route.portName` | Target port name of service | `docker` |
| `route.labels` | Labels to be added to route | `{}` |
| `route.annotations` | Annotations to be added to route | `{}` |
| `route.path` | Host name of Route e.g. jenkins.example.com | nil |
| `serviceAccount.create` | Set to true to create ServiceAccount | `true` |
| `serviceAccount.annotations` | Set annotations for ServiceAccount | `{}` |
| `serviceAccount.name` | The name of the service account to use. Auto-generate if not set and create is true. | `{}` |
| `persistence.enabled` | Set false to eliminate persistent storage | `true` |
| `persistence.existingClaim` | Specify the name of an existing persistent volume claim to use instead of creating a new one | nil |
| `persistence.storageSize` | Size of the storage the chart will request | `8Gi` |
### Persistence
By default, a `PersistentVolumeClaim` is created and mounted into the `/nexus-data` directory. In order to disable this functionality, you can change the `values.yaml` to disable persistence, which will use an `emptyDir` instead.
> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."*

View File

@ -48,7 +48,7 @@ spec:
hostAliases:
{{ toYaml .Values.nexus.hostAliases | nindent 8 }}
{{- end }}
{{- with .Values.imagePullSecrets }}
{{- if .Values.nexus.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
@ -59,14 +59,7 @@ spec:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
capabilities:
drop:
- ALL
lifecycle:
{{- if .Values.deployment.postStart.command }}
postStart:

View File

@ -62,9 +62,6 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if $.Values.ingress.ingressClassName }}
ingressClassName: {{ $.Values.ingress.ingressClassName }}
{{- end }}
tls:
- hosts:
- {{ $registry.host | quote }}

View File

@ -36,14 +36,7 @@ tests:
pattern: sonatype/nexus3:3\.\d+\.\d+
- equal:
path: spec.template.spec.containers[0].securityContext
value:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
value: null
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: IfNotPresent
@ -51,17 +44,12 @@ tests:
path: spec.template.spec.containers[0].env
value:
- name: INSTALL4J_ADD_VM_PARAMS
value: |-
-Xms2703M -Xmx2703M
-XX:MaxDirectMemorySize=2703M
-XX:+UnlockExperimentalVMOptions
-XX:+UseCGroupMemoryLimitForHeap
-Djava.util.prefs.userRoot=/nexus-data/javaprefs
value: -Xms2703M -Xmx2703M -XX:MaxDirectMemorySize=2703M -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap
- name: NEXUS_SECURITY_RANDOMPASSWORD
value: "true"
- equal:
path: spec.template.spec.containers[0].ports
value:
value:
- containerPort: 8081
name: nexus-ui
- equal:
@ -95,26 +83,3 @@ tests:
- name: nexus-repository-manager-data
persistentVolumeClaim:
claimName: RELEASE-NAME-nexus-repository-manager-data
- equal:
path: spec.template.spec.securityContext
value:
fsGroup: 200
runAsGroup: 200
runAsUser: 200
- it: should use our simple values
template: deployment.yaml
set:
deploymentStrategy: my-strategy
imagePullSecrets:
- name: top-secret
asserts:
- hasDocuments:
count: 1
- equal:
path: spec.strategy.type
value: my-strategy
- equal:
path: spec.template.spec.imagePullSecrets
value:
- name: top-secret

View File

@ -1,4 +1,3 @@
---
suite: ingress
templates:
- ingress.yaml
@ -98,105 +97,7 @@ tests:
equal:
path: metadata.name
value: RELEASE-NAME-nexus-repository-manager
- documentIndex: 0
equal:
path: spec
value:
ingressClassName: nginx
rules:
- host: repo.demo
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: RELEASE-NAME-nexus-repository-manager
port:
number: 8081
- documentIndex: 1
equal:
path: metadata.name
value: RELEASE-NAME-nexus-repository-manager-docker-5000
- documentIndex: 1
equal:
path: spec
value:
ingressClassName: nginx
rules:
- host: docker.repo.demo
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: RELEASE-NAME-nexus-repository-manager-docker-5000
port:
number: 5000
tls:
- hosts:
- docker.repo.demo
secretName: registry-secret
- it: we can exclude ingressClassName for repo ingress and docker ingress
set:
ingress:
enabled: true
ingressClassName: {}
nexus:
docker:
enabled: true
registries:
- host: docker.repo.demo
port: 5000
secretName: registry-secret
asserts:
- hasDocuments:
count: 2
- isKind:
of: Ingress
- equal:
path: apiVersion
value: networking.k8s.io/v1
- equal:
path: metadata.labels.[app.kubernetes.io/instance]
value: RELEASE-NAME
- equal:
path: metadata.labels.[app.kubernetes.io/managed-by]
value: Helm
- matchRegex:
path: metadata.labels.[app.kubernetes.io/version]
pattern: \d+\.\d+\.\d+
- matchRegex:
path: metadata.labels.[helm.sh/chart]
pattern: nexus-repository-manager-\d+\.\d+\.\d+
- equal:
path: metadata.labels.[app.kubernetes.io/name]
value: nexus-repository-manager
- equal:
path: metadata.annotations
value:
nginx.ingress.kubernetes.io/proxy-body-size: "0"
- documentIndex: 0
equal:
path: metadata.name
value: RELEASE-NAME-nexus-repository-manager
- documentIndex: 0
equal:
path: spec
value:
rules:
- host: repo.demo
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: RELEASE-NAME-nexus-repository-manager
port:
number: 8081
- documentIndex: 1
equal:
path: metadata.name
@ -218,8 +119,9 @@ tests:
number: 5000
tls:
- hosts:
- docker.repo.demo
- docker.repo.demo
secretName: registry-secret
- it: is disabled by default
asserts:
- hasDocuments:

View File

@ -2,16 +2,13 @@
statefulset:
# This is not supported
enabled: false
# By default deploymentStrategy is set to rollingUpdate with maxSurge of 25% and maxUnavailable of 25% . you can change type to `Recreate` or can uncomment `rollingUpdate` specification and adjust them to your usage.
deploymentStrategy: Recreate
image:
# Sonatype Official Public Image
repository: sonatype/nexus3
tag: 3.64.0
tag: 3.41.1
pullPolicy: IfNotPresent
imagePullSecrets:
# for image registries that require login, specify the name of the existing
# kubernetes secret
# - name: <pull-secret-name>
nexus:
docker:
@ -19,17 +16,12 @@ nexus:
# registries:
# - host: chart.local
# port: 5000
# secretName: registry-secret
# secretName: registrySecret
env:
# minimum recommended memory settings for a small, person instance from
# https://help.sonatype.com/repomanager3/product-information/system-requirements
- name: INSTALL4J_ADD_VM_PARAMS
value: |-
-Xms2703M -Xmx2703M
-XX:MaxDirectMemorySize=2703M
-XX:+UnlockExperimentalVMOptions
-XX:+UseCGroupMemoryLimitForHeap
-Djava.util.prefs.userRoot=/nexus-data/javaprefs
value: "-Xms2703M -Xmx2703M -XX:MaxDirectMemorySize=2703M -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap"
- name: NEXUS_SECURITY_RANDOMPASSWORD
value: "true"
properties:
@ -80,6 +72,8 @@ nexus:
# - "example.com"
# - "www.example.com"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

View File

@ -1,8 +1,6 @@
apiVersion: v2
name: nxrm-aws-resiliency
# The nxrm-aws-resiliency chart is deprecated and no longer maintained
deprecated: true
description: DEPRECATED Resilient AWS Deployment of Sonatype Nexus Repository Manager - Universal Binary repository
description: Resilient AWS Deployment of Sonatype Nexus Repository Manager - Universal Binary repository
# A chart can be either an 'application' or a 'library' chart.
#
@ -17,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 64.2.0
version: 41.1.3
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 3.64.0
appVersion: 3.41.1
keywords:
- artifacts
@ -38,4 +36,6 @@ keywords:
- nexus3
home: https://www.sonatype.com/nexus-repository-oss
icon: https://sonatype.github.io/helm3-charts/NexusRepo_Vertical.svg
maintainers:
- name: Sonatype

View File

@ -12,24 +12,106 @@
Eclipse Foundation. All other trademarks are the property of their respective owners.
-->
# ⚠️ Archive Notice
As of February 9, 2024, we now provide one [HA/Resiliency Helm Chart](https://github.com/sonatype/nxrm3-ha-repository/tree/main/nxrm-ha) that supports both high availability and resilient deployments in AWS, Azure, or on-premises in a Kubernetes cluster. This is our only supported Helm chart for deploying Sonatype Nexus Repository; it requires a PostgreSQL database and a Pro license.
# Helm Chart for a Resilient Nexus Repository Deployment in AWS
# Helm Chart Instructions
This Helm chart configures the Kubernetes resources that are needed for a resilient Nexus Repository deployment on AWS as described in our documented [single-node cloud resilient deployment example using AWS](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability/single-node-cloud-resilient-deployment-example-using-aws).
See the [HA/Resiliency Helm Chart in GitHub](https://github.com/sonatype/nxrm3-ha-repository/tree/main/nxrm-ha) for details on the new combined Helm chart.
Detailed Help instructions are also available at the following locations:
* [Single-Node Cloud Resilient Example Using AWS] (https://help.sonatype.com/en/single-node-cloud-resilient-deployment-example-using-aws.html)
* [Single-Node Cloud Resilient Example Using Azure] (https://help.sonatype.com/en/single-node-cloud-resilient-deployment-example-using-azure.html)
* [Single Data Center On-Premises Resilient Example Using Kubernetes] (https://help.sonatype.com/en/single-data-center-on-premises-deployment-example-using-kubernetes.html)
* [High Availability Deployment in AWS] (https://help.sonatype.com/en/option-3---high-availability-deployment-in-amazon-web-services--aws-.html)
* [High Availability Deployment in Azure] (https://help.sonatype.com/en/option-4---high-availability-deployment-in-azure.html)
* [On-Premises High Availability Deployment Using Kubernetes] (https://help.sonatype.com/en/option-2---on-premises-high-availability-deployment-using-kubernetes.html)
Detailed Help instructions are also available at the following locations:
* [Single-Node Cloud Resilient Example Using AWS] (https://help.sonatype.com/en/single-node-cloud-resilient-deployment-example-using-aws.html)
* [Single-Node Cloud Resilient Example Using Azure] (https://help.sonatype.com/en/single-node-cloud-resilient-deployment-example-using-azure.html)
* [Single Data Center On-Premises Resilient Example Using Kubernetes] (https://help.sonatype.com/en/single-data-center-on-premises-deployment-example-using-kubernetes.html)
* [High Availability Deployment in AWS] (https://help.sonatype.com/en/option-3---high-availability-deployment-in-amazon-web-services--aws-.html)
* [High Availability Deployment in Azure] (https://help.sonatype.com/en/option-4---high-availability-deployment-in-azure.html)
* [On-Premises High Availability Deployment Using Kubernetes] (https://help.sonatype.com/en/option-2---on-premises-high-availability-deployment-using-kubernetes.html)
Use the checklist below to determine if this Helm chart is suitable for your deployment needs.
---
## When to Use This Helm Chart
Use this Helm chart if you are doing any of the following:
- Deploying Nexus Repository Pro to an AWS cloud environment with the desire for automatic failover across Availability Zones (AZs) within a single region
- Planning to configure a single Nexus Repository Pro instance within your Kubernetes/EKS cluster with two or more nodes spread across different AZs within an AWS region
- Using an external PostgreSQL database
> **Note**: A Nexus Repository Pro license is required for our resilient deployment options. Your Nexus Repository Pro license file must be stored externally as mounted from AWS Secrets AWS (required).
---
## Prerequisites for This Chart
In order to set up an environment like the one illustrated above and described in this section, you will need the following:
- Kubernetes 1.19+
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [Helm 3](https://helm.sh/docs/intro/install/)
- A Nexus Repository Pro license
- An AWS account with permissions for accessing the following AWS services:
- Elastic Kubernetes Service (EKS)
- Relational Database Service (RDS) for PostgreSQL
- Application Load Balancer (ALB)
- CloudWatch
- Simple Storage Service (S3)
- Secrets Manager
You will also need to complete the steps below. See the referenced AWS documentation for detailed configuration steps. Also see [our resiliency documentation](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability/single-node-cloud-resilient-deployment-example-using-aws) for more details about why these steps are necessary and how each AWS solution functions within a resilient deployment:
1. Configure an EKS cluster - [AWS documentation for managed nodes (i.e., EC2)](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html)
2. Create an Aurora database cluster - [AWS documentation for creating an Aurora database cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.CreateInstance.html)
3. Deploy the AWS Load Balancer Controller (LBC) to your EKS cluster - [AWS documentation for deploying the AWS LBC to your EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html)
4. Install AWS Secrets Store CSI drivers - You need to create an IAM service account using the ```eksctl create iamserviceaccount``` command. Before proceeding, read the points below as they contain important required steps to ensure this helm chart will work for you: <br>
- **You must include two additional command parameters when running the command**: ```--role-only``` and ```--namespace <nexusrepo namespace>```
- It is important to include the ```--role-only``` option in the ```eksctl create iamserviceaccount``` command so that the helm chart manages the Kubernetes service account. <br>
- **The namespace you specify to the ```eksctl create iamserviceaccount``` must be the same namespace into which you will deploy the Nexus Repository pod.** <br>
- Although the namespace does not exist at this point, you must specify it as part of the command. **Do not create that namespace manually beforehand**; the helm chart will create and manage it.
- You should specify this same namespace as the value of ```nexusNs``` in your values.yaml. <br>
- Follow the instructions provided in the [AWS Secrets Store CSI drivers documentation](https://github.com/aws/secrets-store-csi-driver-provider-aws/blob/main/README.md) to install the AWS Secrets Store CSI drivers; ensure that you follow the additional instructions in the bullets above when you reach the ```eksctl create iamserviceaccount``` command on that page.
5. Ensure that your EKS nodes are granted CloudWatchFullAccess and CloudWatchAgentServerPolicy IAM policies. This Helm chart will configure Fluentbit for log externalisation to CloudWatch.
- [AWS documentation for setting up Fluentbit](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-EKS.html)
---
## Deployment
1. Add the sonatype repo to your helm:
```helm repo add sonatype https://sonatype.github.io/helm3-charts/ ```
2. Ensure you have updated your values.yaml with appropriate values for your environment.
3. Install the chart using the following:
```helm install nxrm sonatype/nxrm-aws-resiliency -f values.yaml```
4. Get the Nexus Repository link using the following:
```kubectl get ingresses -n nexusrepo```
---
## Health Check
You can use the following commands to perform various health checks:
See a list of releases:
```helm list```
Check pods using the following:
```kubectl get pods -n nexusrepo```
Check the Nexus Repository logs with the following:
```kubectl logs <pod_name> -n nexusrepo nxrm-app```
Check if the pod is OK by using the following; you shouldn't see any error/warning messages:
```kubectl describe pod <pod_name> -n nexusrepo```
Check if ingress is OK using the following:
```kubectl describe ingress <ingress_name> -n nexusrepo```
Check that the Fluent Bit pod is sending events to CloudWatch using the following:
```kubectl logs -n amazon-cloudwatch <fluent-bit pod id>```
If the above returns without error, then check CloudWatch for the ```/aws/containerinsights/<eks cluster name>/nexus-logs``` log group, which should contain four log streams.
---
## Uninstall
To uninstall the deployment, use the following:
```helm uninstall nxrm```
After removing the deployment, ensure that the namespace is deleted and that Nexus Repository is not listed when using the following:
```helm list```

View File

@ -70,7 +70,7 @@ spec:
- name: NEXUS_SECURITY_RANDOMPASSWORD
value: "false"
- name: INSTALL4J_ADD_VM_PARAMS
value: "{{ .Values.deployment.container.env.install4jAddVmParams }} -Dnexus.licenseFile=/nxrm-secrets/{{ .Values.secret.license.alias }} \
value: "-Xms2703m -Xmx2703m -XX:MaxDirectMemorySize=2703m -Dnexus.licenseFile=/nxrm-secrets/{{ .Values.secret.license.alias }} \
-Dnexus.datastore.enabled=true -Djava.util.prefs.userRoot=${NEXUS_DATA}/javaprefs \
-Dnexus.datastore.nexus.jdbcUrl=jdbc:postgresql://${DB_HOST}:{{ .Values.deployment.container.env.nexusDBPort }}/${DB_NAME} \
-Dnexus.datastore.nexus.username=${DB_USER} \

View File

@ -1,4 +1,4 @@
{{- if .Values.externaldns.enabled }}
# comment out sa if it was previously created
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@ -64,4 +64,3 @@ spec:
env:
- name: AWS_DEFAULT_REGION
value: {{ .Values.deployment.clusterRegion }}
{{- end }}

View File

@ -1,4 +1,3 @@
{{- if .Values.fluentbit.enabled -}}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -78,7 +77,7 @@ data:
[INPUT]
Name tail
Tag nexus.nexus-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_nxrm-app-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_nxrm-app-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB
@ -113,7 +112,7 @@ data:
[INPUT]
Name tail
Tag nexus.request-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_request-log-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_request-log-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB
@ -148,7 +147,7 @@ data:
[INPUT]
Name tail
Tag nexus.audit-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_audit-log-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_audit-log-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB
@ -183,7 +182,7 @@ data:
[INPUT]
Name tail
Tag nexus.tasks-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_tasks-log-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_tasks-log-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB
@ -358,5 +357,4 @@ spec:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
{{- end }}
effect: "NoSchedule"

View File

@ -24,7 +24,6 @@ spec:
port:
number: {{ .Values.service.nexus.port }}
---
{{- if .Values.ingress.dockerIngress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
@ -50,4 +49,3 @@ spec:
name: {{ .Chart.Name }}-docker-service
port:
number: {{ .Values.service.docker.port }}
{{- end }}

View File

@ -3,16 +3,13 @@ kind: Namespace
metadata:
name: {{ .Values.namespaces.nexusNs }}
---
{{- if .Values.fluentbit.enabled }}
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Values.namespaces.cloudwatchNs }}
{{- end }}
---
{{- if .Values.externaldns.enabled }}
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Values.namespaces.externaldnsNs }}
{{- end }}
---

View File

@ -6,7 +6,6 @@ metadata:
annotations:
eks.amazonaws.com/role-arn: {{ .Values.serviceAccount.role }}
---
{{- if .Values.externaldns.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
@ -14,4 +13,4 @@ metadata:
namespace: {{ .Values.namespaces.externaldnsNs }}
annotations:
eks.amazonaws.com/role-arn: {{ .Values.serviceAccount.externaldns.role }}
{{- end }}
---

View File

@ -14,7 +14,6 @@ spec:
port: {{ .Values.service.nexus.port }}
targetPort: {{ .Values.service.nexus.targetPort }}
---
{{- if .Values.service.docker.enabled -}}
apiVersion: v1
kind: Service
metadata:
@ -31,4 +30,3 @@ spec:
protocol: {{ .Values.service.docker.protocol }}
port: {{ .Values.service.docker.port }}
targetPort: {{ .Values.service.docker.targetPort }}
{{- end }}

View File

@ -1,11 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.workdir.configmap.name }}
namespace: {{ .Values.namespaces.nexusNs }}
data:
create-nexus-work-dir.sh: |
#!/bin/bash
# Make Nexus Repository Manager work directory
mkdir -p /nexus-repo-mgr-work-dir/work

View File

@ -1,51 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ .Values.workdir.daemonset.name }}
namespace: {{ .Values.namespaces.nexusNs }}
spec:
selector:
matchLabels:
job: dircreator
template:
metadata:
labels:
job: dircreator
spec:
hostPID: true
restartPolicy: Always
initContainers:
# Copy file for creating nexus work directory over and execute it on host
- name: create-nexus-work-dir
image: ubuntu:23.04
command: [/bin/sh]
args:
- -c
- >-
cp /tmp/create-nexus-work-dir.sh /host-dir &&
/usr/bin/nsenter -m/proc/1/ns/mnt -- chmod u+x /tmp/install/create-nexus-work-dir.sh &&
/usr/bin/nsenter -m/proc/1/ns/mnt /tmp/install/create-nexus-work-dir.sh
securityContext:
privileged: true
volumeMounts:
- name: create-nexus-work-dir-script
mountPath: /tmp
- name: host-mnt
mountPath: /host-dir
containers:
- name: directory-creator
image: busybox:1.33.1
command: ["/bin/sh"]
args:
- -c
- >-
tail -f /dev/null
securityContext:
privileged: true
volumes:
- name: create-nexus-work-dir-script
configMap:
name: {{ .Values.workdir.configmap.name }}
- name: host-mnt
hostPath:
path: /tmp/install

View File

@ -4,18 +4,14 @@ namespaces:
cloudwatchNs: amazon-cloudwatch
externaldnsNs: nexus-externaldns
externaldns:
enabled: false
domainFilter: example.com #your root domain e.g example.com
awsZoneType: private # hosted zone to look at (valid values are public, private or no value for both)
fluentbit:
enabled: false
deployment:
clusterRegion: us-east-1
name: nxrm.deployment
clusterName: nxrm-nexus
logsRegion: us-east-1
fluentBitVersion: 2.28.0
replicaCount: 1
initContainer:
image:
repository: busybox
@ -23,13 +19,12 @@ deployment:
container:
image:
repository: sonatype/nexus3
tag: 3.45.1
tag: 3.41.1
containerPort: 8081
pullPolicy: IfNotPresent
env:
nexusDBName: nexus
nexusDBPort: 3306
install4jAddVmParams: "-Xms2703m -Xmx2703m"
requestLogContainer:
image:
repository: busybox
@ -52,33 +47,24 @@ ingress:
#host: "example.com" #host to apply this ingress rule to. Uncomment this in your values.yaml and set it as you wish
annotations:
kubernetes.io/ingress.class: alb
alb.ingress.kubernetes.io/healthcheck-path: /service/rest/v1/status
alb.ingress.kubernetes.io/scheme: internal # scheme
alb.ingress.kubernetes.io/subnets: subnet-1,subnet-2 #comma separated list of subnet ids
#alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' uncomment for https
#alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # Uncomment for https. The AWS Certificate Manager ARN for your HTTPS certificate
dockerIngress: #Ingress for Docker Connector - comment out if you don't use docker repositories
enabled: false
alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]'
alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # The AWS Certificate Manager ARN for your HTTPS certificate
dockerIngress: #Ingress for Docker Connector - comment out if you don't use docker repositories
annotations:
kubernetes.io/ingress.class: alb # comment out if you don't use docker repositories
alb.ingress.kubernetes.io/scheme: internal # scheme comment out if you don't use docker repositories
alb.ingress.kubernetes.io/subnets: subnet-1,subnet-2 #comma separated list of subnet ids, comment out if you don't use docker repositories
# alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' #uncomment if you use docker repositories
# alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # Uncomment if you use docker repositories - The AWS Certificate Manager ARN for your HTTPS certificate
# external-dns.alpha.kubernetes.io/hostname: dockerrepo1.example.com, dockerrepo2.example.com, dockerrepo3.example.com # Add more docker subdomains using dockerrepoName.example.com; otherwise comment out if you don't use docker repositories
workdir:
configmap:
name: create-nexus-workdir-config
daemonset:
name: create-nexus-work-dir
storageClass:
iopsPerGB: "10" #Note: aws plugin multiplies this by the size of the requested volumne to compute IOPS of the volumne and caps it a 20, 000 IOPS
alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' #comment out if you don't use docker repositories
alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # Comment out if you don't use docker repositories - The AWS Certificate Manager ARN for your HTTPS certificate
external-dns.alpha.kubernetes.io/hostname: dockerrepo1.example.com, dockerrepo2.example.com, dockerrepo3.example.com # Add more docker subdomains using dockerrepoName.example.com; otherwise comment out if you don't use docker repositories
pv:
storage: 120Gi
volumeMode: Filesystem
accessModes: ReadWriteOnce
reclaimPolicy: Retain
path: /nexus-repo-mgr-work-dir/work
path: /mnt
zones:
zone1: us-east-1a
zone2: us-east-1b
@ -86,22 +72,21 @@ pvc:
accessModes: ReadWriteOnce
storage: 100Gi
service: #Nexus Repo NodePort Service
service: #Nexus Repo NodePort Service
nexus:
type: NodePort
protocol: TCP
port: 80
targetPort: 8081
docker: #Nodeport Service for Docker Service
enabled: false
type: NodePort
protocol: TCP
port: 9090
targetPort: 8081
type: NodePort
protocol: TCP
port: 80
targetPort: 8081
docker: #Nodeport Service for Docker Service
type: NodePort
protocol: TCP
port: 9090
targetPort: 8081
secret:
license:
arn: arn:aws:secretsmanager:us-east-1:000000000000:secret:nxrm-nexus-license
alias: nxrm-license.lic
arn: arn:aws:secretsmanager:us-east-1:000000000000:secret:nxrm-nexus-license
alias: nxrm-license.lic
rds:
arn: arn:aws:secretsmanager:us-east-1:000000000000:secret:nxrmrds-cred-nexus
adminpassword: