Compare commits

...

64 Commits

SHA1 Message Date
a566f84674 Release Update for 54.1.0 2023-05-18 20:29:53 +00:00
01dd44acdc Release Update for 54.0.0 2023-05-18 15:45:04 +00:00
1014e66058 Release Update for 53.1.0 2023-05-13 02:57:17 +00:00
9b42daf4c0 Release Update for 53.0.0 2023-05-02 22:39:59 +00:00
a532a5eabc Release Update for 52.0.0 2023-04-18 21:16:24 +00:00
9831de32bc Merge pull request #47 from sonatype/pin-unittest-version
Pin helm-unittest to 0.2.11
2023-04-18 16:14:40 -05:00
f3a0053f4c Pin helm-unittest to 0.2.11 2023-04-18 16:11:11 -05:00
eaff5b490c Release Update for 51.0.0 2023-04-04 20:28:21 +00:00
90921100be Release Update for 50.0.0 2023-03-27 14:40:37 +00:00
cc5a53e5a4 Merge pull request #45 from sonatype/NEXUS-38174
fix fluent bit path
2023-03-24 09:01:35 +00:00
0cb275e219 fix fluent bit path 2023-03-23 12:55:53 +00:00
0462b7eb43 Release Update for 49.0.0 2023-03-06 18:33:41 +00:00
d1d2971125 Release Update for 48.0.0 2023-02-27 15:57:25 +00:00
616ecbc831 Release Update for 47.1.0 2023-02-13 18:07:33 +00:00
61c0cb0eb3 Delete nxrm-aws-resiliency-47.0.0.tgz 2023-02-13 12:57:52 -05:00
ce20f243b6 Delete nexus-repository-manager-47.0.0.tgz 2023-02-13 12:53:49 -05:00
3487c9fdb6 Release Update for 47.0.0 2023-02-07 21:53:31 +00:00
f166861198 Merge pull request #41 from sonatype/specify-custom-folder-for-nexus-data
Create directory for nexus-data on node (i.e. ec2) startup
2023-01-31 11:25:58 +00:00
00dfee338c externalise config map name 2023-01-31 11:04:24 +00:00
560b9f1ff6 Release Update for 46.0.0 2023-01-30 16:59:06 +00:00
35fb1119fb trunc to 63 characters 2023-01-27 12:26:36 +00:00
8396c0de20 Don't use mnt for nexus-data 2023-01-27 11:45:26 +00:00
541e70232b Release Update for 45.1.0 2023-01-17 19:47:06 +00:00
7ef8c04eef Release Update for 45.0.0 2022-12-28 08:17:36 +00:00
43580c8a5d Release Update for 44.0.0 2022-12-14 16:45:34 +00:00
d27891b463 remove erroneous link 2022-12-08 10:58:53 -05:00
53f8dcfa69 Merge pull request #33 from sonatype/Readme-Update
Update README
2022-12-08 09:44:07 -05:00
55a17e0b76 Update README.md 2022-12-08 09:43:44 -05:00
7d3dcf6fe1 Update README.md 2022-12-08 09:42:55 -05:00
c8b1ad3059 INT-7432 security context for openshift (#34)
OpenShift requires the red hat image (optional)
and these security settings to alleviate warnings.

These changes are fine for other k8s implementations
like minikube using the stock container from docker hub.
2022-11-17 11:05:22 -05:00
990728c288 Update README.md 2022-11-16 10:09:13 -05:00
4b62f278ef Update README.md 2022-11-16 10:08:19 -05:00
28f4e871e5 Update README.md 2022-11-15 10:32:40 -05:00
e2b335d84b Update README.md 2022-11-15 10:32:24 -05:00
e5ea67a8b4 Update README.md 2022-11-15 10:23:53 -05:00
cb992b92e5 Update README 2022-11-15 10:22:20 -05:00
7dd8cd5112 Release Update for 43.0.0 2022-11-07 16:44:38 +00:00
5db6031514 Wording tweak per Vijay 2022-10-26 12:15:37 -04:00
70d639ca4e Merge pull request #29 from sonatype/end-support-chart
End of support for chart
2022-10-25 12:33:45 -04:00
95b8a984ca upgrade to helm 3.10.1 (#31)
latest helm for packaging and testing.
2022-10-25 12:28:21 -04:00
3f0979d531 Update README.md 2022-10-21 12:42:27 -04:00
c164995fbf End of support for chart 2022-10-21 12:41:09 -04:00
3a22af41bd NEXUS-35492 - Allow configuring install4j vm arguments (#25) 2022-10-05 12:42:26 -04:00
59e8ebcd98 Merge pull request #26 from sonatype/fix_broken_link
Fix broken link
2022-09-30 14:37:55 +01:00
5d0bfa8e4e fix link 2022-09-30 10:56:38 +01:00
4b4bbd13f3 Release Update for 42.0.1 2022-09-28 15:38:44 +00:00
77aac91a9f Release Update for 42.0.0 2022-09-28 14:28:27 +00:00
0c51e4c7be Merge pull request #22 from sonatype/NEXUS-34974-publish-nxrm-helm-charts 2022-09-28 16:35:17 +03:00
0adb4a9fc7 uncomment the tests related code in Jenkinsfile 2022-09-20 10:43:08 +03:00
1a821d1032 uncomment the tests 2022-09-19 12:41:25 +03:00
1fa72df38c license header added to README.md 2022-09-14 11:26:56 +03:00
8123c3db21 NEXUS-34974 Publish nxrm helm charts to Sonatype helm repo and Artifact Hub 2022-09-13 15:33:07 +03:00
595206fdc9 Merge pull request #21 from sonatype/fix-build
fix build
2022-09-02 16:12:52 +01:00
7d1dfaa1bd Update README.md 2022-09-02 10:43:11 -04:00
de46a3ca1f fix build 2022-09-02 13:06:18 +01:00
298a49e994 Merge pull request #20 from sonatype/fix-build
fix build
2022-09-02 12:53:26 +01:00
786e5717d2 fix build 2022-09-02 12:50:47 +01:00
6ed696ec15 Merge pull request #19 from sonatype/NEXUS-35078-Remove-nexus-repository-manager-helm3-charts
Remove non resiliency helm3-charts
2022-09-02 12:41:59 +01:00
e82e7a3208 Remove non resiliency helm3-charts 2022-09-02 11:23:27 +01:00
d9da79bb8d Update README.md
wording cleanup
2022-08-30 15:29:11 -04:00
670344d45a Merge pull request #15 from sonatype/update-readme
NEXUS-34871 - readme update
2022-08-30 15:15:32 -04:00
b5168a2dc3 Merge pull request #16 from sonatype/lisadurant-patch-2
readme update
2022-08-30 15:09:58 -04:00
1b4585d89c readme update 2022-08-30 19:32:36 +01:00
4b91e48ef5 readme update 2022-08-30 19:30:05 +01:00
57 changed files with 434 additions and 64 deletions

View File

@@ -11,7 +11,7 @@
# Eclipse Foundation. All other trademarks are the property of their respective owners.
#
FROM docker-all.repo.sonatype.com/alpine/helm:3.9.3
FROM docker-all.repo.sonatype.com/alpine/helm:3.10.1
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh

View File

@@ -17,16 +17,6 @@ final jira = [
credentialId : 'jenkins-jira', autoRelease: true, failOnError: true
]
final jiraVersionMappings = [
'nexus-repository-manager': 'helm-nxrm',
'nxrm-aws-resiliency': 'helm-nxrm-aws-resiliency'
]
final chartLocation = [
'nexus-repository-manager': 'nexus-repository-manager',
'nxrm-aws-resiliency': 'nxrm-aws-resiliency'
]
properties([
parameters([
string(
@@ -54,8 +44,9 @@ dockerizedBuildPipeline(
runSafely "git checkout ${gitBranch(env)}"
runSafely "./upgrade.sh ./nexus-repository-manager ${chartVersion} ${params.appVersion}"
runSafely "./upgrade.sh ./nxrm-aws-resiliency ${chartVersion} ${params.appVersion}"
runSafely './build.sh'
runSafely 'git add nxrm-aws-resiliency nexus-repository-manager'
runSafely './build.sh'
runSafely 'git add nxrm-aws-resiliency'
runSafely 'git add nexus-repository-manager'
},
skipVulnerabilityScan: true,
archiveArtifacts: 'docs/*',

View File

@@ -12,6 +12,15 @@
Eclipse Foundation. All other trademarks are the property of their respective owners.
-->
# ⚠️ Archive Notice
As of October 24, 2023, we will no longer update or support the [Single-Instance OSS/Pro Helm Chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nexus-repository-manager).
Deploying Nexus Repository in containers with an embedded database has been known to corrupt the database under some circumstances. We strongly recommend that you use an external PostgreSQL database for Kubernetes deployments.
If you are deploying in AWS, you can use our [AWS Helm chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nxrm-aws-resiliency) to deploy Nexus Repository in an EKS cluster.
We do not currently provide Helm charts for on-premises deployments using PostgreSQL. For those wishing to deploy on premises, see our [Single Data Center On-Premises Deployment Example Using Kubernetes documentation](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability/single-data-center-on-premises-deployment-example-using-kubernetes) for information and sample YAMLs to help you plan a resilient on-premises deployment.
## Helm Charts for Sonatype Nexus Repository Manager 3
@@ -22,7 +31,7 @@ See the [AWS Single-Instance Resiliency Chart](https://github.com/sonatype/nxrm3
* Planning to configure a single Nexus Repository Pro instance within your Kubernetes/EKS cluster with two or more nodes spread across different AZs within an AWS region
* Using an external PostgreSQL database (required)
See the [Single-Instance OSS/Pro Kubernetes Chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nexus-repository-manager) if you are doing the following:
See the [Single-Instance OSS/Pro Helm Chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nexus-repository-manager) if you are doing the following:
* Using embedded OrientDB (required)
* Deploying either Nexus Repository Pro or OSS to an on-premises environment with bare metal/VM server (Node)
* Deploying a single Nexus Repository instance within a Kubernetes cluster that has a single Node configured

View File

@@ -12,7 +12,7 @@
# Eclipse Foundation. All other trademarks are the property of their respective owners.
#
helm plugin install https://github.com/quintush/helm-unittest
helm plugin install --version "0.2.11" https://github.com/quintush/helm-unittest
set -e

Binary file not shown. (34 binary files omitted from the diff view)
View File

@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
*.tar

View File

@@ -3,10 +3,10 @@ name: nexus-repository-manager
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 41.1.3
version: 54.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 3.41.1
appVersion: 3.54.1
description: Sonatype Nexus Repository Manager - Universal Binary repository

View File

@@ -12,6 +12,15 @@
Eclipse Foundation. All other trademarks are the property of their respective owners.
-->
# ⚠️ Archive Notice
As of October 24, 2023, we will no longer update or support this Helm chart.
Deploying Nexus Repository in containers with an embedded database has been known to corrupt the database under some circumstances. We strongly recommend that you use an external PostgreSQL database for Kubernetes deployments.
If you are deploying in AWS, you can use our [AWS Helm chart](https://github.com/sonatype/nxrm3-helm-repository/tree/main/nxrm-aws-resiliency) to deploy Nexus Repository in an EKS cluster.
We do not currently provide Helm charts for on-premises deployments using PostgreSQL. For those wishing to deploy on premises, see our [Single Data Center On-Premises Deployment Example Using Kubernetes documentation](https://help.sonatype.com/repomanager3/planning-your-implementation/resiliency-and-high-availability/single-data-center-on-premises-deployment-example-using-kubernetes) for information and sample YAMLs to help you plan a resilient on-premises deployment.
# Nexus Repository
@@ -67,14 +76,9 @@ Do not use this Helm chart and, instead, refer to our [resiliency documentation]
By default, this Chart uses Sonatype's Public Docker image. If you want to use a different image, run with the following: `--set nexus.imageName=<my>/<image>`.
### With Red Hat Certified container
## Adding the Sonatype Repository to your Helm
If you're looking to run our Certified Red Hat image in an OpenShift 4 environment, there is a Certified Operator in OperatorHub.
---
## Adding the repo
To add as a Helm Repo, use the following:
To add as a Helm Repo
```helm repo add sonatype https://sonatype.github.io/helm3-charts/```
---
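As an illustration of the custom-image note earlier in this hunk: the chart's own values file (shown later in this diff) keys the image off `image.repository` and `image.tag`, so an equivalent `values.yaml` override is roughly the following sketch (registry path and tag are placeholders):
```yaml
image:
  # placeholder registry/path and tag -- point these at your own mirror of the Nexus image
  repository: registry.example.com/mirror/nexus3
  tag: 3.54.1
  pullPolicy: IfNotPresent
```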
@@ -111,6 +115,7 @@ The default login is randomized and can be found in `/nexus-data/admin.password`
by setting the environment variable `NEXUS_SECURITY_RANDOMPASSWORD` to `false` in your `values.yaml`.
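For illustration, a minimal `values.yaml` fragment that disables the randomized admin password, mirroring the `nexus.env` list shown in the chart's values file later in this diff (other env entries left at their defaults):
```yaml
nexus:
  env:
    # keep the existing INSTALL4J_ADD_VM_PARAMS entry alongside this one
    - name: NEXUS_SECURITY_RANDOMPASSWORD
      value: "false"
```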
---
## Uninstalling the Chart
To uninstall/delete the deployment, use the following:
@@ -133,16 +138,16 @@ The following table lists the configurable parameters of the Nexus chart and the
| Parameter | Description | Default |
|--------------------------------------------|----------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------|
| `deploymentStrategy` | Deployment Strategy | `Recreate` |
| `nexus.imagePullPolicy` | Nexus Repository image pull policy | `IfNotPresent` |
| `nexus.imagePullSecrets` | Secret to download Nexus Repository image from private registry | `nil` |
| `nexus.imagePullPolicy` | Nexus Repository image pull policy | `IfNotPresent` |
| `imagePullSecrets` | The names of the kubernetes secrets with credentials to login to a registry | `[]` |
| `nexus.docker.enabled` | Enable/disable Docker support | `false` |
| `nexus.docker.registries` | Support multiple Docker registries | (see below) |
| `nexus.docker.registries[0].host` | Host for the Docker registry | `cluster.local` |
| `nexus.docker.registries[0].port` | Port for the Docker registry | `5000` |
| `nexus.docker.registries[0].secretName` | TLS Secret Name for the ingress | `registrySecret` |
| `nexus.env` | Nexus Repository environment variables | `[{INSTALL4J_ADD_VM_PARAMS: -Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap}]` |
| `nexus.resources` | Nexus Repository resource requests and limits | `{}` |
| `nexus.nexusPort` | Internal port for Nexus Repository service | `8081` |
| `nexus.env` | Nexus Repository environment variables | `[{INSTALL4J_ADD_VM_PARAMS: -Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap}]` |
| `nexus.resources` | Nexus Repository resource requests and limits | `{}` |
| `nexus.nexusPort` | Internal port for Nexus Repository service | `8081` |
| `nexus.securityContext` | Security Context (for enabling official image use `fsGroup: 2000`) | `{}` |
| `nexus.labels` | Service labels | `{}` |
| `nexus.podAnnotations` | Pod Annotations | `{}` |
@@ -159,17 +164,17 @@ The following table lists the configurable parameters of the Nexus chart and the
| `nexus.hostAliases` | Aliases for IPs in /etc/hosts | [] |
| `nexus.properties.override` | Set to true to override default nexus.properties | `false` |
| `nexus.properties.data` | A map of custom nexus properties if `override` is set to true | `nexus.scripts.allowCreation: true` |
| `ingress.enabled` | Create an ingress for Nexus Repository | `true` |
| `ingress.enabled` | Create an ingress for Nexus Repository | `false` |
| `ingress.annotations` | Annotations to enhance ingress configuration | `{kubernetes.io/ingress.class: nginx}` |
| `ingress.tls.secretName` | Name of the secret storing TLS cert, `false` to use the Ingress' default certificate | `nexus-tls` |
| `ingress.path` | Path for ingress rules. GCP users should set to `/*`. | `/` |
| `ingress.path` | Path for ingress rules. GCP users should set to `/*`. | `/` |
| `tolerations` | tolerations list | `[]` |
| `config.enabled` | Enable configmap | `false` |
| `config.mountPath` | Path to mount the config | `/sonatype-nexus-conf` |
| `config.data` | Configmap data | `nil` |
| `deployment.annotations` | Annotations to enhance deployment configuration | `{}` |
| `deployment.initContainers` | Init containers to run before main containers | `nil` |
| `deployment.postStart.command` | Command to run after starting the container | `nil` |
| `deployment.postStart.command` | Command to run after starting the container | `nil` |
| `deployment.terminationGracePeriodSeconds` | Update termination grace period (in seconds) | 120s |
| `deployment.additionalContainers` | Add additional Container | `nil` |
| `deployment.additionalVolumes` | Add additional Volumes | `nil` |
@@ -188,16 +193,44 @@ The following table lists the configurable parameters of the Nexus chart and the
| `route.portName` | Target port name of service | `docker` |
| `route.labels` | Labels to be added to route | `{}` |
| `route.annotations` | Annotations to be added to route | `{}` |
| `route.path` | Host name of Route e.g. jenkins.example.com | nil |
| `route.path` | Host name of Route e.g. jenkins.example.com | nil |
| `serviceAccount.create` | Set to true to create ServiceAccount | `true` |
| `serviceAccount.annotations` | Set annotations for ServiceAccount | `{}` |
| `serviceAccount.name` | The name of the service account to use. Auto-generate if not set and create is true. | `{}` |
| `serviceAccount.name` | The name of the service account to use. Auto-generate if not set and create is true. | `{}` |
| `persistence.enabled` | Set false to eliminate persistent storage | `true` |
| `persistence.existingClaim` | Specify the name of an existing persistent volume claim to use instead of creating a new one | nil |
| `persistence.storageSize` | Size of the storage the chart will request | `8Gi` |
| `persistence.storageSize` | Size of the storage the chart will request | `8Gi` |
### Persistence
By default, a `PersistentVolumeClaim` is created and mounted into the `/nexus-data` directory. In order to disable this functionality, you can change the `values.yaml` to disable persistence, which will use an `emptyDir` instead.
> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."*
## Using the Image from the Red Hat Registry
To use the [Nexus Repository Manager image available from Red Hat's registry](https://catalog.redhat.com/software/containers/sonatype/nexus-repository-manager/594c281c1fbe9847af657690),
you'll need to:
* Load the credentials for the registry as a secret in your cluster
```shell
kubectl create secret docker-registry redhat-pull-secret \
--docker-server=registry.connect.redhat.com \
--docker-username=<user_name> \
--docker-password=<password> \
--docker-email=<email>
```
See Red Hat's [Registry Authentication documentation](https://access.redhat.com/RegistryAuthentication)
for further details.
* Provide the name of the secret in `imagePullSecrets` in this chart's `values.yaml`
```yaml
imagePullSecrets:
- name: redhat-pull-secret
```
* Set `image.repository` and `image.tag` in `values.yaml`
```yaml
image:
repository: registry.connect.redhat.com/sonatype/nexus-repository-server
tag: 3.39.0-ubi-1
```
---

View File

@@ -48,7 +48,7 @@ spec:
hostAliases:
{{ toYaml .Values.nexus.hostAliases | nindent 8 }}
{{- end }}
{{- if .Values.nexus.imagePullSecrets }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -59,7 +59,14 @@ spec:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
capabilities:
drop:
- ALL
lifecycle:
{{- if .Values.deployment.postStart.command }}
postStart:

View File

@@ -62,6 +62,9 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if $.Values.ingress.ingressClassName }}
ingressClassName: {{ $.Values.ingress.ingressClassName }}
{{- end }}
tls:
- hosts:
- {{ $registry.host | quote }}
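For context on the `ingressClassName` lines added above: the field is only rendered when `ingress.ingressClassName` is set in `values.yaml`, roughly as in this sketch (`nginx` is just an example class; the unit tests below exercise both the set and unset cases):
```yaml
ingress:
  enabled: true
  ingressClassName: nginx   # omit this key to leave ingressClassName out of the rendered Ingress
```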

View File

@@ -36,7 +36,14 @@ tests:
pattern: sonatype/nexus3:3\.\d+\.\d+
- equal:
path: spec.template.spec.containers[0].securityContext
value: null
value:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: IfNotPresent
@@ -44,12 +51,17 @@ tests:
path: spec.template.spec.containers[0].env
value:
- name: INSTALL4J_ADD_VM_PARAMS
value: -Xms2703M -Xmx2703M -XX:MaxDirectMemorySize=2703M -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap
value: |-
-Xms2703M -Xmx2703M
-XX:MaxDirectMemorySize=2703M
-XX:+UnlockExperimentalVMOptions
-XX:+UseCGroupMemoryLimitForHeap
-Djava.util.prefs.userRoot=/nexus-data/javaprefs
- name: NEXUS_SECURITY_RANDOMPASSWORD
value: "true"
- equal:
path: spec.template.spec.containers[0].ports
value:
value:
- containerPort: 8081
name: nexus-ui
- equal:
@@ -83,3 +95,26 @@ tests:
- name: nexus-repository-manager-data
persistentVolumeClaim:
claimName: RELEASE-NAME-nexus-repository-manager-data
- equal:
path: spec.template.spec.securityContext
value:
fsGroup: 200
runAsGroup: 200
runAsUser: 200
- it: should use our simple values
template: deployment.yaml
set:
deploymentStrategy: my-strategy
imagePullSecrets:
- name: top-secret
asserts:
- hasDocuments:
count: 1
- equal:
path: spec.strategy.type
value: my-strategy
- equal:
path: spec.template.spec.imagePullSecrets
value:
- name: top-secret

View File

@@ -1,3 +1,4 @@
---
suite: ingress
templates:
- ingress.yaml
@@ -97,7 +98,105 @@ tests:
equal:
path: metadata.name
value: RELEASE-NAME-nexus-repository-manager
- documentIndex: 0
equal:
path: spec
value:
ingressClassName: nginx
rules:
- host: repo.demo
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: RELEASE-NAME-nexus-repository-manager
port:
number: 8081
- documentIndex: 1
equal:
path: metadata.name
value: RELEASE-NAME-nexus-repository-manager-docker-5000
- documentIndex: 1
equal:
path: spec
value:
ingressClassName: nginx
rules:
- host: docker.repo.demo
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: RELEASE-NAME-nexus-repository-manager-docker-5000
port:
number: 5000
tls:
- hosts:
- docker.repo.demo
secretName: registry-secret
- it: we can exclude ingressClassName for repo ingress and docker ingress
set:
ingress:
enabled: true
ingressClassName: {}
nexus:
docker:
enabled: true
registries:
- host: docker.repo.demo
port: 5000
secretName: registry-secret
asserts:
- hasDocuments:
count: 2
- isKind:
of: Ingress
- equal:
path: apiVersion
value: networking.k8s.io/v1
- equal:
path: metadata.labels.[app.kubernetes.io/instance]
value: RELEASE-NAME
- equal:
path: metadata.labels.[app.kubernetes.io/managed-by]
value: Helm
- matchRegex:
path: metadata.labels.[app.kubernetes.io/version]
pattern: \d+\.\d+\.\d+
- matchRegex:
path: metadata.labels.[helm.sh/chart]
pattern: nexus-repository-manager-\d+\.\d+\.\d+
- equal:
path: metadata.labels.[app.kubernetes.io/name]
value: nexus-repository-manager
- equal:
path: metadata.annotations
value:
nginx.ingress.kubernetes.io/proxy-body-size: "0"
- documentIndex: 0
equal:
path: metadata.name
value: RELEASE-NAME-nexus-repository-manager
- documentIndex: 0
equal:
path: spec
value:
rules:
- host: repo.demo
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: RELEASE-NAME-nexus-repository-manager
port:
number: 8081
- documentIndex: 1
equal:
path: metadata.name
@@ -119,9 +218,8 @@ tests:
number: 5000
tls:
- hosts:
- docker.repo.demo
- docker.repo.demo
secretName: registry-secret
- it: is disabled by default
asserts:
- hasDocuments:

View File

@@ -2,13 +2,16 @@
statefulset:
# This is not supported
enabled: false
# By default, deploymentStrategy is set to rollingUpdate with maxSurge of 25% and maxUnavailable of 25%. You can change the type to `Recreate` or uncomment the `rollingUpdate` specification and adjust it to your usage.
deploymentStrategy: Recreate
image:
# Sonatype Official Public Image
repository: sonatype/nexus3
tag: 3.41.1
tag: 3.54.1
pullPolicy: IfNotPresent
imagePullSecrets:
# for image registries that require login, specify the name of the existing
# kubernetes secret
# - name: <pull-secret-name>
nexus:
docker:
@@ -16,12 +19,17 @@ nexus:
# registries:
# - host: chart.local
# port: 5000
# secretName: registrySecret
# secretName: registry-secret
env:
# minimum recommended memory settings for a small, personal instance from
# https://help.sonatype.com/repomanager3/product-information/system-requirements
- name: INSTALL4J_ADD_VM_PARAMS
value: "-Xms2703M -Xmx2703M -XX:MaxDirectMemorySize=2703M -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap"
value: |-
-Xms2703M -Xmx2703M
-XX:MaxDirectMemorySize=2703M
-XX:+UnlockExperimentalVMOptions
-XX:+UseCGroupMemoryLimitForHeap
-Djava.util.prefs.userRoot=/nexus-data/javaprefs
- name: NEXUS_SECURITY_RANDOMPASSWORD
value: "true"
properties:
@@ -72,8 +80,6 @@ nexus:
# - "example.com"
# - "www.example.com"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 41.1.3
version: 54.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 3.41.1
appVersion: 3.54.1
keywords:
- artifacts

View File

@@ -62,6 +62,98 @@ You will also need to complete the steps below. See the referenced AWS documenta
---
## External-dns
This helm chart uses [external-dns](https://github.com/kubernetes-sigs/external-dns) to create 'A' records in AWS Route 53 for our [Docker subdomain feature](https://help.sonatype.com/repomanager3/nexus-repository-administration/formats/docker-registry/docker-subdomain-connector).
See the ```external-dns.alpha.kubernetes.io/hostname``` annotation in the dockerIngress resource in the values.yaml.
### Permissions for external-dns
Open a terminal that has connectivity to your EKS cluster and run the following commands:
```
cat <<'EOF' >> external-dns-r53-policy.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets"
],
"Resource": [
"arn:aws:route53:::hostedzone/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:ListResourceRecordSets"
],
"Resource": [
"*"
]
}
]
}
EOF
aws iam create-policy --policy-name "AllowExternalDNSUpdates" --policy-document file://external-dns-r53-policy.json
POLICY_ARN=$(aws iam list-policies --query 'Policies[?PolicyName==`AllowExternalDNSUpdates`].Arn' --output text)
EKS_CLUSTER_NAME=<Your EKS Cluster Name>
aws eks describe-cluster --name $EKS_CLUSTER_NAME --query "cluster.identity.oidc.issuer" --output text
eksctl utils associate-iam-oidc-provider --cluster $EKS_CLUSTER_NAME --approve
ACCOUNT_ID=$(aws sts get-caller-identity --query "Account" --output text)
OIDC_PROVIDER=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --query "cluster.identity.oidc.issuer" --output text | sed -e 's|^https://||')
```
Note: The value you assign to the 'EXTERNALDNS_NS' variable below should be the same as the one you specify in your values.yaml for namespaces.externaldnsNs
```
EXTERNALDNS_NS=nexus-externaldns
cat <<-EOF > externaldns-trust.json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws:iam::$ACCOUNT_ID:oidc-provider/$OIDC_PROVIDER"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"$OIDC_PROVIDER:sub": "system:serviceaccount:${EXTERNALDNS_NS}:external-dns",
"$OIDC_PROVIDER:aud": "sts.amazonaws.com"
}
}
}
]
}
EOF
IRSA_ROLE="nexusrepo-external-dns-irsa-role"
aws iam create-role --role-name $IRSA_ROLE --assume-role-policy-document file://externaldns-trust.json
aws iam attach-role-policy --role-name $IRSA_ROLE --policy-arn $POLICY_ARN
ROLE_ARN=$(aws iam get-role --role-name $IRSA_ROLE --query Role.Arn --output text)
echo $ROLE_ARN
```
2. Take note of the ROLE_ARN printed by the last command above and specify it in your values.yaml for serviceAccount.externaldns.role
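A sketch of the corresponding `values.yaml` entry, assuming the nesting implied by the dotted key path above (the ARN shown is a placeholder for your actual ROLE_ARN):
```yaml
serviceAccount:
  externaldns:
    role: arn:aws:iam::111111111111:role/nexusrepo-external-dns-irsa-role  # placeholder ARN
```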
## Deployment
1. Add the sonatype repo to your helm:
```helm repo add sonatype https://sonatype.github.io/helm3-charts/ ```

View File

@@ -70,7 +70,7 @@ spec:
- name: NEXUS_SECURITY_RANDOMPASSWORD
value: "false"
- name: INSTALL4J_ADD_VM_PARAMS
value: "-Xms2703m -Xmx2703m -XX:MaxDirectMemorySize=2703m -Dnexus.licenseFile=/nxrm-secrets/{{ .Values.secret.license.alias }} \
value: "{{ .Values.deployment.container.env.install4jAddVmParams }} -Dnexus.licenseFile=/nxrm-secrets/{{ .Values.secret.license.alias }} \
-Dnexus.datastore.enabled=true -Djava.util.prefs.userRoot=${NEXUS_DATA}/javaprefs \
-Dnexus.datastore.nexus.jdbcUrl=jdbc:postgresql://${DB_HOST}:{{ .Values.deployment.container.env.nexusDBPort }}/${DB_NAME} \
-Dnexus.datastore.nexus.username=${DB_USER} \

View File

@@ -77,7 +77,7 @@ data:
[INPUT]
Name tail
Tag nexus.nexus-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_nxrm-app-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_nxrm-app-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB
@@ -112,7 +112,7 @@ data:
[INPUT]
Name tail
Tag nexus.request-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_request-log-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_request-log-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB
@@ -147,7 +147,7 @@ data:
[INPUT]
Name tail
Tag nexus.audit-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_audit-log-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_audit-log-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB
@@ -182,7 +182,7 @@ data:
[INPUT]
Name tail
Tag nexus.tasks-log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-nxrm.deployment*{{ .Values.namespaces.nexusNs }}_tasks-log-*.log
Path /var/log/containers/{{ .Chart.Name }}-{{ .Chart.Version }}.{{ .Release.Name }}-{{ .Values.deployment.name }}*{{ .Values.namespaces.nexusNs }}_tasks-log-*.log
Parser docker
DB /var/fluent-bit/state/flb_container.db
Mem_Buf_Limit 5MB

View File

@ -0,0 +1,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Values.workdir.configmap.name }}
namespace: {{ .Values.namespaces.nexusNs }}
data:
create-nexus-work-dir.sh: |
#!/bin/bash
# Make Nexus Repository Manager work directory
mkdir -p /nexus-repo-mgr-work-dir/work

View File

@@ -0,0 +1,51 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ .Values.workdir.daemonset.name }}
namespace: {{ .Values.namespaces.nexusNs }}
spec:
selector:
matchLabels:
job: dircreator
template:
metadata:
labels:
job: dircreator
spec:
hostPID: true
restartPolicy: Always
initContainers:
# Copy file for creating nexus work directory over and execute it on host
- name: create-nexus-work-dir
image: ubuntu:23.04
command: [/bin/sh]
args:
- -c
- >-
cp /tmp/create-nexus-work-dir.sh /host-dir &&
/usr/bin/nsenter -m/proc/1/ns/mnt -- chmod u+x /tmp/install/create-nexus-work-dir.sh &&
/usr/bin/nsenter -m/proc/1/ns/mnt /tmp/install/create-nexus-work-dir.sh
securityContext:
privileged: true
volumeMounts:
- name: create-nexus-work-dir-script
mountPath: /tmp
- name: host-mnt
mountPath: /host-dir
containers:
- name: directory-creator
image: busybox:1.33.1
command: ["/bin/sh"]
args:
- -c
- >-
tail -f /dev/null
securityContext:
privileged: true
volumes:
- name: create-nexus-work-dir-script
configMap:
name: {{ .Values.workdir.configmap.name }}
- name: host-mnt
hostPath:
path: /tmp/install

View File

@@ -12,6 +12,7 @@ deployment:
clusterName: nxrm-nexus
logsRegion: us-east-1
fluentBitVersion: 2.28.0
replicaCount: 1
initContainer:
image:
repository: busybox
@@ -19,12 +20,13 @@ deployment:
container:
image:
repository: sonatype/nexus3
tag: 3.41.1
tag: 3.45.1
containerPort: 8081
pullPolicy: IfNotPresent
env:
nexusDBName: nexus
nexusDBPort: 3306
install4jAddVmParams: "-Xms2703m -Xmx2703m"
requestLogContainer:
image:
repository: busybox
@@ -47,27 +49,35 @@ ingress:
#host: "example.com" #host to apply this ingress rule to. Uncomment this in your values.yaml and set it as you wish
annotations:
kubernetes.io/ingress.class: alb
alb.ingress.kubernetes.io/healthcheck-path: /service/rest/v1/status
alb.ingress.kubernetes.io/scheme: internal # scheme
alb.ingress.kubernetes.io/subnets: subnet-1,subnet-2 #comma separated list of subnet ids
alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]'
alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # The AWS Certificate Manager ARN for your HTTPS certificate
#alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' uncomment for https
#alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # Uncomment for https. The AWS Certificate Manager ARN for your HTTPS certificate
dockerIngress: #Ingress for Docker Connector - comment out if you don't use docker repositories
annotations:
kubernetes.io/ingress.class: alb # comment out if you don't use docker repositories
alb.ingress.kubernetes.io/scheme: internal # scheme comment out if you don't use docker repositories
alb.ingress.kubernetes.io/subnets: subnet-1,subnet-2 #comma separated list of subnet ids, comment out if you don't use docker repositories
alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' #comment out if you don't use docker repositories
alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # Comment out if you don't use docker repositories - The AWS Certificate Manager ARN for your HTTPS certificate
external-dns.alpha.kubernetes.io/hostname: dockerrepo1.example.com, dockerrepo2.example.com, dockerrepo3.example.com # Add more docker subdomains using dockerrepoName.example.com, otherwise comment out if you don't use docker repositories
# alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}]' #uncomment if you use docker repositories
# alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:0000000000000:certificate/00000000-1111-2222-3333-444444444444 # Uncomment if you use docker repositories - The AWS Certificate Manager ARN for your HTTPS certificate
# external-dns.alpha.kubernetes.io/hostname: dockerrepo1.example.com, dockerrepo2.example.com, dockerrepo3.example.com # Add more docker subdomains using dockerrepoName.example.com, otherwise comment out if you don't use docker repositories
workdir:
configmap:
name: create-nexus-workdir-config
daemonset:
name: create-nexus-work-dir
storageClass:
iopsPerGB: "10" #Note: the aws plugin multiplies this by the size of the requested volume to compute the IOPS of the volume and caps it at 20,000 IOPS
pv:
storage: 120Gi
volumeMode: Filesystem
accessModes: ReadWriteOnce
reclaimPolicy: Retain
path: /mnt
path: /nexus-repo-mgr-work-dir/work
zones:
zone1: us-east-1a
zone2: us-east-1b
- us-east-1a
- us-east-1b
pvc:
accessModes: ReadWriteOnce
storage: 100Gi