Compare commits


No commits in common. "main" and "v10.0.1" have entirely different histories.

97 changed files with 1013 additions and 4394 deletions

View File

@ -1,7 +0,0 @@
{
  "extends": ["@commitlint/config-conventional"],
  "rules": {
    "type-enum": [2, "always", ["feat", "fix", "chore", "docs", "style", "refactor", "test", "perf", "ci", "WIP"]],
    "type-case": [0, "always", "lower-case"]
  }
}
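For illustration, a commit title can be checked against this configuration locally; a minimal sketch, assuming `@commitlint/cli` and `@commitlint/config-conventional` are installed via `npm install` (the sample message is hypothetical):

```bash
# pipe a sample commit title into commitlint; it exits non-zero if the type or case rules are violated
echo "feat(config): allow overriding the deployment namespace" | npx commitlint
```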

View File

@ -23,7 +23,7 @@
### Applicable issues ### Applicable issues
<!-- Enter any applicable Issues here (You can reference an issue using #). Please remove this section if there is no referenced issue. --> <!-- Enter any applicable Issues here (You can reference an issue using #). Please remove this section if there is no referenced issue. -->
- Fixes # - fixes #
### Additional information ### Additional information
@ -39,6 +39,4 @@
- [ ] Parameters are documented in the `values.yaml` and added to the `README.md` using [readme-generator-for-helm](https://github.com/bitnami-labs/readme-generator-for-helm) - [ ] Parameters are documented in the `values.yaml` and added to the `README.md` using [readme-generator-for-helm](https://github.com/bitnami-labs/readme-generator-for-helm)
- [ ] Breaking changes are documented in the `README.md` - [ ] Breaking changes are documented in the `README.md`
- [ ] Helm templating unittests are added (required when changing anything in `templates` folder) - [ ] Templating unittests are added
- [ ] Bash unittests are added (required when changing anything in `scripts` folder)
- [ ] All added template resources MUST render a namespace in metadata

View File

@ -1,114 +0,0 @@
#!/bin/bash

set -e

CHART_FILE="Chart.yaml"

if [ ! -f "${CHART_FILE}" ]; then
  echo "ERROR: ${CHART_FILE} not found!" 1>&2
  exit 1
fi

DEFAULT_NEW_TAG="$(git tag --sort=-version:refname | head -n 1)"
DEFAULT_OLD_TAG="$(git tag --sort=-version:refname | head -n 2 | tail -n 1)"

if [ -z "${1}" ]; then
  read -p "Enter start tag [${DEFAULT_OLD_TAG}]: " OLD_TAG
  if [ -z "${OLD_TAG}" ]; then
    OLD_TAG="${DEFAULT_OLD_TAG}"
  fi
  while [ -z "$(git tag --list "${OLD_TAG}")" ]; do
    echo "ERROR: Tag '${OLD_TAG}' not found!" 1>&2
    read -p "Enter start tag [${DEFAULT_OLD_TAG}]: " OLD_TAG
    if [ -z "${OLD_TAG}" ]; then
      OLD_TAG="${DEFAULT_OLD_TAG}"
    fi
  done
else
  OLD_TAG=${1}
  if [ -z "$(git tag --list "${OLD_TAG}")" ]; then
    echo "ERROR: Tag '${OLD_TAG}' not found!" 1>&2
    exit 1
  fi
fi

if [ -z "${2}" ]; then
  read -p "Enter end tag [${DEFAULT_NEW_TAG}]: " NEW_TAG
  if [ -z "${NEW_TAG}" ]; then
    NEW_TAG="${DEFAULT_NEW_TAG}"
  fi
  while [ -z "$(git tag --list "${NEW_TAG}")" ]; do
    echo "ERROR: Tag '${NEW_TAG}' not found!" 1>&2
    read -p "Enter end tag [${DEFAULT_NEW_TAG}]: " NEW_TAG
    if [ -z "${NEW_TAG}" ]; then
      NEW_TAG="${DEFAULT_NEW_TAG}"
    fi
  done
else
  NEW_TAG=${2}
  if [ -z "$(git tag --list "${NEW_TAG}")" ]; then
    echo "ERROR: Tag '${NEW_TAG}' not found!" 1>&2
    exit 1
  fi
fi

CHANGE_LOG_YAML=$(mktemp)
echo "[]" > "${CHANGE_LOG_YAML}"

function map_type_to_kind() {
  case "${1}" in
    feat)
      echo "added"
      ;;
    fix)
      echo "fixed"
      ;;
    chore|style|test|ci|docs|refactor)
      echo "changed"
      ;;
    revert)
      echo "removed"
      ;;
    sec)
      echo "security"
      ;;
    *)
      echo "skip"
      ;;
  esac
}

COMMIT_TITLES="$(git log --pretty=format:"%s" "${OLD_TAG}..${NEW_TAG}")"

echo "INFO: Generate change log entries from ${OLD_TAG} until ${NEW_TAG}"

while IFS= read -r line; do
  if [[ "${line}" =~ ^([a-zA-Z]+)(\([^\)]+\))?\:\ (.+)$ ]]; then
    TYPE="${BASH_REMATCH[1]}"
    KIND=$(map_type_to_kind "${TYPE}")
    if [ "${KIND}" == "skip" ]; then
      continue
    fi
    DESC="${BASH_REMATCH[3]}"
    echo "- ${KIND}: ${DESC}"
    jq --arg kind "${KIND}" --arg description "${DESC}" '. += [ $ARGS.named ]' < "${CHANGE_LOG_YAML}" > "${CHANGE_LOG_YAML}.new"
    mv "${CHANGE_LOG_YAML}.new" "${CHANGE_LOG_YAML}"
  fi
done <<< "${COMMIT_TITLES}"

if [ -s "${CHANGE_LOG_YAML}" ]; then
  yq --inplace --input-format json --output-format yml "${CHANGE_LOG_YAML}"
  yq --no-colors --inplace ".annotations.\"artifacthub.io/changes\" |= loadstr(\"${CHANGE_LOG_YAML}\") | sort_keys(.)" "${CHART_FILE}"
else
  echo "ERROR: Changelog file is empty: ${CHANGE_LOG_YAML}" 1>&2
  exit 1
fi

rm "${CHANGE_LOG_YAML}"

View File

@ -2,145 +2,67 @@ name: generate-chart
on: on:
push: push:
branches: tags:
- "*" - "*"
env: env:
# renovate: datasource=docker depName=alpine/helm # renovate: datasource=docker depName=alpine/helm
HELM_VERSION: "3.17.3" HELM_VERSION: "3.13.2"
jobs: jobs:
generate-chart-publish: generate-chart-publish:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: - name: install tools
fetch-depth: 0
- name: Determine Architecture and Operating System to support x86_64 and ARM based CI nodes
run: | run: |
# determine operating system apt update -y
OS=$(uname | tr '[:upper:]' '[:lower:]') apt install -y curl ca-certificates curl gnupg
echo "OS=${OS}" >> $GITHUB_ENV # helm
echo "INFO: Set environment variable OS=${OS}" curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | tee /usr/share/keyrings/helm.gpg > /dev/null
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | tee /etc/apt/sources.list.d/helm-stable-debian.list
# determine architecture # docker
ARCH="$(uname -m)"
case "${ARCH}" in
aarch64) ARCH=arm64;;
x86_64) ARCH=amd64;;
esac
echo "ARCH=${ARCH}" >> $GITHUB_ENV
echo "INFO: Set environment variable ARCH=${ARCH}"
- name: Install packages via apt
run: |
apt update --yes
echo "INFO: Install packages via apt"
apt install --yes curl ca-certificates curl gnupg jq
- name: Install helm
run: |
curl --fail --location --output /dev/stdout --silent --show-error https://get.helm.sh/helm-v${{ env.HELM_VERSION }}-${OS}-${ARCH}.tar.gz | tar --extract --gzip --file /dev/stdin
mv ${OS}-${ARCH}/helm /usr/local/bin/
rm --force --recursive ${OS}-${ARCH} helm-v${{ env.HELM_VERSION }}-${OS}-${ARCH}.tar.gz
helm version
- name: Install yq
env:
YQ_VERSION: v4.45.4 # renovate: datasource=github-releases depName=mikefarah/yq
run: |
curl --fail --location --output /dev/stdout --silent --show-error https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_${OS}_${ARCH}.tar.gz | tar --extract --gzip --file /dev/stdin
mv yq_${OS}_${ARCH} /usr/local/bin
rm --force --recursive yq_${OS}_${ARCH} yq_${OS}_${ARCH}.tar.gz
yq --version
- name: Install docker-ce via apt
run: |
echo "INFO: Install docker"
install -m 0755 -d /etc/apt/keyrings install -m 0755 -d /etc/apt/keyrings
curl --fail --location --silent --show-error https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg chmod a+r /etc/apt/keyrings/docker.gpg
echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt update --yes apt update -y
apt install --yes python3 python3-pip apt-transport-https docker-ce-cli apt install -y python helm=${{ env.HELM_VERSION }}-1 python3-pip apt-transport-https docker-ce-cli
pip install awscli
- name: Install awscli - name: Import GPG key
id: import_gpg
uses: https://github.com/crazy-max/ghaction-import-gpg@v5
with:
gpg_private_key: ${{ secrets.GPGSIGN_KEY }}
passphrase: ${{ secrets.GPGSIGN_PASSPHRASE }}
fingerprint: CC64B1DB67ABBEECAB24B6455FC346329753F4B0
# Using helm gpg plugin as 'helm package --sign' has issues with gpg2: https://github.com/helm/helm/issues/2843
- name: package chart
run: | run: |
echo "INFO: Install awscli via python pip" echo ${{ secrets.DOCKER_CHARTS_PASSWORD }} | docker login -u ${{ secrets.DOCKER_CHARTS_USERNAME }} --password-stdin
pip install awscli --break-system-packages # FIXME: use upstream after https://github.com/technosophos/helm-gpg/issues/1 is solved
aws --version helm plugin install https://github.com/pat-s/helm-gpg
helm dependency build
helm package --version "${GITHUB_REF#refs/tags/v}" ./
helm gpg sign "gitea-${GITHUB_REF#refs/tags/v}.tgz"
mkdir gitea
mv gitea*.tgz gitea/
curl -s -L -o gitea/index.yaml https://dl.gitea.com/charts/index.yaml
helm repo index gitea/ --url https://dl.gitea.com/charts --merge gitea/index.yaml
# push to dockerhub
echo ${{ secrets.DOCKER_CHARTS_PASSWORD }} | helm registry login -u ${{ secrets.DOCKER_CHARTS_USERNAME }} registry-1.docker.io --password-stdin
helm push gitea/gitea-${GITHUB_REF#refs/tags/v}.tgz oci://registry-1.docker.io/giteacharts
helm registry logout registry-1.docker.io
# - name: Import GPG key - name: aws credential configure
# id: import_gpg uses: https://github.com/aws-actions/configure-aws-credentials@v2
# uses: https://github.com/crazy-max/ghaction-import-gpg@v6 with:
# with: aws-access-key-id: ${{ secrets.AWS_KEY_ID }}
# gpg_private_key: ${{ secrets.GPGSIGN_KEY }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# passphrase: ${{ secrets.GPGSIGN_PASSPHRASE }} aws-region: ${{ secrets.AWS_REGION }}
# fingerprint: CC64B1DB67ABBEECAB24B6455FC346329753F4B0
- name: Add Artifacthub.io annotations - name: Copy files to S3 and clear cache
run: | run: |
NEW_TAG=v12.0.0 aws s3 sync gitea/ s3://${{ secrets.AWS_S3_BUCKET}}/charts/
OLD_TAG=v11.0.1
# NEW_TAG="$(git tag --sort=-version:refname | head --lines 1)"
# OLD_TAG="$(git tag --sort=-version:refname | head --lines 2 | tail --lines 1)"
.gitea/scripts/add-annotations.sh "${OLD_TAG}" "${NEW_TAG}"
- name: Print Chart.yaml
run: cat Chart.yaml
# # Using helm gpg plugin as 'helm package --sign' has issues with gpg2: https://github.com/helm/helm/issues/2843
# - name: package chart
# run: |
# echo ${{ secrets.DOCKER_CHARTS_PASSWORD }} | docker login -u ${{ secrets.DOCKER_CHARTS_USERNAME }} --password-stdin
# # FIXME: use upstream after https://github.com/technosophos/helm-gpg/issues/1 is solved
# helm plugin install https://github.com/pat-s/helm-gpg
# helm dependency build
# helm package --version "${GITHUB_REF#refs/tags/v}" ./
# mkdir gitea
# mv gitea*.tgz gitea/
# curl --fail --location --output gitea/index.yaml --silent --show-error https://dl.gitea.com/charts/index.yaml
# helm repo index gitea/ --url https://dl.gitea.com/charts --merge gitea/index.yaml
# # push to dockerhub
# echo ${{ secrets.DOCKER_CHARTS_PASSWORD }} | helm registry login -u ${{ secrets.DOCKER_CHARTS_USERNAME }} registry-1.docker.io --password-stdin
# helm push gitea/gitea-${GITHUB_REF#refs/tags/v}.tgz oci://registry-1.docker.io/giteacharts
# helm registry logout registry-1.docker.io
# - name: aws credential configure
# uses: https://github.com/aws-actions/configure-aws-credentials@v4
# with:
# aws-access-key-id: ${{ secrets.AWS_KEY_ID }}
# aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# aws-region: ${{ secrets.AWS_REGION }}
# - name: Copy files to S3 and clear cache
# run: |
# aws s3 sync gitea/ s3://${{ secrets.AWS_S3_BUCKET}}/charts/
# release-gitea:
# # needs: generate-chart-publish
# runs-on: ubuntu-latest
# container: docker.io/thegeeklab/git-sv:2.0.1
# steps:
# - name: install tools
# run: |
# apk add -q --update --no-cache nodejs
# - uses: actions/checkout@v4
# with:
# fetch-tags: true
# fetch-depth: 0
# - name: Create changelog
# run: |
# git sv current-version
# git sv release-notes -t ${GITHUB_REF#refs/tags/} -o CHANGELOG.md
# sed -i '1,2d' CHANGELOG.md # remove version
# cat CHANGELOG.md
# - name: Release
# uses: https://github.com/akkuman/gitea-release-action@v1
# with:
# body_path: CHANGELOG.md
# token: "${{ secrets.RELEASE_TOKEN }}"

View File

@ -0,0 +1,41 @@
name: check-and-test
on:
  pull_request:
    branches:
      - "*"
  push:
    branches:
      - main
      - "renovate/**"
env:
  # renovate: datasource=github-releases depName=helm-unittest/helm-unittest
  HELM_UNITTEST_VERSION: "v0.3.6"
jobs:
  check-and-test:
    runs-on: ubuntu-latest
    container: alpine/helm:3.13.2
    steps:
      - name: install tools
        run: |
          apk update
          apk add --update make nodejs npm yamllint
      - uses: actions/checkout@v4
      - name: install chart dependencies
        run: helm dependency build
      - name: lint
        run: helm lint
      - name: template
        run: helm template --debug gitea-helm .
      - name: unit tests
        run: |
          helm plugin install --version ${{ env.HELM_UNITTEST_VERSION }} https://github.com/helm-unittest/helm-unittest
          make unittests
      - name: verify readme
        run: |
          make readme
          git diff --exit-code --name-only README.md
      - name: yaml lint
        uses: https://github.com/ibiqlik/action-yamllint@v3
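Roughly the same checks can be reproduced locally before opening a PR; a sketch, assuming `helm`, `make`, `npm` and `yamllint` are installed:

```bash
helm dependency build
helm lint
helm template --debug gitea-helm .

# unit tests via the helm-unittest plugin (same version as pinned in CI)
helm plugin install --version v0.3.6 https://github.com/helm-unittest/helm-unittest
make unittests

# regenerate the parameter docs and lint the YAML files
make readme
yamllint .
```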

.gitmodules (vendored, 12 lines changed)
View File

@ -1,12 +0,0 @@
[submodule "unittests/bash/bats"]
path = unittests/bash/bats
url = https://github.com/bats-core/bats-core.git
[submodule "unittests/bash/test_helper/bats-support"]
path = unittests/bash/test_helper/bats-support
url = https://github.com/bats-core/bats-support.git
[submodule "unittests/bash/test_helper/bats-assert"]
path = unittests/bash/test_helper/bats-assert
url = https://github.com/bats-core/bats-assert.git
[submodule "unittests/bash/test_helper/bats-mock"]
path = unittests/bash/test_helper/bats-mock
url = https://github.com/jasonkarns/bats-mock.git
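These submodules provide the bats framework and the helper libraries used by the bash unit tests. A quick way to fetch them and run the suite (a sketch mirroring the `unittests-bash` make target and the CONTRIBUTING instructions):

```bash
# fetch bats-core and its helper libraries
git submodule update --init --recursive

# run the bash test suite (wraps ./unittests/bash/bats/bin/bats --pretty ./unittests/bash/tests/**/*.bats)
make unittests-bash
```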

View File

@ -1,57 +0,0 @@
version: '1.1' # Configuration version.
versioning:
  update-major: [breaking] # Commit types used to bump major.
  update-minor: [feat, perf] # Commit types used to bump minor.
  update-patch: [build, ci, chore, fix, perf, refactor, test] # Commit types used to bump patch.
  # When type is not present on update rules and is unknown (not mapped on commit message types);
  # if ignore-unknown=false bump patch, if ignore-unknown=true do not bump version.
  ignore-unknown: false
tag:
  pattern: 'v%d.%d.%d' # Pattern used to create git tag.
  filter: '' # Enables you to filter for considerable tags using git pattern syntax.
release-notes:
  sections: # Array with each section of release note. Check template section for more information.
    - name: Breaking Changes
      section-type: breaking-changes
    - name: Features # Name used on section.
      section-type: commits # Type of the section, supported types: commits, breaking-changes.
      commit-types: [feat, perf] # Commit types for commit section-type, one commit type cannot be in more than one section.
    - name: Bug Fixes
      section-type: commits
      commit-types: [fix]
    - name: Maintenance
      section-type: commits
      commit-types: [chore, refactor]
    - name: Documentation
      commit-types: [docs]
      section-type: commits
    - name: CI
      commit-types: [ci]
      section-type: commits
branches: # Git branches config.
  prefix: ([a-z]+\/)? # Prefix used on branch name, it should be a regex group.
  suffix: (-.*)? # Suffix used on branch name, it should be a regex group.
  disable-issue: false # Set true if there is no need to recover issue id from branch name.
  skip: [] # List of branch names ignored on commit message validation.
  skip-detached: false # Set true if a detached branch should be ignored on commit message validation.
commit-message:
  # Supported commit types.
  types: [build, ci, chore, docs, feat, fix, perf, refactor, revert, style, test]
  header-selector: '' # You can put in a regex here to select only a certain part of the commit message. Please define a regex group 'header'.
  scope:
    # Define supported scopes, if blank, scope will not be validated, if not, only scope listed will be valid.
    # Don't forget to add "" on your list if you need to define scopes and keep it optional.
    values: []
  footer:
    issue: # Use "issue: {}" if you wish to disable issue footer.
      key: jira # Name used to define an issue on footer metadata.
      key-synonyms: [Jira, JIRA] # Supported variations for footer metadata.
      use-hash: false # If false, use :<space> separator. If true, use <space># separator.
      add-value-prefix: '' # Add a prefix to issue value.
  issue:
    regex: '[A-Z]+-[0-9]+' # Regex for issue id.
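In effect, a `feat:` or `perf:` commit bumps the minor version while `fix:`, `chore:` and similar types bump the patch version. With [`git-sv`](https://github.com/thegeeklab/git-sv) installed, the outcome can be previewed locally (a sketch):

```bash
# version derived from the latest tag
git sv current-version

# tag that would be created next, based on the commit types since the last release
git sv next-version

# preview the release-notes sections configured above
git sv release-notes
```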

View File

@ -5,7 +5,6 @@
# Common VCS dirs # Common VCS dirs
.git/ .git/
.gitignore .gitignore
.gitmodules
.bzr/ .bzr/
.bzrignore .bzrignore
.hg/ .hg/
@ -32,10 +31,3 @@ Makefile
.drone.yml .drone.yml
CONTRIBUTING.md CONTRIBUTING.md
unittests/ unittests/
.editorconfig
.prettierignore
.yamllint
CODEOWNERS
renovate.json5
.commitlintrc.json
.gitsv/

View File

@ -73,7 +73,7 @@ MD022:
# MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content # MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content
MD024: MD024:
# Only check sibling headings # Only check sibling headings
siblings_only: true allow_different_nesting: true
# MD025/single-title/single-h1 - Multiple top-level headings in the same document # MD025/single-title/single-h1 - Multiple top-level headings in the same document
MD025: MD025:
@ -129,7 +129,6 @@ MD041:
MD044: MD044:
# List of proper names # List of proper names
names: names:
- docker.gitea.com
- Gitea - Gitea
- PostgreSQL - PostgreSQL
- Memcached - Memcached

View File

@ -3,7 +3,6 @@
"yzhang.markdown-all-in-one", "yzhang.markdown-all-in-one",
"DavidAnson.vscode-markdownlint", "DavidAnson.vscode-markdownlint",
"Tim-Koehler.helm-intellisense", "Tim-Koehler.helm-intellisense",
"esbenp.prettier-vscode", "esbenp.prettier-vscode"
"jetmartin.bats"
] ]
} }

.vscode/settings.json (vendored, 11 lines changed)
View File

@ -1,15 +1,8 @@
{ {
"yaml.schemas": { "yaml.schemas": {
"https://raw.githubusercontent.com/helm-unittest/helm-unittest/v0.8.2/schema/helm-testsuite.json": [ "https://raw.githubusercontent.com/helm-unittest/helm-unittest/v0.3.6/schema/helm-testsuite.json": [
"/unittests/**/*.yaml" "/unittests/**/*.yaml"
] ]
}, },
"yaml.schemaStore.enable": true, "yaml.schemaStore.enable": true
"[bats]": {
"editor.tabSize": 2
},
"[shellscript]": {
"files.eol": "\n",
"editor.tabSize": 2
}
} }

View File

@ -5,7 +5,7 @@ ignore: |
.yamllint .yamllint
node_modules node_modules
templates templates
unittests/bash
rules: rules:
truthy: truthy:
@ -17,4 +17,4 @@ rules:
comments: comments:
min-spaces-from-content: 1 min-spaces-from-content: 1
braces: braces:
max-spaces-inside: 2 max-spaces-inside: 2

View File

@ -1 +0,0 @@
charts/* @justusbunsi @pat-s

View File

@ -29,7 +29,6 @@ When submitting or updating a PR:
- try to avoid rebases. They make code reviews for large PRs and comments much harder. - try to avoid rebases. They make code reviews for large PRs and comments much harder.
- if applicable, use the PR template for a well-defined PR description. - if applicable, use the PR template for a well-defined PR description.
- clearly mark breaking changes. - clearly mark breaking changes.
- format the PR title following the [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#specification) schema
## Local development & testing ## Local development & testing
@ -38,7 +37,7 @@ be used:
1. Install `minikube` and `helm`. 1. Install `minikube` and `helm`.
1. Start a `minikube` cluster via `minikube start`. 1. Start a `minikube` cluster via `minikube start`.
1. From the `gitea/helm-gitea` directory execute the following command. 1. From the `gitea/helm-chart` directory execute the following command.
This will install the dependencies listed in `Chart.yml` and deploy the current state of the helm chart found locally. This will install the dependencies listed in `Chart.yml` and deploy the current state of the helm chart found locally.
If you want to test a branch, make sure to switch to the respective branch first. If you want to test a branch, make sure to switch to the respective branch first.
`helm install --dependency-update gitea . -f values.yaml`. `helm install --dependency-update gitea . -f values.yaml`.
@ -49,32 +48,18 @@ default port-forward svc/gitea-http 3000:3000`.
### Unit tests ### Unit tests
#### Helm templating tests
```bash ```bash
# install the unittest plugin # install the unittest plugin
$ helm plugin install https://github.com/helm-unittest/helm-unittest $ helm plugin install https://github.com/helm-unittest/helm-unittest
# run the Helm unittests # run the unittests
make unittests-helm make unittests
``` ```
See [plugin documentation](https://github.com/helm-unittest/helm-unittest/blob/main/DOCUMENT.md) for usage instructions. See [plugin documentation](https://github.com/helm-unittest/helm-unittest/blob/main/DOCUMENT.md) for usage instructions.
#### Bash script tests
```bash
# setup the environment
git submodule update --init --recursive
# run the bash tests
make unittests-bash
```
See [bats documentation](https://bats-core.readthedocs.io/en/stable/) for usage instructions.
## Release process ## Release process
1. Ensure you have [`git-sv`](https://github.com/thegeeklab/git-sv) installed 1. Create a tag following the tagging schema
1. Run `git sv tag` (this creates and pushes the next tag, derived from the conventional commits issued since the last release) 1. Push the tag
1. Let CI do its work 1. Let CI do its work
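Either way, the release amounts to publishing a tag matching the `v%d.%d.%d` pattern; CI then packages, signs and publishes the chart. A sketch of both variants:

```bash
# current flow: derive, create and push the next semver tag from the conventional commits
git sv tag

# previous flow: create and push the tag manually
git tag v10.0.1
git push origin v10.0.1
```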

View File

@ -1,15 +1,12 @@
dependencies: dependencies:
- name: postgresql - name: postgresql
repository: oci://registry-1.docker.io/bitnamicharts repository: oci://registry-1.docker.io/bitnamicharts
version: 16.7.4 version: 13.2.24
- name: postgresql-ha - name: postgresql-ha
repository: oci://registry-1.docker.io/bitnamicharts repository: oci://registry-1.docker.io/bitnamicharts
version: 16.0.6 version: 12.3.3
- name: valkey-cluster - name: redis-cluster
repository: oci://registry-1.docker.io/bitnamicharts repository: oci://registry-1.docker.io/bitnamicharts
version: 3.0.10 version: 9.1.3
- name: valkey digest: sha256:c4ae8a7ddfb6670acc7f39d5728a0929f6c7666d32459229b5e4e66b19749677
repository: oci://registry-1.docker.io/bitnamicharts generated: "2023-12-17T00:11:27.841588235Z"
version: 3.0.9
digest: sha256:aeafc605b86db0ff3999cd808af1c9ca3a6a749aae0d42f2fdae89803b3bb60a
generated: "2025-05-25T00:23:17.804516988Z"

View File

@ -3,8 +3,7 @@ name: gitea
description: Gitea Helm chart for Kubernetes description: Gitea Helm chart for Kubernetes
type: application type: application
version: 0.0.0 version: 0.0.0
# renovate datasource=github-releases depName=go-gitea/gitea extractVersion=^v(?<version>.*)$ appVersion: 1.21.2
appVersion: 1.23.8
icon: https://gitea.com/assets/img/logo.svg icon: https://gitea.com/assets/img/logo.svg
keywords: keywords:
@ -15,9 +14,9 @@ keywords:
- gitea - gitea
- gogs - gogs
sources: sources:
- https://gitea.com/gitea/helm-gitea - https://gitea.com/gitea/helm-chart
- https://github.com/go-gitea/gitea - https://github.com/go-gitea/gitea
- https://docker.gitea.com/gitea - https://hub.docker.com/r/gitea/gitea/
maintainers: maintainers:
- name: Charlie Drage - name: Charlie Drage
email: charlie@charliedrage.com email: charlie@charliedrage.com
@ -32,24 +31,20 @@ maintainers:
- name: Patrick Schratz - name: Patrick Schratz
email: patrick.schratz@gmail.com email: patrick.schratz@gmail.com
# Bitnami charts are served from GitHub CDN - See https://github.com/bitnami/charts/issues/10539 for details
dependencies: dependencies:
# https://github.com/bitnami/charts/blob/main/bitnami/postgresql # https://github.com/bitnami/charts/blob/main/bitnami/postgresql
- name: postgresql - name: postgresql
repository: oci://registry-1.docker.io/bitnamicharts repository: oci://registry-1.docker.io/bitnamicharts
version: 16.7.4 version: 13.2.24
condition: postgresql.enabled condition: postgresql.enabled
# https://github.com/bitnami/charts/blob/main/bitnami/postgresql-ha/Chart.yaml # https://github.com/bitnami/charts/blob/main/bitnami/postgresql-ha/Chart.yaml
- name: postgresql-ha - name: postgresql-ha
repository: oci://registry-1.docker.io/bitnamicharts repository: oci://registry-1.docker.io/bitnamicharts
version: 16.0.6 version: 12.3.3
condition: postgresql-ha.enabled condition: postgresql-ha.enabled
# https://github.com/bitnami/charts/blob/main/bitnami/valkey-cluster/Chart.yaml # https://github.com/bitnami/charts/blob/main/bitnami/redis-cluster/Chart.yaml
- name: valkey-cluster - name: redis-cluster
repository: oci://registry-1.docker.io/bitnamicharts repository: oci://registry-1.docker.io/bitnamicharts
version: 3.0.10 version: 9.1.3
condition: valkey-cluster.enabled condition: redis-cluster.enabled
# https://github.com/bitnami/charts/blob/main/bitnami/valkey/Chart.yaml
- name: valkey
repository: oci://registry-1.docker.io/bitnamicharts
version: 3.0.9
condition: valkey.enabled

View File

@ -1,5 +1,3 @@
SHELL := /usr/bin/env bash -O globstar
.PHONY: prepare-environment .PHONY: prepare-environment
prepare-environment: prepare-environment:
npm install npm install
@ -10,15 +8,8 @@ readme: prepare-environment
npm run readme:lint npm run readme:lint
.PHONY: unittests .PHONY: unittests
unittests: unittests-helm unittests-bash unittests:
helm unittest --strict -f 'unittests/**/*.yaml' -f 'unittests/dependency-major-image-check.yaml' ./
.PHONY: unittests-helm
unittests-helm:
helm unittest --strict -f 'unittests/helm/**/*.yaml' -f 'unittests/helm/values-conflicting-checks.yaml' ./
.PHONY: unittests-bash
unittests-bash:
./unittests/bash/bats/bin/bats --pretty ./unittests/bash/tests/**/*.bats
.PHONY: helm .PHONY: helm
update-helm-dependencies: update-helm-dependencies:

README.md (339 lines changed)
View File

@ -8,7 +8,6 @@
- [Dependency Versioning](#dependency-versioning) - [Dependency Versioning](#dependency-versioning)
- [Installing](#installing) - [Installing](#installing)
- [High Availability](#high-availability) - [High Availability](#high-availability)
- [Limit resources](#limit-resources)
- [Configuration](#configuration) - [Configuration](#configuration)
- [Default Configuration](#default-configuration) - [Default Configuration](#default-configuration)
- [Database defaults](#database-defaults) - [Database defaults](#database-defaults)
@ -31,7 +30,6 @@
- [OAuth2 Settings](#oauth2-settings) - [OAuth2 Settings](#oauth2-settings)
- [Configure commit signing](#configure-commit-signing) - [Configure commit signing](#configure-commit-signing)
- [Metrics and profiling](#metrics-and-profiling) - [Metrics and profiling](#metrics-and-profiling)
- [Secure Metrics Endpoint](#secure-metrics-endpoint)
- [Pod annotations](#pod-annotations) - [Pod annotations](#pod-annotations)
- [Themes](#themes) - [Themes](#themes)
- [Renovate](#renovate) - [Renovate](#renovate)
@ -51,9 +49,8 @@
- [LivenessProbe](#livenessprobe) - [LivenessProbe](#livenessprobe)
- [ReadinessProbe](#readinessprobe) - [ReadinessProbe](#readinessprobe)
- [StartupProbe](#startupprobe) - [StartupProbe](#startupprobe)
- [valkey-cluster](#valkey-cluster) - [redis-cluster](#redis-cluster)
- [valkey](#valkey) - [PostgreSQL-ha](#postgresql-ha)
- [PostgreSQL HA](#postgresql-ha)
- [PostgreSQL](#postgresql) - [PostgreSQL](#postgresql)
- [Advanced](#advanced) - [Advanced](#advanced)
- [Contributing](#contributing) - [Contributing](#contributing)
@ -71,7 +68,7 @@ Additionally, this chart allows to provide LDAP and admin user configuration wit
## Update and versioning policy ## Update and versioning policy
The Gitea helm chart versioning does not follow Gitea's versioning. The Gitea helm chart versioning does not follow Gitea's versioning.
The latest chart version can be looked up in [https://dl.gitea.com/charts](https://dl.gitea.com/charts) or in the [repository releases](https://gitea.com/gitea/helm-gitea/releases). The latest chart version can be looked up in [https://dl.gitea.com/charts](https://dl.gitea.com/charts) or in the [repository releases](https://gitea.com/gitea/helm-chart/releases).
The chart aims to follow Gitea's releases closely. The chart aims to follow Gitea's releases closely.
There might be times when the chart is behind the latest Gitea release. There might be times when the chart is behind the latest Gitea release.
@ -95,14 +92,13 @@ Users can also configure their own external providers via the configuration.
These dependencies are enabled by default: These dependencies are enabled by default:
- PostgreSQL HA ([Bitnami PostgreSQL-HA](https://github.com/bitnami/charts/blob/main/bitnami/postgresql-ha/Chart.yaml)) - PostgreSQL HA ([Bitnami PostgreSQL-HA](https://github.com/bitnami/charts/blob/main/bitnami/postgresql-ha/Chart.yaml))
- Valkey-Cluster ([Bitnami Valkey-Cluster](https://github.com/bitnami/charts/blob/main/bitnami/valkey-cluster/Chart.yaml)) - Redis-Cluster ([Bitnami Redis-Cluster](https://github.com/bitnami/charts/blob/main/bitnami/redis-cluster/Chart.yaml))
### Non-HA Dependencies ### Non-HA Dependencies
Alternatively, the following non-HA replacements are available: Alternatively, the following non-HA replacements are available:
- PostgreSQL ([Bitnami PostgreSQL](https://github.com/bitnami/charts/blob/main/bitnami/postgresql/Chart.yaml)) - PostgreSQL ([Bitnami PostgreSQL](https://github.com/bitnami/charts/blob/main/bitnami/postgresql/Chart.yaml))
- Valkey ([Bitnami Valkey](https://github.com/bitnami/charts/blob/main/bitnami/valkey/Chart.yaml))
### Dependency Versioning ### Dependency Versioning
@ -120,8 +116,7 @@ Please double-check the image repository and available tags in the sub-chart:
- [PostgreSQL-HA](https://hub.docker.com/r/bitnami/postgresql-repmgr/tags) - [PostgreSQL-HA](https://hub.docker.com/r/bitnami/postgresql-repmgr/tags)
- [PostgreSQL](https://hub.docker.com/r/bitnami/postgresql/tags) - [PostgreSQL](https://hub.docker.com/r/bitnami/postgresql/tags)
- [Valkey Cluster](https://hub.docker.com/r/bitnami/valkey-cluster/tags) - [Redis Cluster](https://hub.docker.com/r/bitnami/redis-cluster/tags)
- [Valkey](https://hub.docker.com/r/bitnami/valkey/tags)
and look up the image tag which fits your needs on Dockerhub. and look up the image tag which fits your needs on Dockerhub.
@ -139,12 +134,6 @@ Alternatively, the chart can also be installed from Dockerhub (since v9.6.0)
helm install gitea oci://registry-1.docker.io/giteacharts/gitea helm install gitea oci://registry-1.docker.io/giteacharts/gitea
``` ```
To avoid potential Dockerhub rate limits, the chart can also be installed via [docker.gitea.com](https://blog.gitea.com/docker-registry-update/) (since v9.6.0)
```sh
helm install gitea oci://docker.gitea.com/charts/gitea
```
When upgrading, please refer to the [Upgrading](#upgrading) section at the bottom of this document for major and breaking changes. When upgrading, please refer to the [Upgrading](#upgrading) section at the bottom of this document for major and breaking changes.
## High Availability ## High Availability
@ -155,44 +144,6 @@ Care must be taken for production use as not all implementation details of Gitea
Deploying a HA-ready Gitea instance requires some effort including using HA-ready dependencies. Deploying a HA-ready Gitea instance requires some effort including using HA-ready dependencies.
See the [HA Setup](docs/ha-setup.md) document for more details. See the [HA Setup](docs/ha-setup.md) document for more details.
## Limit resources
If the application is deployed with a CPU resource limit, Prometheus may report CPU throttling warnings for the
application. The reason is that the Go runtime detects the number of CPUs on the host but is not aware that only a
fraction of the available CPU time (the limit) may actually be used.
The application therefore has to be told that, despite several visible CPUs, only part of the available computing time
is available. As Gitea is a Golang application, this can be done via `GOMAXPROCS`. The following example shows one way
of deriving `GOMAXPROCS` automatically from a defined CPU limit such as `1000m`. Keep in mind that the CFS period of
`100ms` (the default on each Kubernetes node) is also very important for avoiding CPU throttling.
Further information about this topic can be found [here](https://kanishk.io/posts/cpu-throttling-in-containerized-go-apps/).
> [!NOTE]
> The environment variable `GOMAXPROCS` is set automatically when a CPU limit is defined. An explicit configuration is
> no longer required.
>
> Please note that a CPU limit < `1000m` can also lead to CPU throttling. Please read the linked documentation carefully.
```yaml
deployment:
  env:
    # Will be automatically defined!
    - name: GOMAXPROCS
      valueFrom:
        resourceFieldRef:
          divisor: "1" # Required for GitOps systems like ArgoCD/Flux; otherwise they report a permanent diff. (k8s default: 1)
          resource: limits.cpu
resources:
  limits:
    cpu: 1000m
    memory: 512Mi
  requests:
    cpu: 100m
    memory: 512Mi
```
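One way to verify the resulting value inside a running pod (a sketch; it assumes the release, and therefore the deployment, is named `gitea`):

```bash
# with the 1000m limit above, the Go runtime should report GOMAXPROCS=1
kubectl exec deploy/gitea -- env | grep GOMAXPROCS
```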
## Configuration ## Configuration
Gitea offers lots of configuration options. Gitea offers lots of configuration options.
@ -281,29 +232,27 @@ If `.Values.image.rootless: true`, then the following will occur. In case you us
#### Session, Cache and Queue #### Session, Cache and Queue
The session, cache and queue settings are set to use the built-in Valkey Cluster sub-chart dependency. The session, cache and queue settings are set to use the built-in Redis Cluster sub-chart dependency.
If Valkey Cluster is disabled, the chart will fall back to the Gitea defaults which use "memory" for `session` and `cache` and "level" for `queue`. If Redis Cluster is disabled, the chart will fall back to the Gitea defaults which use "memory" for `session` and `cache` and "level" for `queue`.
While these will work and even not cause immediate issues after startup, **they are not recommended for production use**. While these will work and even not cause immediate issues after startup, **they are not recommended for production use**.
Reasons being that a single pod will take on all the work for `session` and `cache` tasks in its available memory. Reasons being that a single pod will take on all the work for `session` and `cache` tasks in its available memory.
It is likely that the pod will run out of memory or will face substantial memory spikes, depending on the workload. It is likely that the pod will run out of memory or will face substantial memory spikes, depending on the workload.
External tools such as `valkey-cluster` or `memcached` handle these workloads much better. External tools such as `redis-cluster` or `memcached` handle these workloads much better.
### Single-Pod Configurations ### Single-Pod Configurations
If HA is not needed/desired, the following configurations can be used to deploy a single-pod Gitea instance. If HA is not needed/desired, the following configurations can be used to deploy a single-pod Gitea instance.
1. For a production-ready single-pod Gitea instance without external dependencies (using the chart dependency `postgresql` and `valkey`): 1. For a production-ready single-pod Gitea instance without external dependencies (using the chart dependency `postgresql`):
<details> <details>
<summary>values.yml</summary> <summary>values.yml</summary>
```yaml ```yaml
valkey-cluster: redis-cluster:
enabled: false enabled: false
valkey:
enabled: true
postgresql: postgresql:
enabled: true enabled: true
postgresql-ha: postgresql-ha:
@ -316,6 +265,12 @@ If HA is not needed/desired, the following configurations can be used to deploy
config: config:
database: database:
DB_TYPE: postgres DB_TYPE: postgres
session:
PROVIDER: db
cache:
ADAPTER: memory
queue:
TYPE: level
indexer: indexer:
ISSUE_INDEXER_TYPE: bleve ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true REPO_INDEXER_ENABLED: true
@ -333,9 +288,7 @@ If HA is not needed/desired, the following configurations can be used to deploy
<summary>values.yml</summary> <summary>values.yml</summary>
```yaml ```yaml
valkey-cluster: redis-cluster:
enabled: false
valkey:
enabled: false enabled: false
postgresql: postgresql:
enabled: false enabled: false
@ -466,9 +419,6 @@ gitea:
postgresql: postgresql:
enabled: false enabled: false
postgresql-ha:
enabled: false
``` ```
### Ports and external url ### Ports and external url
@ -533,23 +483,20 @@ and the repository exists.
``` ```
To solve this problem add the capability `SYS_CHROOT` to the `securityContext`. To solve this problem add the capability `SYS_CHROOT` to the `securityContext`.
More about this issue [here](https://gitea.com/gitea/helm-gitea/issues/161). More about this issue [here](https://gitea.com/gitea/helm-chart/issues/161).
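Via the CLI this could look roughly as follows (a sketch; it assumes the capability is added through the chart's `containerSecurityContext` value):

```bash
helm upgrade --install gitea . \
  --set "containerSecurityContext.capabilities.add={SYS_CHROOT}"
```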
### Cache ### Cache
The cache handling is done via `valkey-cluster` (via the `bitnami` chart) by default. The cache handling is done via `redis-cluster` (via the `bitnami` chart) by default.
This deployment is HA-ready but can also be used for single-pod deployments. This deployment is HA-ready but can also be used for single-pod deployments.
By default, 6 replicas are deployed for a working `valkey-cluster` deployment. By default, 6 replicas are deployed for a working `redis-cluster` deployment.
Many cloud providers offer a managed valkey service, which can be used instead of the built-in `valkey-cluster`. Many cloud providers offer a managed redis service, which can be used instead of the built-in `redis-cluster`.
```yaml ```yaml
valkey-cluster: redis-cluster:
enabled: true enabled: true
``` ```
⚠️ The valkey charts [do not work well with special characters in the password](https://gitea.com/gitea/helm-chart/issues/690).
Consider omitting such characters, or open an issue in the Bitnami repo and let us know once this gets fixed.
### Persistence ### Persistence
Gitea will be deployed as a deployment. Gitea will be deployed as a deployment.
@ -583,7 +530,7 @@ You can interact with the postgres settings as displayed in the following exampl
postgresql: postgresql:
persistence: persistence:
enabled: true enabled: true
existingClaim: MyAwesomeGiteaPostgresClaim claimName: MyAwesomeGiteaPostgresClaim
``` ```
### Admin User ### Admin User
@ -621,20 +568,6 @@ gitea:
existingSecret: gitea-admin-secret existingSecret: gitea-admin-secret
``` ```
Whether you use the existing Secret or specify a user name and password, there are three modes for how the admin user password is created or set.
- `keepUpdated` (the default) will set the admin user password, and reset it to the defined value every time the pod is recreated.
- `initialOnlyNoReset` will set the admin user password when creating it, but never try to update the password.
- `initialOnlyRequireReset` will set the admin user password when creating it, never update it, and require that the password be changed at the initial login.
These modes can be set like the following:
```yaml
gitea:
  admin:
    passwordMode: initialOnlyRequireReset
```
### LDAP Settings ### LDAP Settings
Like the admin user the LDAP settings can be updated. Like the admin user the LDAP settings can be updated.
@ -738,7 +671,7 @@ gitea:
When using the rootless image the gpg key folder is not persistent by default. When using the rootless image the gpg key folder is not persistent by default.
If you consider using signed commits for internal Gitea activities (e.g. initial commit), you'd need to provide a signing key. If you consider using signed commits for internal Gitea activities (e.g. initial commit), you'd need to provide a signing key.
Prior to [PR186](https://gitea.com/gitea/helm-gitea/pulls/186), imported keys had to be re-imported once the container got replaced by another. Prior to [PR186](https://gitea.com/gitea/helm-chart/pulls/186), imported keys had to be re-imported once the container got replaced by another.
The mentioned PR introduced a new configuration object `signing` allowing you to configure prerequisites for commit signing. The mentioned PR introduced a new configuration object `signing` allowing you to configure prerequisites for commit signing.
By default this section is disabled to maintain backwards compatibility. By default this section is disabled to maintain backwards compatibility.
@ -792,21 +725,6 @@ gitea:
ENABLE_PPROF: true ENABLE_PPROF: true
``` ```
### Secure Metrics Endpoint
Metrics endpoint `/metrics` can be secured by using `Bearer` token authentication.
**Note:** Providing a non-empty `TOKEN` value also requires authentication for the `ServiceMonitor`.
```yaml
gitea:
  metrics:
    token: "secure-token"
    enabled: true
    serviceMonitor:
      enabled: true
```
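With a token configured, the endpoint can be checked quickly through a port-forward (a sketch reusing the token above and the `svc/gitea-http` service mentioned earlier):

```bash
kubectl port-forward svc/gitea-http 3000:3000 &

# requests without the header are rejected; with it, the Prometheus metrics are returned
curl -H "Authorization: Bearer secure-token" http://localhost:3000/metrics | head
```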
## Pod annotations ## Pod annotations
Annotations can be added to the Gitea pod. Annotations can be added to the Gitea pod.
@ -916,14 +834,13 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
### Global ### Global
| Name | Description | Value | | Name | Description | Value |
| ------------------------- | ---------------------------------------------------------------------------------------------- | ----- | | ------------------------- | ------------------------------------------------------------------------- | ----- |
| `global.imageRegistry` | global image registry override | `""` | | `global.imageRegistry` | global image registry override | `""` |
| `global.imagePullSecrets` | global image pull secrets override; can be extended by `imagePullSecrets` | `[]` | | `global.imagePullSecrets` | global image pull secrets override; can be extended by `imagePullSecrets` | `[]` |
| `global.storageClass` | global storage class override | `""` | | `global.storageClass` | global storage class override | `""` |
| `global.hostAliases` | global hostAliases which will be added to the pod's hosts files | `[]` | | `global.hostAliases` | global hostAliases which will be added to the pod's hosts files | `[]` |
| `namespace` | An explicit namespace to deploy Gitea into. Defaults to the release namespace if not specified | `""` | | `replicaCount` | number of replicas for the deployment | `1` |
| `replicaCount` | number of replicas for the deployment | `1` |
### strategy ### strategy
@ -936,16 +853,16 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
### Image ### Image
| Name | Description | Value | | Name | Description | Value |
| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | | -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- |
| `image.registry` | image registry, e.g. gcr.io,docker.io | `docker.gitea.com` | | `image.registry` | image registry, e.g. gcr.io,docker.io | `""` |
| `image.repository` | Image to start for this pod | `gitea` | | `image.repository` | Image to start for this pod | `gitea/gitea` |
| `image.tag` | Visit: [Image tag](https://hub.docker.com/r/gitea/gitea/tags?page=1&ordering=last_updated). Defaults to `appVersion` within Chart.yaml. | `""` | | `image.tag` | Visit: [Image tag](https://hub.docker.com/r/gitea/gitea/tags?page=1&ordering=last_updated). Defaults to `appVersion` within Chart.yaml. | `""` |
| `image.digest` | Image digest. Allows to pin the given image tag. Useful for having control over mutable tags like `latest` | `""` | | `image.digest` | Image digest. Allows to pin the given image tag. Useful for having control over mutable tags like `latest` | `""` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.rootless` | Whether or not to pull the rootless version of Gitea, only works on Gitea 1.14.x or higher | `true` | | `image.rootless` | Whether or not to pull the rootless version of Gitea, only works on Gitea 1.14.x or higher | `true` |
| `image.fullOverride` | Completely overrides the image registry, path/image, tag and digest. **Adjust `image.rootless` accordingly and review [Rootless defaults](#rootless-defaults).** | `""` | | `image.fullOverride` | Completely overrides the image registry, path/image, tag and digest. **Adjust `image.rootless` accordingly and review [Rootless defaults](#rootless-defaults).** | `""` |
| `imagePullSecrets` | Secret to use for pulling the image | `[]` | | `imagePullSecrets` | Secret to use for pulling the image | `[]` |
### Security ### Security
@ -971,8 +888,6 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
| `service.http.ipFamilies` | HTTP service dual-stack familiy selection,for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/). | `nil` | | `service.http.ipFamilies` | HTTP service dual-stack familiy selection,for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/). | `nil` |
| `service.http.loadBalancerSourceRanges` | Source range filter for http loadbalancer | `[]` | | `service.http.loadBalancerSourceRanges` | Source range filter for http loadbalancer | `[]` |
| `service.http.annotations` | HTTP service annotations | `{}` | | `service.http.annotations` | HTTP service annotations | `{}` |
| `service.http.labels` | HTTP service additional labels | `{}` |
| `service.http.loadBalancerClass` | Loadbalancer class | `nil` |
| `service.ssh.type` | Kubernetes service type for ssh traffic | `ClusterIP` | | `service.ssh.type` | Kubernetes service type for ssh traffic | `ClusterIP` |
| `service.ssh.port` | Port number for ssh traffic | `22` | | `service.ssh.port` | Port number for ssh traffic | `22` |
| `service.ssh.clusterIP` | ClusterIP setting for ssh autosetup for deployment is None | `None` | | `service.ssh.clusterIP` | ClusterIP setting for ssh autosetup for deployment is None | `None` |
@ -985,20 +900,19 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
| `service.ssh.hostPort` | HostPort for ssh service | `nil` | | `service.ssh.hostPort` | HostPort for ssh service | `nil` |
| `service.ssh.loadBalancerSourceRanges` | Source range filter for ssh loadbalancer | `[]` | | `service.ssh.loadBalancerSourceRanges` | Source range filter for ssh loadbalancer | `[]` |
| `service.ssh.annotations` | SSH service annotations | `{}` | | `service.ssh.annotations` | SSH service annotations | `{}` |
| `service.ssh.labels` | SSH service additional labels | `{}` |
| `service.ssh.loadBalancerClass` | Loadbalancer class | `nil` |
### Ingress ### Ingress
| Name | Description | Value | | Name | Description | Value |
| -------------------------------- | ------------------------------- | ----------------- | | ------------------------------------ | --------------------------------------------------------------------------- | ----------------- |
| `ingress.enabled` | Enable ingress | `false` | | `ingress.enabled` | Enable ingress | `false` |
| `ingress.className` | DEPRECATED: Ingress class name. | `""` | | `ingress.className` | Ingress class name | `nil` |
| `ingress.pathType` | Ingress Path Type | `Prefix` | | `ingress.annotations` | Ingress annotations | `{}` |
| `ingress.annotations` | Ingress annotations | `{}` | | `ingress.hosts[0].host` | Default Ingress host | `git.example.com` |
| `ingress.hosts[0].host` | Default Ingress host | `git.example.com` | | `ingress.hosts[0].paths[0].path` | Default Ingress path | `/` |
| `ingress.hosts[0].paths[0].path` | Default Ingress path | `/` | | `ingress.hosts[0].paths[0].pathType` | Ingress path type | `Prefix` |
| `ingress.tls` | Ingress tls settings | `[]` | | `ingress.tls` | Ingress tls settings | `[]` |
| `ingress.apiVersion` | Specify APIVersion of ingress object. Mostly would only be used for argocd. | |
### deployment ### deployment
@ -1043,7 +957,6 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
| `persistence.storageClass` | Name of the storage class to use | `nil` | | `persistence.storageClass` | Name of the storage class to use | `nil` |
| `persistence.subPath` | Subdirectory of the volume to mount at | `nil` | | `persistence.subPath` | Subdirectory of the volume to mount at | `nil` |
| `persistence.volumeName` | Name of persistent volume in PVC | `""` | | `persistence.volumeName` | Name of persistent volume in PVC | `""` |
| `extraContainers` | Additional sidecar containers to run in the pod | `[]` |
| `extraVolumes` | Additional volumes to mount to the Gitea deployment | `[]` | | `extraVolumes` | Additional volumes to mount to the Gitea deployment | `[]` |
| `extraContainerVolumeMounts` | Mounts that are only mapped into the Gitea runtime/main container, to e.g. override custom templates. | `[]` | | `extraContainerVolumeMounts` | Mounts that are only mapped into the Gitea runtime/main container, to e.g. override custom templates. | `[]` |
| `extraInitVolumeMounts` | Mounts that are only mapped into the init-containers. Can be used for additional preconfiguration. | `[]` | | `extraInitVolumeMounts` | Mounts that are only mapped into the init-containers. Can be used for additional preconfiguration. | `[]` |
@ -1051,13 +964,12 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
### Init ### Init
| Name | Description | Value | | Name | Description | Value |
| ------------------------------------------ | ------------------------------------------------------------------------------------ | ------------ | | ------------------------------------------ | ------------------------------------------------------------------------------------ | ------- |
| `initPreScript` | Bash shell script copied verbatim to the start of the init-container. | `""` | | `initPreScript` | Bash shell script copied verbatim to the start of the init-container. | `""` |
| `initContainersScriptsVolumeMountPath` | Path to mount the scripts consumed from the Secrets | `/usr/sbinx` | | `initContainers.resources.limits` | initContainers.limits Kubernetes resource limits for init containers | `{}` |
| `initContainers.resources.limits` | initContainers.limits Kubernetes resource limits for init containers | `{}` | | `initContainers.resources.requests.cpu` | initContainers.requests.cpu Kubernetes cpu resource limits for init containers | `100m` |
| `initContainers.resources.requests.cpu` | initContainers.requests.cpu Kubernetes cpu resource limits for init containers | `100m` | | `initContainers.resources.requests.memory` | initContainers.requests.memory Kubernetes memory resource limits for init containers | `128Mi` |
| `initContainers.resources.requests.memory` | initContainers.requests.memory Kubernetes memory resource limits for init containers | `128Mi` |
### Signing ### Signing
@ -1065,34 +977,27 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
| ------------------------ | ----------------------------------------------------------------- | ------------------ | | ------------------------ | ----------------------------------------------------------------- | ------------------ |
| `signing.enabled` | Enable commit/action signing | `false` | | `signing.enabled` | Enable commit/action signing | `false` |
| `signing.gpgHome` | GPG home directory | `/data/git/.gnupg` | | `signing.gpgHome` | GPG home directory | `/data/git/.gnupg` |
| `signing.privateKey` | Inline private gpg key for signed internal Git activity | `""` | | `signing.privateKey` | Inline private gpg key for signed Gitea actions | `""` |
| `signing.existingSecret` | Use an existing secret to store the value of `signing.privateKey` | `""` | | `signing.existingSecret` | Use an existing secret to store the value of `signing.privateKey` | `""` |
### Gitea ### Gitea
| Name | Description | Value | | Name | Description | Value |
| -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | -------------------- | | -------------------------------------- | ------------------------------------------------------------------------- | -------------------- |
| `gitea.admin.username` | Username for the Gitea admin user | `gitea_admin` | | `gitea.admin.username` | Username for the Gitea admin user | `gitea_admin` |
| `gitea.admin.existingSecret` | Use an existing secret to store admin user credentials | `nil` | | `gitea.admin.existingSecret` | Use an existing secret to store admin user credentials | `nil` |
| `gitea.admin.password` | Password for the Gitea admin user | `r8sA8CPHD9!bt6d` | | `gitea.admin.password` | Password for the Gitea admin user | `r8sA8CPHD9!bt6d` |
| `gitea.admin.email` | Email for the Gitea admin user | `gitea@local.domain` | | `gitea.admin.email` | Email for the Gitea admin user | `gitea@local.domain` |
| `gitea.admin.passwordMode` | Mode for how to set/update the admin user password. Options are: initialOnlyNoReset, initialOnlyRequireReset, and keepUpdated | `keepUpdated` | | `gitea.metrics.enabled` | Enable Gitea metrics | `false` |
| `gitea.metrics.enabled` | Enable Gitea metrics | `false` | | `gitea.metrics.serviceMonitor.enabled` | Enable Gitea metrics service monitor | `false` |
| `gitea.metrics.token` | used for `bearer` token authentication on metrics endpoint. If not specified or empty metrics endpoint is public. | `nil` | | `gitea.ldap` | LDAP configuration | `[]` |
| `gitea.metrics.serviceMonitor.enabled` | Enable Gitea metrics service monitor. Requires, that `gitea.metrics.enabled` is also set to true, to enable metrics generally. | `false` | | `gitea.oauth` | OAuth configuration | `[]` |
| `gitea.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. If not specified Prometheus' global scrape interval is used. | `""` | | `gitea.config.server.SSH_PORT` | SSH port for rootlful Gitea image | `22` |
| `gitea.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | | `gitea.config.server.SSH_LISTEN_PORT` | SSH port for rootless Gitea image | `2222` |
| `gitea.metrics.serviceMonitor.scheme` | HTTP scheme to use for scraping. For example `http` or `https`. Default is http. | `""` | | `gitea.additionalConfigSources` | Additional configuration from secret or configmap | `[]` |
| `gitea.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended. If not specified, global Prometheus scrape timeout is used. | `""` | | `gitea.additionalConfigFromEnvs` | Additional configuration sources from environment variables | `[]` |
| `gitea.metrics.serviceMonitor.tlsConfig` | TLS configuration to use when scraping the metric endpoint by Prometheus. | `{}` | | `gitea.podAnnotations` | Annotations for the Gitea pod | `{}` |
| `gitea.ldap` | LDAP configuration | `[]` | | `gitea.ssh.logLevel` | Configure OpenSSH's log level. Only available for root-based Gitea image. | `INFO` |
| `gitea.oauth` | OAuth configuration | `[]` |
| `gitea.config.server.SSH_PORT` | SSH port for rootlful Gitea image | `22` |
| `gitea.config.server.SSH_LISTEN_PORT` | SSH port for rootless Gitea image | `2222` |
| `gitea.additionalConfigSources` | Additional configuration from secret or configmap | `[]` |
| `gitea.additionalConfigFromEnvs` | Additional configuration sources from environment variables | `[]` |
| `gitea.podAnnotations` | Annotations for the Gitea pod | `{}` |
| `gitea.ssh.logLevel` | Configure OpenSSH's log level. Only available for root-based Gitea image. | `INFO` |
### LivenessProbe ### LivenessProbe
@ -1130,36 +1035,20 @@ To comply with the Gitea helm chart definition of the digest parameter, a "custo
| `gitea.startupProbe.successThreshold` | Success threshold for startup probe | `1` | | `gitea.startupProbe.successThreshold` | Success threshold for startup probe | `1` |
| `gitea.startupProbe.failureThreshold` | Failure threshold for startup probe | `10` | | `gitea.startupProbe.failureThreshold` | Failure threshold for startup probe | `10` |
### valkey-cluster ### redis-cluster
Valkey cluster and [Valkey](#valkey) cannot be enabled at the same time. | Name | Description | Value |
| -------------------------------- | -------------------------------------------- | ------- |
| `redis-cluster.enabled` | Enable redis | `true` |
| `redis-cluster.usePassword` | Whether to use password authentication | `false` |
| `redis-cluster.cluster.nodes` | Number of redis cluster master nodes | `3` |
| `redis-cluster.cluster.replicas` | Number of redis cluster master node replicas | `0` |
| Name | Description | Value | ### PostgreSQL-ha
| ------------------------------------- | -------------------------------------------------------------------- | ------- |
| `valkey-cluster.enabled` | Enable valkey cluster | `true` |
| `valkey-cluster.usePassword` | Whether to use password authentication | `false` |
| `valkey-cluster.usePasswordFiles` | Whether to mount passwords as files instead of environment variables | `false` |
| `valkey-cluster.cluster.nodes` | Number of valkey cluster master nodes | `3` |
| `valkey-cluster.cluster.replicas` | Number of valkey cluster master node replicas | `0` |
| `valkey-cluster.service.ports.valkey` | Port of Valkey service | `6379` |
### valkey
Valkey and [Valkey cluster](#valkey-cluster) cannot be enabled at the same time.
| Name | Description | Value |
| ------------------------------------ | ------------------------------------------- | ------------ |
| `valkey.enabled` | Enable valkey standalone or replicated | `false` |
| `valkey.architecture` | Whether to use standalone or replication | `standalone` |
| `valkey.global.valkey.password` | Required password | `changeme` |
| `valkey.master.count` | Number of Valkey master instances to deploy | `1` |
| `valkey.master.service.ports.valkey` | Port of Valkey service | `6379` |
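Since only one of the two Valkey variants may be enabled, switching from the default cluster to a standalone instance could look like this minimal sketch (the password is a placeholder):

```yaml
valkey-cluster:
  enabled: false
valkey:
  enabled: true
  architecture: standalone
  global:
    valkey:
      password: changeme # placeholder - supply a real secret value
```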
### PostgreSQL HA
| Name | Description | Value | | Name | Description | Value |
| ------------------------------------------- | ---------------------------------------------------------------- | ----------- | | ------------------------------------------- | ---------------------------------------------------------------- | ----------- |
| `postgresql-ha.enabled` | Enable PostgreSQL HA | `true` | | `postgresql-ha.enabled` | Enable PostgreSQL-ha | `true` |
| `postgresql-ha.postgresql.password` | Password for the `gitea` user (overrides `auth.password`) | `changeme4` | | `postgresql-ha.postgresql.password` | Password for the `gitea` user (overrides `auth.password`) | `changeme4` |
| `postgresql-ha.global.postgresql.database` | Name for a custom database to create (overrides `auth.database`) | `gitea` | | `postgresql-ha.global.postgresql.database` | Name for a custom database to create (overrides `auth.database`) | `gitea` |
| `postgresql-ha.global.postgresql.username` | Name for a custom user to create (overrides `auth.username`) | `gitea` | | `postgresql-ha.global.postgresql.username` | Name for a custom user to create (overrides `auth.username`) | `gitea` |
@ -1168,7 +1057,7 @@ Valkey and [Valkey cluster](#valkey-cluster) cannot be enabled at the same time.
| `postgresql-ha.postgresql.postgresPassword` | postgres Password | `changeme1` | | `postgresql-ha.postgresql.postgresPassword` | postgres Password | `changeme1` |
| `postgresql-ha.pgpool.adminPassword` | pgpool adminPassword | `changeme3` | | `postgresql-ha.pgpool.adminPassword` | pgpool adminPassword | `changeme3` |
| `postgresql-ha.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `5432` | | `postgresql-ha.service.ports.postgresql` | PostgreSQL service port (overrides `service.ports.postgresql`) | `5432` |
| `postgresql-ha.persistence.size` | PVC Storage Request for PostgreSQL HA volume | `10Gi` | | `postgresql-ha.primary.persistence.size` | PVC Storage Request for PostgreSQL-ha volume | `10Gi` |
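The `changeme*` defaults above are placeholders only; a real deployment would typically override them, roughly like this (illustrative values):

```yaml
postgresql-ha:
  enabled: true
  postgresql:
    password: <gitea-db-password>
    postgresPassword: <postgres-admin-password>
  pgpool:
    adminPassword: <pgpool-admin-password>
  persistence:
    size: 20Gi
```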
### PostgreSQL ### PostgreSQL
@ -1205,52 +1094,6 @@ If you miss this, blindly upgrading may delete your Postgres instance and you ma
<details> <details>
<summary>To 12.0.0</summary>
<!-- prettier-ignore-start -->
<!-- markdownlint-disable-next-line -->
**Breaking changes**
<!-- prettier-ignore-end -->
- Outsourced "Actions" related configuration.
To deploy and use "Actions", please see the new dedicated chart at <https://gitea.com/gitea/helm-actions>.
It is maintained by a separate maintainer group and hasn't seen a release yet (at the time of the 12.0 release).
Feel encouraged to contribute if "Actions" is important to you!
This change was made to avoid overloading the existing helm chart, which is already quite large in size and configuration options.
In addition, the existing maintainers team was not actively using "Actions" which slowed down development and community contributions.
While the new chart is still young (and waiting for contributions and maintainers!), we believe this is the best way forward for both projects.
- Migrated from Redis/Redis-cluster to Valkey/Valkey-cluster charts (#775).
While marked as breaking, there is no need to migrate data.
The cache will start to refill automatically.
- Migrated ingress from `networking.k8s.io/v1beta1` to `networking.k8s.io/v1`.
We didn't make any changes to the syntax, so the upgrade should be seamless.
</details>
<details>
<summary>To 11.0.0</summary>
<!-- prettier-ignore-start -->
<!-- markdownlint-disable-next-line -->
**Breaking changes**
<!-- prettier-ignore-end -->
- Update Gitea to 1.23.x (review the [1.23 release blog post](https://blog.gitea.com/release-of-1.23.0/) for all application breaking changes)
- Update PostgreSQL sub-chart dependencies to appVersion 17.x
- Update Redis sub-chart to version 20.x (appVersion 7.4)
Although there are no breaking changes in the Redis Chart itself, it updates Redis from `7.2` to `7.4`. We recommend checking the release notes:
- [Redis Chart release notes (starting with v20.0.0)](https://github.com/bitnami/charts/blob/HEAD/bitnami/redis/CHANGELOG.md#2000-2024-08-09).
- [Redis 7.4 release notes](https://raw.githubusercontent.com/redis/redis/7.4/00-RELEASENOTES).
- Update Redis Cluster sub-chart to version 11.x (appVersion 7.4)
Although there are no breaking changes in the Redis Chart itself, it updates Redis from `7.2` to `7.4`. We recommend checking the release notes:
- [Redis Chart release notes (starting with v11.0.0)](https://github.com/bitnami/charts/blob/HEAD/bitnami/redis-cluster/CHANGELOG.md#1100-2024-08-09).
- [Redis 7.4 release notes](https://raw.githubusercontent.com/redis/redis/7.4/00-RELEASENOTES).
</details>
<details>
<summary>To 10.0.0</summary> <summary>To 10.0.0</summary>
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
@ -1317,23 +1160,23 @@ The first item here (`<memcache service name>`) will be different compared to th
The above changes are motivated by the idea to tidy dependencies but also have HA-ready ones at the same time. The above changes are motivated by the idea to tidy dependencies but also have HA-ready ones at the same time.
The previous `memcache` default was not HA-ready, hence we decided to switch to `redis-cluster` by default. The previous `memcache` default was not HA-ready, hence we decided to switch to `redis-cluster` by default.
If you are coming from an existing deployment and [#356](https://gitea.com/gitea/helm-gitea/issues/356) is still open, you need to set the config sections for `cache`, `session` and `queue` explicitly: If you are coming from an existing deployment and [#356](https://gitea.com/gitea/helm-chart/issues/356) is still open, you need to set the config sections for `cache`, `session` and `queue` explicitly:
```yaml ```yaml
gitea: gitea:
config: config:
session: session:
PROVIDER: redis-cluster PROVIDER: redis-cluster
PROVIDER_CONFIG: redis+cluster://:gitea@gitea-valkey-cluster-headless.<namespace>.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s& PROVIDER_CONFIG: redis+cluster://:gitea@gitea-redis-cluster-headless.<namespace>.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
cache: cache:
ENABLED: true ENABLED: true
ADAPTER: redis-cluster ADAPTER: redis-cluster
HOST: redis+cluster://:gitea@gitea-valkey-cluster-headless.<namespace>.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s& HOST: redis+cluster://:gitea@gitea-redis-cluster-headless.<namespace>.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
queue: queue:
TYPE: redis TYPE: redis
CONN_STR: redis+cluster://:gitea@gitea-valkey-cluster-headless.<namespace>.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s& CONN_STR: redis+cluster://:gitea@gitea-redis-cluster-headless.<namespace>.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
``` ```
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
@ -1342,7 +1185,7 @@ gitea:
<!-- prettier-ignore-end --> <!-- prettier-ignore-end -->
If you are facing errors like `WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED` due to this automatic transition: If you are facing errors like `WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED` due to this automatic transition:
Have a look at [this discussion](https://gitea.com/gitea/helm-gitea/issues/487#issue-220660) and either set `image.rootless: false` or manually update your `~/.ssh/known_hosts` file(s). Have a look at [this discussion](https://gitea.com/gitea/helm-chart/issues/487#issue-220660) and either set `image.rootless: false` or manually update your `~/.ssh/known_hosts` file(s).
<!-- prettier-ignore-start --> <!-- prettier-ignore-start -->
<!-- markdownlint-disable-next-line --> <!-- markdownlint-disable-next-line -->
@ -1398,7 +1241,7 @@ With respect to `values.yaml`, parameters `username`, `database` and `password`
Please adjust your `values.yaml` accordingly. Please adjust your `values.yaml` accordingly.
**Attention**: The Postgres upgrade is not automatically handled by the chart and must be done by yourself. **Attention**: The Postgres upgrade is not automatically handled by the chart and must be done by yourself.
See [this comment](https://gitea.com/gitea/helm-gitea/issues/452#issuecomment-740885) for an extensive walkthrough. See [this comment](https://gitea.com/gitea/helm-chart/issues/452#issuecomment-740885) for an extensive walkthrough.
We again highly encourage users to use an external (managed) database for production instances. We again highly encourage users to use an external (managed) database for production instances.
</details> </details>
@ -25,7 +25,7 @@ In addition, the following components are required for full HA-readiness:
- A HA-ready issue (and optionally code) indexer: `elasticsearch` or `meilisearch` - A HA-ready issue (and optionally code) indexer: `elasticsearch` or `meilisearch`
- A HA-ready external object/asset storage (`minio`) (optional, assets can also be stored on the RWX file-system) - A HA-ready external object/asset storage (`minio`) (optional, assets can also be stored on the RWX file-system)
- A HA-ready cache (`valkey-cluster`) - A HA-ready cache (`redis-cluster`)
- A HA-ready DB - A HA-ready DB
`postgres.enabled`, which defaults to `true`, must be set to `false` for a HA setup. `postgres.enabled`, which defaults to `true`, must be set to `false` for a HA setup.
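Putting these requirements together, a minimal HA-oriented override might look like the following sketch (the storage class name is a placeholder and must be RWX-capable; the single-instance database toggle is `postgresql.enabled` in `values.yaml`):

```yaml
replicaCount: 3

persistence:
  enabled: true
  accessModes:
    - ReadWriteMany
  storageClass: my-rwx-storage-class # placeholder

postgresql:
  enabled: false
postgresql-ha:
  enabled: true

gitea:
  config:
    indexer:
      # HA-ready indexer settings; 'db' only if the DB itself is HA-ready
      ISSUE_INDEXER_TYPE: db
      REPO_INDEXER_ENABLED: false
```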
@ -72,33 +72,33 @@ persistence:
## Cache, session and queue ## Cache, session and queue
A `valkey` instance is required for the in-memory cache. A `redis` instance is required for the in-memory cache.
Two options exist: Two options exist:
- `valkey` - `redis`
- `valkey-cluster` - `redis-cluster`
The chart provides `valkey-cluster` as a dependency as this one can be used for both HA and non-HA setups. The chart provides `redis-cluster` as a dependency as this one can be used for both HA and non-HA setups.
You're also welcome to go with `valkey` if you prefer or already have a running instance. You're also welcome to go with `redis` if you prefer or already have a running instance.
It should be noted that `valkey-cluster` support is only available starting with Gitea 1.19.2. It should be noted that `redis-cluster` support is only available starting with Gitea 1.19.2.
You can also configure an external (managed) `valkey` instance to be used. You can also configure an external (managed) `redis` instance to be used.
To do so, you need to set the following configuration values yourself: To do so, you need to set the following configuration values yourself:
- `gitea.config.queue.TYPE`: `valkey` - `gitea.config.queue.TYPE`: `redis`
- `gitea.config.queue.CONN_STR`: `<your valkey connection string>` - `gitea.config.queue.CONN_STR`: `<your redis connection string>`
- `gitea.config.session.PROVIDER`: `valkey` - `gitea.config.session.PROVIDER`: `redis`
- `gitea.config.session.PROVIDER_CONFIG`: `<your valkey connection string>` - `gitea.config.session.PROVIDER_CONFIG`: `<your redis connection string>`
- `gitea.config.cache.ENABLED`: `true` - `gitea.config.cache.ENABLED`: `true`
- `gitea.config.cache.ADAPTER`: `valkey` - `gitea.config.cache.ADAPTER`: `redis`
- `gitea.config.cache.HOST`: `<your valkey connection string>` - `gitea.config.cache.HOST`: `<your redis connection string>`
By default, the `valkey-cluster` chart provisions three standalone master nodes of which each has a single replica. By default, the `redis-cluster` chart provisions three standalone master nodes of which each has a single replica.
To reduce the number of pods for a default Gitea deployment, we opted to omit the replicas (`replicas: 0`) by default. To reduce the number of pods for a default Gitea deployment, we opted to omit the replicas (`replicas: 0`) by default.
Only the minimum required number of master pods for a functional `valkey-cluster` deployment are provisioned. Only the minimum required number of master pods for a functional `redis-cluster` deployment are provisioned.
For a "proper" `valkey-cluster` setup however, we recommend to set `replicas: 1` and `nodes: 6`. For a "proper" `redis-cluster` setup however, we recommend to set `replicas: 1` and `nodes: 6`.
## Object and asset storage ## Object and asset storage
package-lock.json (generated, 1089 changed lines): file diff suppressed because it is too large.
@ -1,6 +1,6 @@
{ {
"name": "gitea-helm", "name": "gitea-helm-chart",
"homepage": "https://gitea.com/gitea/helm-gitea.git", "homepage": "https://gitea.com/gitea/helm-chart.git",
"license": "MIT", "license": "MIT",
"private": true, "private": true,
"engineStrict": true, "engineStrict": true,
@ -14,6 +14,6 @@
}, },
"devDependencies": { "devDependencies": {
"@bitnami/readme-generator-for-helm": "^2.5.0", "@bitnami/readme-generator-for-helm": "^2.5.0",
"markdownlint-cli": "^0.44.0" "markdownlint-cli": "^0.38.0"
} }
} }
@ -9,19 +9,13 @@
labels: [ labels: [
'kind/dependency', 'kind/dependency',
], ],
digest: {
automerge: true,
},
automergeStrategy: 'squash', automergeStrategy: 'squash',
'git-submodules': {
enabled: true,
},
customManagers: [ customManagers: [
{ {
description: 'Gitea-version of https://docs.renovatebot.com/presets-regexManagers/#regexmanagersgithubactionsversions', description: 'Gitea-version of https://docs.renovatebot.com/presets-regexManagers/#regexmanagersgithubactionsversions',
customType: 'regex', customType: 'regex',
managerFilePatterns: [ fileMatch: [
'/.gitea/workflows/.+\\.ya?ml$/', '.gitea/workflows/.+\\.ya?ml$',
], ],
matchStrings: [ matchStrings: [
'# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[a-z-0-9]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*:\\s*["\']?(?<currentValue>.+?)["\']?\\s', '# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (?:lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[a-z-0-9]+?))?\\s+[A-Za-z0-9_]+?_VERSION\\s*:\\s*["\']?(?<currentValue>.+?)["\']?\\s',
@ -30,24 +24,12 @@
{ {
description: 'Detect helm-unittest yaml schema file', description: 'Detect helm-unittest yaml schema file',
customType: 'regex', customType: 'regex',
managerFilePatterns: [ fileMatch: ['.vscode/settings\\.json$'],
'/.vscode/settings\\.json$/',
],
matchStrings: [ matchStrings: [
'https:\\/\\/raw\\.githubusercontent\\.com\\/(?<depName>[^\\s]+?)\\/(?<currentValue>v[0-9.]+?)\\/schema\\/helm-testsuite\\.json', 'https:\\/\\/raw\\.githubusercontent\\.com\\/(?<depName>[^\\s]+?)\\/(?<currentValue>v[0-9.]+?)\\/schema\\/helm-testsuite\\.json',
], ],
datasourceTemplate: 'github-releases', datasourceTemplate: 'github-releases',
}, },
{
description: 'Automatically detect new Gitea releases',
customType: 'regex',
managerFilePatterns: [
'/(^|/)Chart\\.yaml$/',
],
matchStrings: [
'# renovate datasource=(?<datasource>\\S+) depName=(?<depName>\\S+) extractVersion=(?<extractVersion>\\S+)\\nappVersion:\\s?(?<currentValue>\\S+)\\n',
],
},
], ],
packageRules: [ packageRules: [
{ {
@ -61,17 +43,6 @@
'digest', 'digest',
], ],
}, },
{
groupName: 'bats testing framework',
matchManagers: [
'git-submodules',
],
matchUpdateTypes: [
'minor',
'patch',
'digest',
],
},
{ {
groupName: 'workflow dependencies (minor & patch)', groupName: 'workflow dependencies (minor & patch)',
matchManagers: [ matchManagers: [
@ -84,41 +55,6 @@
'patch', 'patch',
'digest', 'digest',
], ],
matchFileNames: [
'!Chart.yaml',
],
},
{
description: 'Update README.md on changes in values.yaml',
matchManagers: [
'helm-values',
],
postUpgradeTasks: {
commands: [
'install-tool node',
'make readme',
],
fileFilters: [
'README.md',
],
executionMode: 'update',
},
},
{
description: 'Override changelog url for Helm image, to have release notes in our PRs',
matchDepNames: [
'alpine/helm',
],
changelogUrl: 'https://github.com/helm/helm',
},
{
description: 'Bump Gitea as fast as possible - not only on weekends',
matchDepNames: [
'go-gitea/gitea',
],
schedule: [
'at any time',
],
}, },
], ],
} }
@ -1,43 +0,0 @@
#!/bin/sh
set -eu
timeout_delay=15
check_token() {
set +e
echo "Checking for existing token..."
token="$(kubectl get secret "$SECRET_NAME" -o jsonpath="{.data['token']}" 2> /dev/null)"
[ $? -ne 0 ] && return 1
[ -z "$token" ] && return 2
return 0
}
create_token() {
echo "Waiting for new token to be generated..."
begin=$(date +%s)
end=$((begin + timeout_delay))
while true; do
[ -f /data/actions/token ] && return 0
[ "$(date +%s)" -gt $end ] && return 1
sleep 5
done
}
store_token() {
echo "Storing the token in Kubernetes secret..."
kubectl patch secret "$SECRET_NAME" -p "{\"data\":{\"token\":\"$(base64 /data/actions/token | tr -d '\n')\"}}"
}
if check_token; then
echo "Key already in place, exiting."
exit
fi
if ! create_token; then
echo "Checking for an existing act runner token in secret $SECRET_NAME timed out after $timeout_delay"
exit 1
fi
store_token
@ -1,154 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
function env2ini::log() {
printf "${1}\n"
}
function env2ini::read_config_to_env() {
local section="${1}"
local line="${2}"
if [[ -z "${line}" ]]; then
# skip empty line
return
fi
# 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
if [[ -z "${setting}" ]]; then
env2ini::log ' ! invalid setting'
exit 1
fi
local value=''
local regex="^${setting}(\s*)=(\s*)(.*)"
if [[ $line =~ $regex ]]; then
value="${BASH_REMATCH[3]}"
else
env2ini::log ' ! invalid setting'
exit 1
fi
env2ini::log " + '${setting}'"
if [[ -z "${section}" ]]; then
export "GITEA____${setting^^}=${value}" # '^^' makes the variable content uppercase
return
fi
local masked_section="${section//./_0X2E_}" # '//' instructs to replace all matches
masked_section="${masked_section//-/_0X2D_}"
export "GITEA__${masked_section^^}__${setting^^}=${value}" # '^^' makes the variable content uppercase
}
function env2ini::reload_preset_envs() {
env2ini::log "Reloading preset envs..."
while read -r line; do
if [[ -z "${line}" ]]; then
# skip empty line
return
fi
# 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
if [[ -z "${setting}" ]]; then
env2ini::log ' ! invalid setting'
exit 1
fi
local value=''
local regex="^${setting}(\s*)=(\s*)(.*)"
if [[ $line =~ $regex ]]; then
value="${BASH_REMATCH[3]}"
else
env2ini::log ' ! invalid setting'
exit 1
fi
env2ini::log " + '${setting}'"
export "${setting^^}=${value}" # '^^' makes the variable content uppercase
done < "$TMP_EXISTING_ENVS_FILE"
rm $TMP_EXISTING_ENVS_FILE
}
function env2ini::process_config_file() {
local config_file="${1}"
local section="$(basename "${config_file}")"
if [[ $section == '_generals_' ]]; then
env2ini::log " [ini root]"
section=''
else
env2ini::log " ${section}"
fi
while read -r line; do
env2ini::read_config_to_env "${section}" "${line}"
done < <(awk 1 "${config_file}") # Helm .toYaml trims the trailing new line which breaks line processing; awk 1 ... adds it back while reading
}
function env2ini::load_config_sources() {
local path="${1}"
if [[ -d "${path}" ]]; then
env2ini::log "Processing $(basename "${path}")..."
while read -d '' configFile; do
env2ini::process_config_file "${configFile}"
done < <(find "${path}" -type l -not -name '..data' -print0)
env2ini::log "\n"
fi
}
function env2ini::generate_initial_secrets() {
# These environment variables will either be
# - overwritten with user defined values,
# - initially used to set up Gitea
# Anyway, they won't harm existing app.ini files
export GITEA__SECURITY__INTERNAL_TOKEN=$(gitea generate secret INTERNAL_TOKEN)
export GITEA__SECURITY__SECRET_KEY=$(gitea generate secret SECRET_KEY)
export GITEA__OAUTH2__JWT_SECRET=$(gitea generate secret JWT_SECRET)
export GITEA__SERVER__LFS_JWT_SECRET=$(gitea generate secret LFS_JWT_SECRET)
env2ini::log "...Initial secrets generated\n"
}
# save existing envs prior to script execution. Necessary to keep order of preexisting and custom envs
env | (grep -e '^GITEA__' || [[ $? == 1 ]]) > $TMP_EXISTING_ENVS_FILE
# MUST BE CALLED BEFORE OTHER CONFIGURATION
env2ini::generate_initial_secrets
env2ini::load_config_sources "$ENV_TO_INI_MOUNT_POINT/inlines/"
env2ini::load_config_sources "$ENV_TO_INI_MOUNT_POINT/additionals/"
# load existing envs to override auto generated envs
env2ini::reload_preset_envs
env2ini::log "=== All configuration sources loaded ===\n"
# safety to prevent rewrite of secret keys if an app.ini already exists
if [ -f ${GITEA_APP_INI} ]; then
env2ini::log 'An app.ini file already exists. To prevent overwriting secret keys, these settings are dropped and remain unchanged:'
env2ini::log ' - security.INTERNAL_TOKEN'
env2ini::log ' - security.SECRET_KEY'
env2ini::log ' - oauth2.JWT_SECRET'
env2ini::log ' - server.LFS_JWT_SECRET'
unset GITEA__SECURITY__INTERNAL_TOKEN
unset GITEA__SECURITY__SECRET_KEY
unset GITEA__OAUTH2__JWT_SECRET
unset GITEA__SERVER__LFS_JWT_SECRET
fi
environment-to-ini -o $GITEA_APP_INI
@ -1,4 +0,0 @@
#!/usr/bin/env bash
set -eu
gpg --batch --import "$TMP_RAW_GPG_KEY"
@ -3,6 +3,26 @@
Expand the name of the chart. Expand the name of the chart.
*/}} */}}
{{- /* multiple replicas assertions */ -}}
{{- if gt .Values.replicaCount 1.0 -}}
{{- fail "When using multiple replicas, a RWX file system is required" -}}
{{- if eq (get (.Values.persistence.accessModes 0) "ReadWriteOnce") -}}
{{- fail "When using multiple replicas, a RWX file system is required" -}}
{{- end }}
{{- if eq (get .Values.gitea.config.indexer "ISSUE_INDEXER_TYPE") "bleve" -}}
{{- fail "When using multiple replicas, the repo indexer must be set to 'meilisearch' or 'elasticsearch'" -}}
{{- end }}
{{- if and (eq .Values.gitea.config.indexer.REPO_INDEXER_TYPE "bleve") (eq .Values.gitea.config.indexer.REPO_INDEXER_ENABLED "true") -}}
{{- fail "When using multiple replicas, the repo indexer must be set to 'meilisearch' or 'elasticsearch'" -}}
{{- end }}
{{- if eq .Values.gitea.config.indexer.ISSUE_INDEXER_TYPE "bleve" -}}
{{- (printf "DEBUG: When using multiple replicas, the repo indexer must be set to 'meilisearch' or 'elasticsearch'") | fail -}}
{{- end }}
{{- end }}
{{- define "gitea.name" -}} {{- define "gitea.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}} {{- end -}}
@ -25,13 +45,6 @@ If release name contains chart name it will be used as a full name.
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{/*
Create a default worker name.
*/}}
{{- define "gitea.workername" -}}
{{- printf "%s-%s" .global.Release.Name .worker | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/* {{/*
Create chart name and version as used by the chart label. Create chart name and version as used by the chart label.
*/}} */}}
@ -47,7 +60,7 @@ Create image name and tag used by the deployment.
{{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}} {{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}}
{{- $repository := .Values.image.repository -}} {{- $repository := .Values.image.repository -}}
{{- $separator := ":" -}} {{- $separator := ":" -}}
{{- $tag := .Values.image.tag | default .Chart.AppVersion | toString -}} {{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
{{- $rootless := ternary "-rootless" "" (.Values.image.rootless) -}} {{- $rootless := ternary "-rootless" "" (.Values.image.rootless) -}}
{{- $digest := "" -}} {{- $digest := "" -}}
{{- if .Values.image.digest }} {{- if .Values.image.digest }}
@ -81,7 +94,7 @@ imagePullSecrets:
Storage Class Storage Class
*/}} */}}
{{- define "gitea.persistence.storageClass" -}} {{- define "gitea.persistence.storageClass" -}}
{{- $storageClass := (tpl ( default "" .Values.persistence.storageClass) .) | default (tpl ( default "" .Values.global.storageClass) .) }} {{- $storageClass := .Values.global.storageClass | default .Values.persistence.storageClass }}
{{- if $storageClass }} {{- if $storageClass }}
storageClassName: {{ $storageClass | quote }} storageClassName: {{ $storageClass | quote }}
{{- end }} {{- end }}
@ -99,15 +112,6 @@ version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}} {{- end -}}
{{- define "gitea.labels.actRunner" -}}
helm.sh/chart: {{ include "gitea.chart" . }}
app: {{ include "gitea.name" . }}-act-runner
{{ include "gitea.selectorLabels.actRunner" . }}
app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/* {{/*
Selector labels Selector labels
*/}} */}}
@ -116,11 +120,6 @@ app.kubernetes.io/name: {{ include "gitea.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}} {{- end -}}
{{- define "gitea.selectorLabels.actRunner" -}}
app.kubernetes.io/name: {{ include "gitea.name" . }}-act-runner
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "postgresql-ha.dns" -}} {{- define "postgresql-ha.dns" -}}
{{- if (index .Values "postgresql-ha").enabled -}} {{- if (index .Values "postgresql-ha").enabled -}}
{{- printf "%s-postgresql-ha-pgpool.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "postgresql-ha" "service" "ports" "postgresql") -}} {{- printf "%s-postgresql-ha-pgpool.%s.svc.%s:%g" .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "postgresql-ha" "service" "ports" "postgresql") -}}
@ -133,29 +132,21 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{- define "valkey.dns" -}} {{- define "redis.dns" -}}
{{- if and ((index .Values "valkey-cluster").enabled) ((index .Values "valkey").enabled) -}} {{- if (index .Values "redis-cluster").enabled -}}
{{- fail "valkey and valkey-cluster cannot be enabled at the same time. Please only choose one." -}} {{- printf "redis+cluster://:%s@%s-redis-cluster-headless.%s.svc.%s:%g/0?pool_size=100&idle_timeout=180s&" (index .Values "redis-cluster").global.redis.password .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "redis-cluster").service.ports.redis -}}
{{- else if (index .Values "valkey-cluster").enabled -}}
{{- printf "redis+cluster://:%s@%s-valkey-cluster-headless.%s.svc.%s:%g/0?pool_size=100&idle_timeout=180s&" (index .Values "valkey-cluster").global.valkey.password .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "valkey-cluster").service.ports.valkey -}}
{{- else if (index .Values "valkey").enabled -}}
{{- printf "redis://:%s@%s-valkey-headless.%s.svc.%s:%g/0?pool_size=100&idle_timeout=180s&" (index .Values "valkey").global.valkey.password .Release.Name .Release.Namespace .Values.clusterDomain (index .Values "valkey").master.service.ports.valkey -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{- define "valkey.port" -}} {{- define "redis.port" -}}
{{- if (index .Values "valkey-cluster").enabled -}} {{- if (index .Values "redis-cluster").enabled -}}
{{ (index .Values "valkey-cluster").service.ports.valkey }} {{ (index .Values "redis-cluster").service.ports.redis }}
{{- else if (index .Values "valkey").enabled -}}
{{ (index .Values "valkey").master.service.ports.valkey }}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{- define "valkey.servicename" -}} {{- define "redis.servicename" -}}
{{- if (index .Values "valkey-cluster").enabled -}} {{- if (index .Values "redis-cluster").enabled -}}
{{- printf "%s-valkey-cluster-headless.%s.svc.%s" .Release.Name .Release.Namespace .Values.clusterDomain -}} {{- printf "%s-redis-cluster-headless.%s.svc.%s" .Release.Name .Release.Namespace .Values.clusterDomain -}}
{{- else if (index .Values "valkey").enabled -}}
{{- printf "%s-valkey-headless.%s.svc.%s" .Release.Name .Release.Namespace .Values.clusterDomain -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
@ -284,9 +275,6 @@ https
{{- if not (hasKey .Values.gitea.config "indexer") -}} {{- if not (hasKey .Values.gitea.config "indexer") -}}
{{- $_ := set .Values.gitea.config "indexer" dict -}} {{- $_ := set .Values.gitea.config "indexer" dict -}}
{{- end -}} {{- end -}}
{{- if not (hasKey .Values.gitea.config "actions") -}}
{{- $_ := set .Values.gitea.config "actions" dict -}}
{{- end -}}
{{- end -}} {{- end -}}
{{- define "gitea.inline_configuration.defaults" -}} {{- define "gitea.inline_configuration.defaults" -}}
@ -302,17 +290,14 @@ https
{{- if not (hasKey .Values.gitea.config.metrics "ENABLED") -}} {{- if not (hasKey .Values.gitea.config.metrics "ENABLED") -}}
{{- $_ := set .Values.gitea.config.metrics "ENABLED" .Values.gitea.metrics.enabled -}} {{- $_ := set .Values.gitea.config.metrics "ENABLED" .Values.gitea.metrics.enabled -}}
{{- end -}} {{- end -}}
{{- if and (not (hasKey .Values.gitea.config.metrics "TOKEN")) (.Values.gitea.metrics.token) (.Values.gitea.metrics.enabled) -}} {{- /* redis queue */ -}}
{{- $_ := set .Values.gitea.config.metrics "TOKEN" .Values.gitea.metrics.token -}} {{- if (index .Values "redis-cluster").enabled -}}
{{- end -}}
{{- /* valkey queue */ -}}
{{- if or ((index .Values "valkey-cluster").enabled) ((index .Values "valkey").enabled) -}}
{{- $_ := set .Values.gitea.config.queue "TYPE" "redis" -}} {{- $_ := set .Values.gitea.config.queue "TYPE" "redis" -}}
{{- $_ := set .Values.gitea.config.queue "CONN_STR" (include "valkey.dns" .) -}} {{- $_ := set .Values.gitea.config.queue "CONN_STR" (include "redis.dns" .) -}}
{{- $_ := set .Values.gitea.config.session "PROVIDER" "redis" -}} {{- $_ := set .Values.gitea.config.session "PROVIDER" "redis" -}}
{{- $_ := set .Values.gitea.config.session "PROVIDER_CONFIG" (include "valkey.dns" .) -}} {{- $_ := set .Values.gitea.config.session "PROVIDER_CONFIG" (include "redis.dns" .) -}}
{{- $_ := set .Values.gitea.config.cache "ADAPTER" "redis" -}} {{- $_ := set .Values.gitea.config.cache "ADAPTER" "redis" -}}
{{- $_ := set .Values.gitea.config.cache "HOST" (include "valkey.dns" .) -}} {{- $_ := set .Values.gitea.config.cache "HOST" (include "redis.dns" .) -}}
{{- else -}} {{- else -}}
{{- if not (get .Values.gitea.config.session "PROVIDER") -}} {{- if not (get .Values.gitea.config.session "PROVIDER") -}}
{{- $_ := set .Values.gitea.config.session "PROVIDER" "memory" -}} {{- $_ := set .Values.gitea.config.session "PROVIDER" "memory" -}}
@ -427,45 +412,3 @@ https
{{- define "gitea.serviceAccountName" -}} {{- define "gitea.serviceAccountName" -}}
{{ .Values.serviceAccount.name | default (include "gitea.fullname" .) }} {{ .Values.serviceAccount.name | default (include "gitea.fullname" .) }}
{{- end -}} {{- end -}}
{{- define "ingress.annotations" -}}
{{- if .Values.ingress.annotations }}
annotations:
{{- $tp := typeOf .Values.ingress.annotations }}
{{- if eq $tp "string" }}
{{- tpl .Values.ingress.annotations . | nindent 4 }}
{{- else }}
{{- toYaml .Values.ingress.annotations | nindent 4 }}
{{- end }}
{{- end }}
{{- end -}}
{{- define "gitea.admin.passwordMode" -}}
{{- if has .Values.gitea.admin.passwordMode (tuple "keepUpdated" "initialOnlyNoReset" "initialOnlyRequireReset") -}}
{{ .Values.gitea.admin.passwordMode }}
{{- else -}}
{{ printf "gitea.admin.passwordMode must be set to one of 'keepUpdated', 'initialOnlyNoReset', or 'initialOnlyRequireReset'. Received: '%s'" .Values.gitea.admin.passwordMode | fail }}
{{- end -}}
{{- end -}}
{{/* Create a functioning probe object for rendering. Given argument must be either a livenessProbe, readinessProbe, or startupProbe */}}
{{- define "gitea.deployment.probe" -}}
{{- $probe := unset . "enabled" -}}
{{- $probeKeys := keys $probe -}}
{{- $containsCustomMethod := false -}}
{{- $chartDefaultMethod := "tcpSocket" -}}
{{- $nonChartDefaultMethods := list "exec" "httpGet" "grpc" -}}
{{- range $probeKeys -}}
{{- if has . $nonChartDefaultMethods -}}
{{- $containsCustomMethod = true -}}
{{- end -}}
{{- end -}}
{{- if $containsCustomMethod -}}
{{- $probe = unset . $chartDefaultMethod -}}
{{- end -}}
{{- toYaml $probe -}}
{{- end -}}
{{- define "gitea.metrics-secret-name" -}}
{{ default (printf "%s-metrics-secret" (include "gitea.fullname" .)) }}
{{- end -}}
@ -1,3 +0,0 @@
{{- if .Values.actions -}}
{{- fail "The actions sub-chart has been outsourced to a dedicated chart available at https://gitea.com/gitea/helm-actions. For assistance with the migration process, check https://gitea.com/gitea/helm-actions/issues/9." -}}
{{- end -}}
@ -2,7 +2,6 @@ apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
name: {{ include "gitea.fullname" . }}-inline-config name: {{ include "gitea.fullname" . }}-inline-config
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
type: Opaque type: Opaque
@ -13,45 +12,193 @@ apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
name: {{ include "gitea.fullname" . }} name: {{ include "gitea.fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
type: Opaque type: Opaque
stringData: stringData:
{{ (.Files.Glob "scripts/init-containers/config/*.sh").AsConfig | indent 2 }}
assertions: | assertions: |
{{- /*assert that only one PG dep is enabled */ -}} {{- /*assert that only one PG dep is enabled */ -}}
{{- if and (.Values.postgresql.enabled) (index .Values "postgresql-ha" "enabled") -}} {{- if and (.Values.postgresql.enabled) (index .Values "postgresql-ha" "enabled") -}}
{{- fail "Only one of postgresql or postgresql-ha can be enabled at the same time." -}} {{- fail "Only one of postgresql or postgresql-ha can be enabled at the same time." -}}
{{- end }}
{{- /* multiple replicas assertions */ -}}
{{- if gt .Values.replicaCount 1.0 -}}
{{- if (get (get .Values.gitea.config "cron.GIT_GC_REPOS") "ENABLED") -}}
{{- fail "Invoking the garbage collector via CRON is not yet supported when running with multiple replicas. Please set 'cron.GIT_GC_REPOS.enabled = false'." -}}
{{- end }}
{{- if eq (first .Values.persistence.accessModes) "ReadWriteOnce" -}}
{{- fail "When using multiple replicas, a RWX file system is required and gitea.persistence.accessModes[0] must be set to ReadWriteMany." -}}
{{- end }}
{{- if eq (get .Values.gitea.config.indexer "ISSUE_INDEXER_TYPE") "bleve" -}}
{{- fail "When using multiple replicas, the issue indexer (gitea.config.indexer.ISSUE_INDEXER_TYPE) must be set to a HA-ready provider such as 'meilisearch', 'elasticsearch' or 'db' (if the DB is HA-ready)." -}}
{{- end }}
{{- if .Values.gitea.config.indexer.REPO_INDEXER_TYPE -}}
{{- if eq (get .Values.gitea.config.indexer "REPO_INDEXER_TYPE") "bleve" -}}
{{- if .Values.gitea.config.indexer.REPO_INDEXER_ENABLED -}}
{{- if eq (get .Values.gitea.config.indexer "REPO_INDEXER_ENABLED") "true" -}}
{{- fail "When using multiple replicas, the repo indexer (gitea.config.indexer.REPO_INDEXER_TYPE) must be set to 'meilisearch' or 'elasticsearch' or disabled." -}}
{{- end }}
{{- end }}
{{- end }} {{- end }}
{{- end }}
{{- /* multiple replicas assertions */ -}}
{{- if gt .Values.replicaCount 1.0 -}} {{- end }}
{{- if .Values.gitea.config.cron -}} config_environment.sh: |-
{{- if .Values.gitea.config.cron.GIT_GC_REPOS -}} #!/usr/bin/env bash
{{- if eq .Values.gitea.config.cron.GIT_GC_REPOS.ENABLED true -}} set -euo pipefail
{{ fail "Invoking the garbage collector via CRON is not yet supported when running with multiple replicas. Please set 'gitea.config.cron.GIT_GC_REPOS.enabled = false'." }}
{{- end }} function env2ini::log() {
{{- end }} printf "${1}\n"
{{- end }} }
{{- if eq (first .Values.persistence.accessModes) "ReadWriteOnce" -}} function env2ini::read_config_to_env() {
{{- fail "When using multiple replicas, a RWX file system is required and persistence.accessModes[0] must be set to ReadWriteMany." -}} local section="${1}"
{{- end }} local line="${2}"
{{- if .Values.gitea.config.indexer -}}
{{- if eq .Values.gitea.config.indexer.ISSUE_INDEXER_TYPE "bleve" -}} if [[ -z "${line}" ]]; then
{{- fail "When using multiple replicas, the issue indexer (gitea.config.indexer.ISSUE_INDEXER_TYPE) must be set to a HA-ready provider such as 'meilisearch', 'elasticsearch' or 'db' (if the DB is HA-ready)." -}} # skip empty line
{{- end }} return
{{- if .Values.gitea.config.indexer.REPO_INDEXER_TYPE -}} fi
{{- if eq .Values.gitea.config.indexer.REPO_INDEXER_TYPE "bleve" -}}
{{- if .Values.gitea.config.indexer.REPO_INDEXER_ENABLED -}}
{{- if eq .Values.gitea.config.indexer.REPO_INDEXER_ENABLED true -}}
{{- fail "When using multiple replicas, the repo indexer (gitea.config.indexer.REPO_INDEXER_TYPE) must be set to 'meilisearch' or 'elasticsearch' or disabled." -}}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }} # 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
if [[ -z "${setting}" ]]; then
env2ini::log ' ! invalid setting'
exit 1
fi
local value=''
local regex="^${setting}(\s*)=(\s*)(.*)"
if [[ $line =~ $regex ]]; then
value="${BASH_REMATCH[3]}"
else
env2ini::log ' ! invalid setting'
exit 1
fi
env2ini::log " + '${setting}'"
if [[ -z "${section}" ]]; then
export "GITEA____${setting^^}=${value}" # '^^' makes the variable content uppercase
return
fi
local masked_section="${section//./_0X2E_}" # '//' instructs to replace all matches
masked_section="${masked_section//-/_0X2D_}"
export "GITEA__${masked_section^^}__${setting^^}=${value}" # '^^' makes the variable content uppercase
}
function env2ini::reload_preset_envs() {
env2ini::log "Reloading preset envs..."
while read -r line; do
if [[ -z "${line}" ]]; then
# skip empty line
return
fi
# 'xargs echo -n' trims all leading/trailing whitespaces and a trailing new line
local setting="$(awk -F '=' '{print $1}' <<< "${line}" | xargs echo -n)"
if [[ -z "${setting}" ]]; then
env2ini::log ' ! invalid setting'
exit 1
fi
local value=''
local regex="^${setting}(\s*)=(\s*)(.*)"
if [[ $line =~ $regex ]]; then
value="${BASH_REMATCH[3]}"
else
env2ini::log ' ! invalid setting'
exit 1
fi
env2ini::log " + '${setting}'"
export "${setting^^}=${value}" # '^^' makes the variable content uppercase
done < "/tmp/existing-envs"
rm /tmp/existing-envs
}
function env2ini::process_config_file() {
local config_file="${1}"
local section="$(basename "${config_file}")"
if [[ $section == '_generals_' ]]; then
env2ini::log " [ini root]"
section=''
else
env2ini::log " ${section}"
fi
while read -r line; do
env2ini::read_config_to_env "${section}" "${line}"
done < <(awk 1 "${config_file}") # Helm .toYaml trims the trailing new line which breaks line processing; awk 1 ... adds it back while reading
}
function env2ini::load_config_sources() {
local path="${1}"
if [[ -d "${path}" ]]; then
env2ini::log "Processing $(basename "${path}")..."
while read -d '' configFile; do
env2ini::process_config_file "${configFile}"
done < <(find "${path}" -type l -not -name '..data' -print0)
env2ini::log "\n"
fi
}
function env2ini::generate_initial_secrets() {
# These environment variables will either be
# - overwritten with user defined values,
# - initially used to set up Gitea
# Anyway, they won't harm existing app.ini files
export GITEA__SECURITY__INTERNAL_TOKEN=$(gitea generate secret INTERNAL_TOKEN)
export GITEA__SECURITY__SECRET_KEY=$(gitea generate secret SECRET_KEY)
export GITEA__OAUTH2__JWT_SECRET=$(gitea generate secret JWT_SECRET)
export GITEA__SERVER__LFS_JWT_SECRET=$(gitea generate secret LFS_JWT_SECRET)
env2ini::log "...Initial secrets generated\n"
}
# save existing envs prior to script execution. Necessary to keep order of preexisting and custom envs
env | (grep -e '^GITEA__' || [[ $? == 1 ]]) > /tmp/existing-envs
# MUST BE CALLED BEFORE OTHER CONFIGURATION
env2ini::generate_initial_secrets
env2ini::load_config_sources '/env-to-ini-mounts/inlines/'
env2ini::load_config_sources '/env-to-ini-mounts/additionals/'
# load existing envs to override auto generated envs
env2ini::reload_preset_envs
env2ini::log "=== All configuration sources loaded ===\n"
# safety to prevent rewrite of secret keys if an app.ini already exists
if [ -f ${GITEA_APP_INI} ]; then
env2ini::log 'An app.ini file already exists. To prevent overwriting secret keys, these settings are dropped and remain unchanged:'
env2ini::log ' - security.INTERNAL_TOKEN'
env2ini::log ' - security.SECRET_KEY'
env2ini::log ' - oauth2.JWT_SECRET'
env2ini::log ' - server.LFS_JWT_SECRET'
unset GITEA__SECURITY__INTERNAL_TOKEN
unset GITEA__SECURITY__SECRET_KEY
unset GITEA__OAUTH2__JWT_SECRET
unset GITEA__SERVER__LFS_JWT_SECRET
fi
environment-to-ini -o $GITEA_APP_INI
@ -2,16 +2,12 @@ apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: {{ include "gitea.fullname" . }} name: {{ include "gitea.fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
annotations: annotations:
{{- if .Values.deployment.annotations }} {{- if .Values.deployment.annotations }}
{{- toYaml .Values.deployment.annotations | nindent 4 }} {{- toYaml .Values.deployment.annotations | nindent 4 }}
{{- end }} {{- end }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.deployment.labels }}
{{- toYaml .Values.deployment.labels | nindent 4 }}
{{- end }}
spec: spec:
replicas: {{ .Values.replicaCount }} replicas: {{ .Values.replicaCount }}
strategy: strategy:
@ -62,8 +58,7 @@ spec:
- name: init-directories - name: init-directories
image: "{{ include "gitea.image" . }}" image: "{{ include "gitea.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
command: command: ["/usr/sbin/init_directory_structure.sh"]
- "{{ .Values.initContainersScriptsVolumeMountPath }}/init_directory_structure.sh"
env: env:
- name: GITEA_APP_INI - name: GITEA_APP_INI
value: /data/gitea/conf/app.ini value: /data/gitea/conf/app.ini
@ -82,7 +77,7 @@ spec:
{{- end }} {{- end }}
volumeMounts: volumeMounts:
- name: init - name: init
mountPath: {{ .Values.initContainersScriptsVolumeMountPath }} mountPath: /usr/sbin
- name: temp - name: temp
mountPath: /tmp mountPath: /tmp
- name: data - name: data
@ -98,8 +93,7 @@ spec:
- name: init-app-ini - name: init-app-ini
image: "{{ include "gitea.image" . }}" image: "{{ include "gitea.image" . }}"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
command: command: ["/usr/sbin/config_environment.sh"]
- "{{ .Values.initContainersScriptsVolumeMountPath }}/config_environment.sh"
env: env:
- name: GITEA_APP_INI - name: GITEA_APP_INI
value: /data/gitea/conf/app.ini value: /data/gitea/conf/app.ini
@ -109,19 +103,15 @@ spec:
value: /data value: /data
- name: GITEA_TEMP - name: GITEA_TEMP
value: /tmp/gitea value: /tmp/gitea
- name: TMP_EXISTING_ENVS_FILE
value: /tmp/existing-envs
- name: ENV_TO_INI_MOUNT_POINT
value: /env-to-ini-mounts
{{- if .Values.deployment.env }} {{- if .Values.deployment.env }}
{{- toYaml .Values.deployment.env | nindent 12 }} {{- toYaml .Values.deployment.env | nindent 12 }}
{{- end }} {{- end }}
{{- if .Values.gitea.additionalConfigFromEnvs }} {{- if .Values.gitea.additionalConfigFromEnvs }}
{{- tpl (toYaml .Values.gitea.additionalConfigFromEnvs) $ | nindent 12 }} {{- toYaml .Values.gitea.additionalConfigFromEnvs | nindent 12 }}
{{- end }} {{- end }}
volumeMounts: volumeMounts:
- name: config - name: config
mountPath: {{ .Values.initContainersScriptsVolumeMountPath }} mountPath: /usr/sbin
- name: temp - name: temp
mountPath: /tmp mountPath: /tmp
- name: data - name: data
@ -143,8 +133,7 @@ spec:
{{- if .Values.signing.enabled }} {{- if .Values.signing.enabled }}
- name: configure-gpg - name: configure-gpg
image: "{{ include "gitea.image" . }}" image: "{{ include "gitea.image" . }}"
command: command: ["/usr/sbin/configure_gpg_environment.sh"]
- "{{ .Values.initContainersScriptsVolumeMountPath }}/configure_gpg_environment.sh"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext: securityContext:
{{- /* By default this container runs as user 1000 unless otherwise stated */ -}} {{- /* By default this container runs as user 1000 unless otherwise stated */ -}}
@ -156,11 +145,9 @@ spec:
env: env:
- name: GNUPGHOME - name: GNUPGHOME
value: {{ .Values.signing.gpgHome }} value: {{ .Values.signing.gpgHome }}
- name: TMP_RAW_GPG_KEY
value: /raw/private.asc
volumeMounts: volumeMounts:
- name: init - name: init
mountPath: {{ .Values.initContainersScriptsVolumeMountPath }} mountPath: /usr/sbin
- name: data - name: data
mountPath: /data mountPath: /data
{{- if .Values.persistence.subPath }} {{- if .Values.persistence.subPath }}
@ -177,8 +164,7 @@ spec:
{{- end }} {{- end }}
- name: configure-gitea - name: configure-gitea
image: "{{ include "gitea.image" . }}" image: "{{ include "gitea.image" . }}"
command: command: ["/usr/sbin/configure_gitea.sh"]
- "{{ .Values.initContainersScriptsVolumeMountPath }}/configure_gitea.sh"
imagePullPolicy: {{ .Values.image.pullPolicy }} imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext: securityContext:
{{- /* By default this container runs as user 1000 unless otherwise stated */ -}} {{- /* By default this container runs as user 1000 unless otherwise stated */ -}}
@ -254,14 +240,12 @@ spec:
- name: GITEA_ADMIN_PASSWORD - name: GITEA_ADMIN_PASSWORD
value: {{ .Values.gitea.admin.password | quote }} value: {{ .Values.gitea.admin.password | quote }}
{{- end }} {{- end }}
- name: GITEA_ADMIN_PASSWORD_MODE
value: {{ include "gitea.admin.passwordMode" $ }}
{{- if .Values.deployment.env }} {{- if .Values.deployment.env }}
{{- toYaml .Values.deployment.env | nindent 12 }} {{- toYaml .Values.deployment.env | nindent 12 }}
{{- end }} {{- end }}
volumeMounts: volumeMounts:
- name: init - name: init
mountPath: {{ .Values.initContainersScriptsVolumeMountPath }} mountPath: /usr/sbin
- name: temp - name: temp
mountPath: /tmp mountPath: /tmp
- name: data - name: data
@ -295,13 +279,6 @@ spec:
value: /data value: /data
- name: GITEA_TEMP - name: GITEA_TEMP
value: /tmp/gitea value: /tmp/gitea
{{- if and (hasKey .Values.resources "limits") (hasKey .Values.resources.limits "cpu") }}
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.cpu
{{- end }}
- name: TMPDIR - name: TMPDIR
value: /tmp/gitea value: /tmp/gitea
{{- if .Values.image.rootless }} {{- if .Values.image.rootless }}
@ -329,15 +306,15 @@ spec:
{{- end }} {{- end }}
{{- if .Values.gitea.livenessProbe.enabled }} {{- if .Values.gitea.livenessProbe.enabled }}
livenessProbe: livenessProbe:
{{- include "gitea.deployment.probe" .Values.gitea.livenessProbe | nindent 12 }} {{- toYaml (omit .Values.gitea.livenessProbe "enabled") | nindent 12 }}
{{- end }} {{- end }}
{{- if .Values.gitea.readinessProbe.enabled }} {{- if .Values.gitea.readinessProbe.enabled }}
readinessProbe: readinessProbe:
{{- include "gitea.deployment.probe" .Values.gitea.readinessProbe | nindent 12 }} {{- toYaml (omit .Values.gitea.readinessProbe "enabled") | nindent 12 }}
{{- end }} {{- end }}
{{- if .Values.gitea.startupProbe.enabled }} {{- if .Values.gitea.startupProbe.enabled }}
startupProbe: startupProbe:
{{- include "gitea.deployment.probe" .Values.gitea.startupProbe | nindent 12 }} {{- toYaml (omit .Values.gitea.startupProbe "enabled") | nindent 12 }}
{{- end }} {{- end }}
resources: resources:
{{- toYaml .Values.resources | nindent 12 }} {{- toYaml .Values.resources | nindent 12 }}
@ -357,23 +334,20 @@ spec:
subPath: {{ .Values.persistence.subPath }} subPath: {{ .Values.persistence.subPath }}
{{- end }} {{- end }}
{{- include "gitea.container-additional-mounts" . | nindent 12 }} {{- include "gitea.container-additional-mounts" . | nindent 12 }}
{{- if .Values.extraContainers }}
{{- toYaml .Values.extraContainers | nindent 8 }}
{{- end }}
{{- with .Values.global.hostAliases }} {{- with .Values.global.hostAliases }}
hostAliases: hostAliases:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- range $key, $value := .Values.nodeSelector }} {{- with .Values.nodeSelector }}
nodeSelector: nodeSelector:
{{ $key }}: {{ $value | quote }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- with .Values.affinity }} {{- with .Values.affinity }}
affinity: affinity:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- with .Values.topologySpreadConstraints }} {{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints: topologySpreadConstraints:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- with .Values.tolerations }} {{- with .Values.tolerations }}
@ -7,7 +7,6 @@ apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
name: {{ include "gitea.gpg-key-secret-name" . }} name: {{ include "gitea.gpg-key-secret-name" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
type: Opaque type: Opaque
@ -2,21 +2,13 @@ apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: {{ include "gitea.fullname" . }}-http name: {{ include "gitea.fullname" . }}-http
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.service.http.labels }}
{{- toYaml .Values.service.http.labels | nindent 4 }}
{{- end }}
annotations: annotations:
{{- toYaml .Values.service.http.annotations | nindent 4 }} {{- toYaml .Values.service.http.annotations | nindent 4 }}
spec: spec:
type: {{ .Values.service.http.type }} type: {{ .Values.service.http.type }}
{{- if eq .Values.service.http.type "LoadBalancer" }} {{- if and .Values.service.http.loadBalancerIP (eq .Values.service.http.type "LoadBalancer") }}
{{- if .Values.service.http.loadBalancerClass }}
loadBalancerClass: {{ .Values.service.http.loadBalancerClass }}
{{- end }}
{{- if and .Values.service.http.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.http.loadBalancerIP }} loadBalancerIP: {{ .Values.service.http.loadBalancerIP }}
{{- end }} {{- end }}
{{- if .Values.service.http.loadBalancerSourceRanges }} {{- if .Values.service.http.loadBalancerSourceRanges }}
@ -25,7 +17,6 @@ spec:
- {{ . }} - {{ . }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }}
{{- if .Values.service.http.externalIPs }} {{- if .Values.service.http.externalIPs }}
externalIPs: externalIPs:
{{- toYaml .Values.service.http.externalIPs | nindent 4 }} {{- toYaml .Values.service.http.externalIPs | nindent 4 }}
@ -1,11 +1,18 @@
{{- if .Values.ingress.enabled -}} {{- if .Values.ingress.enabled -}}
{{- $fullName := include "gitea.fullname" . -}} {{- $fullName := include "gitea.fullname" . -}}
{{- $httpPort := .Values.service.http.port -}} {{- $httpPort := .Values.service.http.port -}}
apiVersion: networking.k8s.io/v1 {{- $apiVersion := "extensions/v1beta1" -}}
{{- if .Values.ingress.apiVersion -}}
{{- $apiVersion = .Values.ingress.apiVersion -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}}
{{- $apiVersion = "networking.k8s.io/v1" }}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" -}}
{{- $apiVersion = "networking.k8s.io/v1beta1" }}
{{- end }}
apiVersion: {{ $apiVersion }}
kind: Ingress kind: Ingress
metadata: metadata:
name: {{ $fullName }} name: {{ $fullName }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
annotations: annotations:
@ -13,7 +20,9 @@ metadata:
{{ $key }}: {{ $value | quote }} {{ $key }}: {{ $value | quote }}
{{- end }} {{- end }}
spec: spec:
ingressClassName: {{ tpl .Values.ingress.className . }} {{- if .Values.ingress.className }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }} {{- if .Values.ingress.tls }}
tls: tls:
{{- range .Values.ingress.tls }} {{- range .Values.ingress.tls }}
@ -29,34 +38,21 @@ spec:
- host: {{ tpl .host $ | quote }} - host: {{ tpl .host $ | quote }}
http: http:
paths: paths:
{{- if .paths }}
{{- range .paths }} {{- range .paths }}
{{- if kindIs "string" . }} - path: {{ .path }}
- path: {{ . }} {{- if and .pathType (eq $apiVersion "networking.k8s.io/v1") }}
pathType: {{ default "Prefix" $.Values.ingress.pathType }} pathType: {{ .pathType }}
backend: {{- end }}
service:
name: {{ $fullName }}-http
port:
number: {{ $httpPort }}
{{- else }}
- path: {{ .path | default "/" }}
pathType: {{ .pathType | default "Prefix" }}
backend:
service:
name: {{ $fullName }}-http
port:
number: {{ $httpPort }}
{{- end }}
{{- end }}
{{- else }}
- path: "/"
pathType: "Prefix"
backend: backend:
{{- if eq $apiVersion "networking.k8s.io/v1" }}
service: service:
name: {{ $fullName }}-http name: {{ $fullName }}-http
port: port:
number: {{ $httpPort }} number: {{ $httpPort }}
{{- else }}
serviceName: {{ $fullName }}-http
servicePort: {{ $httpPort }}
{{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }} {{- end }}
@ -2,12 +2,15 @@ apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
name: {{ include "gitea.fullname" . }}-init name: {{ include "gitea.fullname" . }}-init
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
type: Opaque type: Opaque
stringData: stringData:
{{ (.Files.Glob "scripts/init-containers/init/*.sh").AsConfig | indent 2 }} configure_gpg_environment.sh: |-
#!/usr/bin/env bash
set -eu
gpg --batch --import /raw/private.asc
init_directory_structure.sh: |- init_directory_structure.sh: |-
#!/usr/bin/env bash #!/usr/bin/env bash
@ -21,25 +24,27 @@ stringData:
# END: initPreScript # END: initPreScript
{{- end }} {{- end }}
set -x
{{- if not .Values.image.rootless }} {{- if not .Values.image.rootless }}
chown -v 1000:1000 /data chown 1000:1000 /data
{{- end }} {{- end }}
mkdir -pv /data/git/.ssh mkdir -p /data/git/.ssh
chmod -Rv 700 /data/git/.ssh chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -pv /data/gitea/conf [ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure # prepare temp directory structure
mkdir -pv "${GITEA_TEMP}" mkdir -p "${GITEA_TEMP}"
{{- if not .Values.image.rootless }} {{- if not .Values.image.rootless }}
chown -v 1000:1000 "${GITEA_TEMP}" chown 1000:1000 "${GITEA_TEMP}"
{{- end }} {{- end }}
chmod -v ug+rwx "${GITEA_TEMP}" chmod ug+rwx "${GITEA_TEMP}"
{{ if .Values.signing.enabled -}} {{ if .Values.signing.enabled -}}
if [ ! -d "${GNUPGHOME}" ]; then if [ ! -d "${GNUPGHOME}" ]; then
mkdir -pv "${GNUPGHOME}" mkdir -p "${GNUPGHOME}"
chmod -v 700 "${GNUPGHOME}" chmod 700 "${GNUPGHOME}"
chown -v 1000:1000 "${GNUPGHOME}" chown 1000:1000 "${GNUPGHOME}"
fi fi
{{- end }} {{- end }}
@ -57,25 +62,25 @@ stringData:
exit 1 exit 1
} }
{{- if include "valkey.servicename" . }} {{- if include "redis.servicename" . }}
function test_valkey_connection() { function test_redis_connection() {
local RETRY=0 local RETRY=0
local MAX=30 local MAX=30
echo 'Wait for valkey to become available...' echo 'Wait for redis to become available...'
until [ "${RETRY}" -ge "${MAX}" ]; do until [ "${RETRY}" -ge "${MAX}" ]; do
nc -vz -w2 {{ include "valkey.servicename" . }} {{ include "valkey.port" . }} && break nc -vz -w2 {{ include "redis.servicename" . }} {{ include "redis.port" . }} && break
RETRY=$[${RETRY}+1] RETRY=$[${RETRY}+1]
echo "...not ready yet (${RETRY}/${MAX})" echo "...not ready yet (${RETRY}/${MAX})"
done done
if [ "${RETRY}" -ge "${MAX}" ]; then if [ "${RETRY}" -ge "${MAX}" ]; then
echo "Valkey not reachable after '${MAX}' attempts!" echo "Redis not reachable after '${MAX}' attempts!"
exit 1 exit 1
fi fi
} }
test_valkey_connection test_redis_connection
{{- end }} {{- end }}
@ -94,7 +99,7 @@ stringData:
echo "ERROR: 'configure_admin_user' was not able to determine the current list of admin users." echo "ERROR: 'configure_admin_user' was not able to determine the current list of admin users."
echo " Please review the output of 'gitea admin user list --admin' shown below." echo " Please review the output of 'gitea admin user list --admin' shown below."
echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-gitea/issues." echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
echo "DEBUG: Output of 'gitea admin user list --admin'" echo "DEBUG: Output of 'gitea admin user list --admin'"
echo "--" echo "--"
echo "${full_admin_list}" echo "${full_admin_list}"
@ -104,33 +109,13 @@ stringData:
local ACCOUNT_ID=$(echo "${actual_user_table}" | grep -E "\s+${GITEA_ADMIN_USERNAME}\s+" | awk -F " " "{printf \$1}") local ACCOUNT_ID=$(echo "${actual_user_table}" | grep -E "\s+${GITEA_ADMIN_USERNAME}\s+" | awk -F " " "{printf \$1}")
if [[ -z "${ACCOUNT_ID}" ]]; then if [[ -z "${ACCOUNT_ID}" ]]; then
local -a create_args
create_args=(--admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email {{ .Values.gitea.admin.email | quote }})
if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = initialOnlyRequireReset ]]; then
create_args+=(--must-change-password=true)
else
create_args+=(--must-change-password=false)
fi
echo "No admin user '${GITEA_ADMIN_USERNAME}' found. Creating now..." echo "No admin user '${GITEA_ADMIN_USERNAME}' found. Creating now..."
gitea admin user create "${create_args[@]}" gitea admin user create --admin --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}" --email {{ .Values.gitea.admin.email | quote }} --must-change-password=false
echo '...created.' echo '...created.'
else else
if [[ "${GITEA_ADMIN_PASSWORD_MODE}" = keepUpdated ]]; then echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist. Running update to sync password..."
echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist. Running update to sync password..." gitea admin user change-password --username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}"
# See https://gitea.com/gitea/helm-gitea/issues/673 echo '...password sync done.'
# --must-change-password argument was added to change-password, defaulting to true, counter to the previous behavior
# which acted as if it were provided with =false. If the argument is present in this version of gitea, then we
# should add it to prevent requiring frequent admin password resets.
local -a change_args
change_args=(--username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}")
if gitea admin user change-password --help | grep -qF -- '--must-change-password'; then
change_args+=(--must-change-password=false)
fi
gitea admin user change-password "${change_args[@]}"
echo '...password sync done.'
else
echo "Admin account '${GITEA_ADMIN_USERNAME}' already exist, but update mode is set to '${GITEA_ADMIN_PASSWORD_MODE}'. Skipping."
fi
fi fi
} }
@ -154,7 +139,7 @@ stringData:
echo "ERROR: 'configure_ldap' was not able to determine the current list of authentication sources." echo "ERROR: 'configure_ldap' was not able to determine the current list of authentication sources."
echo " Please review the output of 'gitea admin auth list --vertical-bars' shown below." echo " Please review the output of 'gitea admin auth list --vertical-bars' shown below."
echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-gitea/issues." echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
echo "DEBUG: Output of 'gitea admin auth list --vertical-bars'" echo "DEBUG: Output of 'gitea admin auth list --vertical-bars'"
echo "--" echo "--"
echo "${full_auth_list}" echo "${full_auth_list}"
@ -198,7 +183,7 @@ stringData:
echo "ERROR: 'configure_oauth' was not able to determine the current list of authentication sources." echo "ERROR: 'configure_oauth' was not able to determine the current list of authentication sources."
echo " Please review the output of 'gitea admin auth list --vertical-bars' shown below." echo " Please review the output of 'gitea admin auth list --vertical-bars' shown below."
echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-gitea/issues." echo " If you think it is an issue with the Helm Chart provisioning, file an issue at https://gitea.com/gitea/helm-chart/issues."
echo "DEBUG: Output of 'gitea admin auth list --vertical-bars'" echo "DEBUG: Output of 'gitea admin auth list --vertical-bars'"
echo "--" echo "--"
echo "${full_auth_list}" echo "${full_auth_list}"
@ -225,4 +210,4 @@ stringData:
configure_oauth configure_oauth
echo '==== END GITEA CONFIGURATION ====' echo '==== END GITEA CONFIGURATION ===='

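The password-sync branch on the main side probes the CLI's help output before passing the newer flag. A minimal standalone sketch of that pattern, assuming `gitea` is on the PATH and the admin credentials are exported as in the init container above:

#!/usr/bin/env bash
# Sketch only: add --must-change-password=false solely when the installed
# `gitea` build advertises the flag, mirroring the probe used above.
# Assumes GITEA_ADMIN_USERNAME and GITEA_ADMIN_PASSWORD are exported.
set -euo pipefail

args=(--username "${GITEA_ADMIN_USERNAME}" --password "${GITEA_ADMIN_PASSWORD}")
if gitea admin user change-password --help | grep -qF -- '--must-change-password'; then
  args+=(--must-change-password=false)
fi
gitea admin user change-password "${args[@]}"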

@ -1,12 +0,0 @@
{{- if and (.Values.gitea.metrics.enabled) (.Values.gitea.metrics.serviceMonitor.enabled) (.Values.gitea.metrics.token) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "gitea.metrics-secret-name" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
type: Opaque
data:
token: {{ .Values.gitea.metrics.token | b64enc }}
{{- end }}


@ -7,7 +7,6 @@ apiVersion: policy/v1beta1
kind: PodDisruptionBudget kind: PodDisruptionBudget
metadata: metadata:
name: {{ include "gitea.fullname" . }} name: {{ include "gitea.fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
spec: spec:


@ -3,11 +3,9 @@ kind: PersistentVolumeClaim
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: {{ .Values.persistence.claimName }} name: {{ .Values.persistence.claimName }}
namespace: {{ .Values.namespace | default .Release.Namespace }} namespace: {{ $.Release.Namespace }}
annotations: annotations:
{{ .Values.persistence.annotations | toYaml | indent 4}} {{ .Values.persistence.annotations | toYaml | indent 4}}
labels:
{{ .Values.persistence.labels | toYaml | indent 4}}
spec: spec:
accessModes: accessModes:
{{- if gt .Values.replicaCount 1.0 }} {{- if gt .Values.replicaCount 1.0 }}
@ -16,7 +14,9 @@ spec:
{{- .Values.persistence.accessModes | toYaml | nindent 4 }} {{- .Values.persistence.accessModes | toYaml | nindent 4 }}
{{- end }} {{- end }}
volumeMode: Filesystem volumeMode: Filesystem
{{- include "gitea.persistence.storageClass" . | nindent 2 }} {{- if .Values.persistence.storageClass }}
storageClassName: {{ .Values.persistence.storageClass }}
{{- end }}
{{- with .Values.persistence.volumeName }} {{- with .Values.persistence.volumeName }}
volumeName: {{ . }} volumeName: {{ . }}
{{- end }} {{- end }}


@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: {{ include "gitea.serviceAccountName" . }} name: {{ include "gitea.serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }} namespace: {{ .Release.Namespace | quote }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.labels }} {{- with .Values.serviceAccount.labels }}


@ -1,9 +1,8 @@
{{- if and .Values.gitea.metrics.enabled .Values.gitea.metrics.serviceMonitor.enabled -}} {{- if .Values.gitea.metrics.serviceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1 apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor kind: ServiceMonitor
metadata: metadata:
name: {{ include "gitea.fullname" . }} name: {{ include "gitea.fullname" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.gitea.metrics.serviceMonitor.additionalLabels }} {{- if .Values.gitea.metrics.serviceMonitor.additionalLabels }}
@ -15,29 +14,4 @@ spec:
{{- include "gitea.selectorLabels" . | nindent 6 }} {{- include "gitea.selectorLabels" . | nindent 6 }}
endpoints: endpoints:
- port: http - port: http
{{- if .Values.gitea.metrics.serviceMonitor.interval }}
interval: {{ .Values.gitea.metrics.serviceMonitor.interval }}
{{- end }}
{{- with .Values.gitea.metrics.serviceMonitor.relabelings }}
relabelings:
{{- . | toYaml | nindent 6 }}
{{- end }}
{{- if .Values.gitea.metrics.serviceMonitor.scheme }}
scheme: {{ .Values.gitea.metrics.serviceMonitor.scheme }}
{{- end }}
{{- if .Values.gitea.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.gitea.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- with .Values.gitea.metrics.serviceMonitor.tlsConfig }}
tlsConfig:
{{- . | toYaml | nindent 6 }}
{{- end }}
{{- if .Values.gitea.metrics.token }}
authorization:
type: Bearer
credentials:
name: {{ include "gitea.metrics-secret-name" . }}
key: token
optional: false
{{- end }}
{{- end -}} {{- end -}}


@ -2,20 +2,13 @@ apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: {{ include "gitea.fullname" . }}-ssh name: {{ include "gitea.fullname" . }}-ssh
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{- include "gitea.labels" . | nindent 4 }} {{- include "gitea.labels" . | nindent 4 }}
{{- if .Values.service.ssh.labels }}
{{- toYaml .Values.service.ssh.labels | nindent 4 }}
{{- end }}
annotations: annotations:
{{- toYaml .Values.service.ssh.annotations | nindent 4 }} {{- toYaml .Values.service.ssh.annotations | nindent 4 }}
spec: spec:
type: {{ .Values.service.ssh.type }} type: {{ .Values.service.ssh.type }}
{{- if eq .Values.service.ssh.type "LoadBalancer" }} {{- if eq .Values.service.ssh.type "LoadBalancer" }}
{{- if .Values.service.ssh.loadBalancerClass }}
loadBalancerClass: {{ .Values.service.ssh.loadBalancerClass }}
{{- end }}
{{- if .Values.service.ssh.loadBalancerIP }} {{- if .Values.service.ssh.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.ssh.loadBalancerIP }} loadBalancerIP: {{ .Values.service.ssh.loadBalancerIP }}
{{- end -}} {{- end -}}


@ -3,7 +3,6 @@ apiVersion: v1
kind: Pod kind: Pod
metadata: metadata:
name: "{{ include "gitea.fullname" . }}-test-connection" name: "{{ include "gitea.fullname" . }}-test-connection"
namespace: {{ .Values.namespace | default .Release.Namespace }}
labels: labels:
{{ include "gitea.labels" . | nindent 4 }} {{ include "gitea.labels" . | nindent 4 }}
annotations: annotations:

@ -1 +0,0 @@
Subproject commit 5ec2d815109358b3ad86f5aabf289d96e6535ac5

@ -1 +0,0 @@
Subproject commit b93143a1bfbde41d9b7343aab0d36f3ef6549e6b

@ -1 +0,0 @@
Subproject commit 93e0128b8787db05a2632c70501cd667dd35d253

@ -1 +0,0 @@
Subproject commit d007fc1f451abbad55204fa9c9eb3e6ed5dc5f61


@ -1,7 +0,0 @@
#!/usr/bin/env bash
function common_setup() {
load "$TEST_ROOT/test_helper/bats-support/load"
load "$TEST_ROOT/test_helper/bats-assert/load"
load "$TEST_ROOT/test_helper/bats-mock/stub"
}

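For orientation, a hedged sketch of how a suite that loads this helper could be run locally, assuming bats-core is installed and the bats-support/bats-assert/bats-mock helpers are vendored as the submodules referenced above (the .bats file names are assumed):

#!/usr/bin/env bash
# Illustrative only: fetch the helper submodules and run the bash unittests.
git submodule update --init --recursive
bats unittests/bash/*.bats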

@ -1,204 +0,0 @@
#!/usr/bin/env bats
function setup() {
PROJECT_ROOT="$(git rev-parse --show-toplevel)"
TEST_ROOT="$PROJECT_ROOT/unittests/bash"
load "$TEST_ROOT/test_helper/common-setup"
common_setup
export GITEA_APP_INI="$BATS_TEST_TMPDIR/app.ini"
export TMP_EXISTING_ENVS_FILE="$BATS_TEST_TMPDIR/existing-envs"
export ENV_TO_INI_MOUNT_POINT="$BATS_TEST_TMPDIR/env-to-ini-mounts"
stub gitea \
"generate secret INTERNAL_TOKEN : echo 'mocked-internal-token'" \
"generate secret SECRET_KEY : echo 'mocked-secret-key'" \
"generate secret JWT_SECRET : echo 'mocked-jwt-secret'" \
"generate secret LFS_JWT_SECRET : echo 'mocked-lfs-jwt-secret'"
}
function teardown() {
unstub gitea
    # This condition exists because https://github.com/jasonkarns/bats-mock/pull/37 is still open
if [ $ENV_TO_INI_EXPECTED -eq 1 ]; then
unstub environment-to-ini
fi
}
# This function exists because https://github.com/jasonkarns/bats-mock/pull/37 is still open
function expect_environment_to_ini_call() {
export ENV_TO_INI_EXPECTED=1
stub environment-to-ini \
"-o $GITEA_APP_INI : echo 'Stubbed environment-to-ini was called!'"
}
function execute_test_script() {
currentEnvsBefore=$(env | sort)
source $PROJECT_ROOT/scripts/init-containers/config/config_environment.sh
local exitCode=$?
currentEnvsAfter=$(env | sort)
# diff as unified +/- output without context before/after
diff --unified=0 <(echo "$currentEnvsBefore") <(echo "$currentEnvsAfter")
exit $exitCode
}
function write_mounted_file() {
# either "inlines" or "additionals"
scope="${1}"
file="${2}"
content="${3}"
mkdir -p "$ENV_TO_INI_MOUNT_POINT/$scope/..data/"
echo "${content}" > "$ENV_TO_INI_MOUNT_POINT/$scope/..data/$file"
ln -sf "$ENV_TO_INI_MOUNT_POINT/$scope/..data/$file" "$ENV_TO_INI_MOUNT_POINT/$scope/$file"
}
@test "works as expected when nothing is configured" {
expect_environment_to_ini_call
run $PROJECT_ROOT/scripts/init-containers/config/config_environment.sh
assert_success
assert_line '...Initial secrets generated'
assert_line 'Reloading preset envs...'
assert_line '=== All configuration sources loaded ==='
assert_line 'Stubbed environment-to-ini was called!'
}
@test "exports initial secrets" {
expect_environment_to_ini_call
run execute_test_script
assert_success
assert_line '+GITEA__OAUTH2__JWT_SECRET=mocked-jwt-secret'
assert_line '+GITEA__SECURITY__INTERNAL_TOKEN=mocked-internal-token'
assert_line '+GITEA__SECURITY__SECRET_KEY=mocked-secret-key'
assert_line '+GITEA__SERVER__LFS_JWT_SECRET=mocked-lfs-jwt-secret'
}
@test "does NOT export initial secrets when app.ini already exists" {
expect_environment_to_ini_call
touch $GITEA_APP_INI
run execute_test_script
assert_success
assert_line --partial 'An app.ini file already exists.'
refute_line '+GITEA__OAUTH2__JWT_SECRET=mocked-jwt-secret'
refute_line '+GITEA__SECURITY__INTERNAL_TOKEN=mocked-internal-token'
refute_line '+GITEA__SECURITY__SECRET_KEY=mocked-secret-key'
refute_line '+GITEA__SERVER__LFS_JWT_SECRET=mocked-lfs-jwt-secret'
}
@test "ensures that preset environment variables take precedence over auto-generated ones" {
expect_environment_to_ini_call
export GITEA__OAUTH2__JWT_SECRET="pre-defined-jwt-secret"
run execute_test_script
assert_success
refute_line '+GITEA__OAUTH2__JWT_SECRET=mocked-jwt-secret'
}
@test "ensures that preset environment variables take precedence over mounted ones" {
expect_environment_to_ini_call
export GITEA__OAUTH2__JWT_SECRET="pre-defined-jwt-secret"
write_mounted_file "inlines" "oauth2" "$(cat << EOF
JWT_SECRET=inline-jwt-secret
EOF
)"
run execute_test_script
assert_success
refute_line '+GITEA__OAUTH2__JWT_SECRET=mocked-jwt-secret'
refute_line '+GITEA__OAUTH2__JWT_SECRET=inline-jwt-secret'
}
@test "ensures that additionals take precedence over inlines" {
expect_environment_to_ini_call
write_mounted_file "inlines" "oauth2" "$(cat << EOF
JWT_SECRET=inline-jwt-secret
EOF
)"
write_mounted_file "additionals" "oauth2" "$(cat << EOF
JWT_SECRET=additional-jwt-secret
EOF
)"
run execute_test_script
assert_success
refute_line '+GITEA__OAUTH2__JWT_SECRET=mocked-jwt-secret'
refute_line '+GITEA__OAUTH2__JWT_SECRET=inline-jwt-secret'
assert_line '+GITEA__OAUTH2__JWT_SECRET=additional-jwt-secret'
}
@test "ensures that dotted/dashed sections are properly masked" {
expect_environment_to_ini_call
write_mounted_file "inlines" "repository.pull-request" "$(cat << EOF
WORK_IN_PROGRESS_PREFIXES=WIP:,[WIP]
EOF
)"
run execute_test_script
assert_success
assert_line '+GITEA__REPOSITORY_0X2E_PULL_0X2D_REQUEST__WORK_IN_PROGRESS_PREFIXES=WIP:,[WIP]'
}
###############################################################
##### THIS IS A BUG, BUT I WANT IT TO BE COVERED BY TESTS #####
###############################################################
@test "ensures uppercase section and setting names (🐞)" {
expect_environment_to_ini_call
export GITEA__oauth2__JwT_Secret="pre-defined-jwt-secret"
write_mounted_file "inlines" "repository.pull-request" "$(cat << EOF
WORK_IN_progress_PREFIXES=WIP:,[WIP]
EOF
)"
run execute_test_script
assert_success
assert_line '+GITEA__REPOSITORY_0X2E_PULL_0X2D_REQUEST__WORK_IN_PROGRESS_PREFIXES=WIP:,[WIP]'
assert_line '+GITEA__OAUTH2__JWT_SECRET=pre-defined-jwt-secret'
}
@test "treats top-level configuration as section-less" {
expect_environment_to_ini_call
write_mounted_file "inlines" "_generals_" "$(cat << EOF
APP_NAME=Hello top-level configuration
RUN_MODE=dev
EOF
)"
run execute_test_script
assert_success
assert_line '+GITEA____APP_NAME=Hello top-level configuration'
assert_line '+GITEA____RUN_MODE=dev'
}
@test "fails on invalid setting" {
write_mounted_file "inlines" "_generals_" "$(cat << EOF
some random invalid string
EOF
)"
run execute_test_script
assert_failure
}
@test "treats empty setting name as invalid setting" {
write_mounted_file "inlines" "_generals_" "$(cat << EOF
=value
EOF
)"
run execute_test_script
assert_failure
}

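The dotted/dashed-section tests above assert a specific escaping scheme; a minimal sketch of that mapping, derived only from the expected variable names (not from the chart's actual implementation):

#!/usr/bin/env bash
# Sketch: '.' and '-' in an app.ini section name are hex-escaped (0X2E / 0X2D)
# and upper-cased when composing GITEA__<SECTION>__<KEY> variable names.
section='repository.pull-request'
mangled=$(printf '%s' "${section^^}" | sed -e 's/\./_0X2E_/g' -e 's/-/_0X2D_/g')
echo "GITEA__${mangled}__WORK_IN_PROGRESS_PREFIXES"
# -> GITEA__REPOSITORY_0X2E_PULL_0X2D_REQUEST__WORK_IN_PROGRESS_PREFIXES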

@ -0,0 +1,45 @@
suite: config template | cache config
release:
name: gitea-unittests
namespace: testing
tests:
- it: "cache is configured correctly for redis-cluster"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=redis
HOST=redis+cluster://:@gitea-unittests-redis-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "cache is configured correctly for 'memory' when redis-cluster is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=memory
HOST=
- it: "cache can be customized when redis-cluster is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
gitea.config.cache.ADAPTER: custom-adapter
gitea.config.cache.HOST: custom-host
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=custom-adapter
HOST=custom-host

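This suite (and the ones that follow) targets the helm-unittest plugin; a hedged example of running them locally, assuming the plugin is installed and the chart dependencies have been vendored — the -f pattern is an assumption about this chart's layout:

#!/usr/bin/env bash
# Illustrative invocation only; adjust the test-file pattern to the repository.
helm plugin install https://github.com/helm-unittest/helm-unittest
helm dependency update
helm unittest -f 'unittests/**/*.yaml' .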

@ -0,0 +1,30 @@
suite: config template | database section (postgresql-ha)
release:
name: gitea-unittests
namespace: testing
tests:
- it: connects to pgpool service
template: templates/gitea/config.yaml
set:
postgresql:
enabled: false
postgresql-ha:
enabled: true
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: HOST=gitea-unittests-postgresql-ha-pgpool.testing.svc.cluster.local:5432
- it: renders the referenced service
template: charts/postgresql-ha/templates/pgpool/service.yaml
set:
postgresql:
enabled: false
postgresql-ha:
enabled: true
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-postgresql-ha-pgpool
namespace: testing


@ -0,0 +1,30 @@
suite: config template | database section (postgresql)
release:
name: gitea-unittests
namespace: testing
tests:
- it: "connects to postgresql service"
template: templates/gitea/config.yaml
set:
postgresql:
enabled: true
postgresql-ha:
enabled: false
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: HOST=gitea-unittests-postgresql.testing.svc.cluster.local:5432
- it: "renders the referenced service"
template: charts/postgresql/templates/primary/svc.yaml
set:
postgresql:
enabled: true
postgresql-ha:
enabled: false
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-postgresql
namespace: testing


@ -0,0 +1,45 @@
suite: config template | queue config
release:
name: gitea-unittests
namespace: testing
tests:
- it: "queue is configured correctly for redis-cluster"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=redis+cluster://:@gitea-unittests-redis-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
TYPE=redis
- it: "queue is configured correctly for 'levelDB' when redis-cluster is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=
TYPE=level
- it: "queue can be customized when redis-cluster is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
gitea.config.queue.TYPE: custom-type
gitea.config.queue.CONN_STR: custom-connection-string
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=custom-connection-string
TYPE=custom-type


@ -0,0 +1,45 @@
suite: config template | session config
release:
name: gitea-unittests
namespace: testing
tests:
- it: "session is configured correctly for redis-cluster"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=redis
PROVIDER_CONFIG=redis+cluster://:@gitea-unittests-redis-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "session is configured correctly for 'memory' when redis-cluster is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=memory
PROVIDER_CONFIG=
- it: "session can be customized when redis-cluster is disabled"
template: templates/gitea/config.yaml
set:
redis-cluster:
enabled: false
gitea.config.session.PROVIDER: custom-provider
gitea.config.session.PROVIDER_CONFIG: custom-provider-config
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=custom-provider
PROVIDER_CONFIG=custom-provider-config


@ -1,4 +1,4 @@
suite: Dependency checks | Major image bumps suite: Dependency update consistency
release: release:
name: gitea-unittests name: gitea-unittests
namespace: testing namespace: testing
@ -15,7 +15,7 @@ tests:
matchRegex: matchRegex:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST # IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: bitnami/postgresql-repmgr:17.+$ pattern: ^docker.io/bitnami/postgresql-repmgr:16.+$
- it: "[postgresql] ensures we detect major image version upgrades" - it: "[postgresql] ensures we detect major image version upgrades"
template: charts/postgresql/templates/primary/statefulset.yaml template: charts/postgresql/templates/primary/statefulset.yaml
set: set:
@ -28,30 +28,15 @@ tests:
matchRegex: matchRegex:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST # IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: bitnami/postgresql:17.+$ pattern: ^docker.io/bitnami/postgresql:16.+$
- it: "[valkey-cluster] ensures we detect major image version upgrades" - it: "[redis-cluster] ensures we detect major image version upgrades"
template: charts/valkey-cluster/templates/valkey-statefulset.yaml template: charts/redis-cluster/templates/redis-statefulset.yaml
set: set:
valkey-cluster: redis-cluster:
enabled: true
valkey:
enabled: false
asserts:
- documentIndex: 0
matchRegex:
path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: bitnami/valkey-cluster:8.+$
- it: "[valkey] ensures we detect major image version upgrades"
template: charts/valkey/templates/primary/application.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: true enabled: true
asserts: asserts:
- documentIndex: 0 - documentIndex: 0
matchRegex: matchRegex:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
# IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST # IN CASE OF AN INTENTIONAL MAJOR BUMP, ADJUST THIS TEST
pattern: bitnami/valkey:8.+$ pattern: ^docker.io/bitnami/redis-cluster:7.+$


@ -0,0 +1,17 @@
suite: deployment template (basic)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: renders a deployment
template: templates/gitea/deployment.yaml
asserts:
- hasDocuments:
count: 1
- containsDocument:
kind: Deployment
apiVersion: apps/v1
name: gitea-unittests


@ -14,7 +14,7 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "docker.gitea.com/gitea:1.19.3-rootless" value: "gitea/gitea:1.19.3-rootless"
- it: tag override - it: tag override
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
@ -22,7 +22,7 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "docker.gitea.com/gitea:1.19.4-rootless" value: "gitea/gitea:1.19.4-rootless"
- it: root-based image - it: root-based image
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
@ -30,7 +30,7 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "docker.gitea.com/gitea:1.19.3" value: "gitea/gitea:1.19.3"
- it: scoped registry - it: scoped registry
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
@ -38,7 +38,7 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "example.com/gitea:1.19.3-rootless" value: "example.com/gitea/gitea:1.19.3-rootless"
- it: global registry - it: global registry
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
@ -46,7 +46,7 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "global.example.com/gitea:1.19.3-rootless" value: "global.example.com/gitea/gitea:1.19.3-rootless"
- it: digest for rootless image - it: digest for rootless image
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
@ -56,12 +56,12 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "docker.gitea.com/gitea:1.19.3-rootless@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a" value: "gitea/gitea:1.19.3-rootless@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a"
- it: image fullOverride (does not append rootless) - it: image fullOverride (does not append rootless)
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
image: image:
fullOverride: docker.gitea.com/gitea:1.19.3 fullOverride: gitea/gitea:1.19.3
# setting rootless, registry, repository, tag, and digest to prove that override works # setting rootless, registry, repository, tag, and digest to prove that override works
rootless: true rootless: true
registry: example.com registry: example.com
@ -71,7 +71,7 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "docker.gitea.com/gitea:1.19.3" value: "gitea/gitea:1.19.3"
- it: digest for root-based image - it: digest for root-based image
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
@ -81,7 +81,7 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "docker.gitea.com/gitea:1.19.3@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a" value: "gitea/gitea:1.19.3@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a"
- it: digest and global registry - it: digest and global registry
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
@ -90,21 +90,4 @@ tests:
asserts: asserts:
- equal: - equal:
path: spec.template.spec.containers[0].image path: spec.template.spec.containers[0].image
value: "global.example.com/gitea:1.19.3-rootless@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a" value: "global.example.com/gitea/gitea:1.19.3-rootless@sha256:b28e8f3089b52ebe6693295df142f8c12eff354e9a4a5bfbb5c10f296c3a537a"
- it: correctly renders floating tag references
template: templates/gitea/deployment.yaml
set:
image.tag: 1.21 # use non-quoted value on purpose. See: https://gitea.com/gitea/helm-gitea/issues/631
asserts:
- equal:
path: spec.template.spec.initContainers[0].image
value: "docker.gitea.com/gitea:1.21-rootless"
- equal:
path: spec.template.spec.initContainers[1].image
value: "docker.gitea.com/gitea:1.21-rootless"
- equal:
path: spec.template.spec.initContainers[2].image
value: "docker.gitea.com/gitea:1.21-rootless"
- equal:
path: spec.template.spec.containers[0].image
value: "docker.gitea.com/gitea:1.21-rootless"

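The expected image strings in the tests above follow a consistent shape; a small sketch of how such a reference is composed (this mirrors the assertions, not the chart's template helper):

#!/usr/bin/env bash
# Sketch: registry/repository:tag, optional -rootless suffix, optional @digest.
registry='docker.gitea.com'; repository='gitea'; tag='1.19.3'
rootless='true'; digest=''

image="${registry}/${repository}:${tag}"
[ "${rootless}" = 'true' ] && image="${image}-rootless"
[ -n "${digest}" ] && image="${image}@${digest}"
echo "${image}"   # docker.gitea.com/gitea:1.19.3-rootless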

@ -0,0 +1,23 @@
suite: ingress template
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/ingress.yaml
tests:
- it: hostname using TPL
set:
global.giteaHostName: "gitea.example.com"
ingress.enabled: true
ingress.hosts[0].host: "{{ .Values.global.giteaHostName }}"
ingress.tls:
- secretName: gitea-tls
hosts:
- "{{ .Values.global.giteaHostName }}"
asserts:
- equal:
path: spec.tls[0].hosts[0]
value: "gitea.example.com"
- equal:
path: spec.rules[0].host
value: "gitea.example.com"


@ -18,7 +18,7 @@ tests:
value: configure-gpg value: configure-gpg
- equal: - equal:
path: spec.template.spec.initContainers[2].command path: spec.template.spec.initContainers[2].command
value: ["/usr/sbinx/configure_gpg_environment.sh"] value: ["/usr/sbin/configure_gpg_environment.sh"]
- equal: - equal:
path: spec.template.spec.initContainers[2].securityContext path: spec.template.spec.initContainers[2].securityContext
value: value:
@ -28,13 +28,11 @@ tests:
value: value:
- name: GNUPGHOME - name: GNUPGHOME
value: /data/git/.gnupg value: /data/git/.gnupg
- name: TMP_RAW_GPG_KEY
value: /raw/private.asc
- equal: - equal:
path: spec.template.spec.initContainers[2].volumeMounts path: spec.template.spec.initContainers[2].volumeMounts
value: value:
- name: init - name: init
mountPath: /usr/sbinx mountPath: /usr/sbin
- name: data - name: data
mountPath: /data mountPath: /data
- name: gpg-private-key - name: gpg-private-key


@ -30,7 +30,7 @@ tests:
- it: supports overriding SSH log level (even when image.fullOverride set) - it: supports overriding SSH log level (even when image.fullOverride set)
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
image.fullOverride: docker.gitea.com/gitea:1.19.3 image.fullOverride: gitea/gitea:1.19.3
image.rootless: false image.rootless: false
gitea.ssh.logLevel: "DEBUG" gitea.ssh.logLevel: "DEBUG"
asserts: asserts:
@ -53,7 +53,7 @@ tests:
- it: skips SSH_LOG_LEVEL for rootless image (even when image.fullOverride set) - it: skips SSH_LOG_LEVEL for rootless image (even when image.fullOverride set)
template: templates/gitea/deployment.yaml template: templates/gitea/deployment.yaml
set: set:
image.fullOverride: docker.gitea.com/gitea:1.19.3 image.fullOverride: gitea/gitea:1.19.3
image.rootless: true image.rootless: true
gitea.ssh.logLevel: "DEBUG" # explicitly defining a non-standard level here gitea.ssh.logLevel: "DEBUG" # explicitly defining a non-standard level here
asserts: asserts:


@ -1,12 +0,0 @@
suite: Check if actions raises an error
release:
name: gitea-unittests
namespace: testing
tests:
- it: fails when trying to configure actions due to removal
set:
actions:
enabled: true
asserts:
- failedTemplate:
errorMessage: The actions sub-chart has been outsourced to a dedicated chart available at https://gitea.com/gitea/helm-actions. For assistance with the migration process, check https://gitea.com/gitea/helm-actions/issues/9.


@ -1,24 +0,0 @@
suite: config template | actions config
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/config.yaml
tests:
- it: "actions are enabled by default (based on vanilla Gitea behavior)"
template: templates/gitea/config.yaml
asserts:
- documentIndex: 0
notExists:
path: stringData.actions
- it: "actions can be disabled via inline config"
template: templates/gitea/config.yaml
set:
gitea.config.actions.ENABLED: false
asserts:
- documentIndex: 0
equal:
path: stringData.actions
value: |-
ENABLED=false


@ -1,66 +0,0 @@
suite: config template | cache config
release:
name: gitea-unittests
namespace: testing
tests:
- it: "cache is configured correctly for valkey-cluster"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: true
valkey:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=redis
HOST=redis+cluster://:@gitea-unittests-valkey-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "cache is configured correctly for valkey"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=redis
HOST=redis://:changeme@gitea-unittests-valkey-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "cache is configured correctly for 'memory' when valkey (or valkey-cluster) is disabled"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=memory
HOST=
- it: "cache can be customized when valkey (or valkey-cluster) is disabled"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: false
gitea.config.cache.ADAPTER: custom-adapter
gitea.config.cache.HOST: custom-host
asserts:
- documentIndex: 0
equal:
path: stringData.cache
value: |-
ADAPTER=custom-adapter
HOST=custom-host


@ -1,58 +0,0 @@
suite: config template | metrics section (metrics token)
release:
name: gitea-unittests
namespace: testing
tests:
- it: metrics token is set
template: templates/gitea/config.yaml
set:
gitea:
metrics:
enabled: true
token: "somepassword"
asserts:
- documentIndex: 0
equal:
path: stringData.metrics
value: |-
ENABLED=true
TOKEN=somepassword
- it: metrics token is empty
template: templates/gitea/config.yaml
set:
gitea:
metrics:
enabled: true
token: ""
asserts:
- documentIndex: 0
equal:
path: stringData.metrics
value: |-
ENABLED=true
- it: metrics token is nil
template: templates/gitea/config.yaml
set:
gitea:
metrics:
enabled: true
token:
asserts:
- documentIndex: 0
equal:
path: stringData.metrics
value: |-
ENABLED=true
  - it: does not configure a token if metrics are disabled
template: templates/gitea/config.yaml
set:
gitea:
metrics:
enabled: false
token: "somepassword"
asserts:
- documentIndex: 0
equal:
path: stringData.metrics
value: |-
ENABLED=false


@ -1,66 +0,0 @@
suite: config template | queue config
release:
name: gitea-unittests
namespace: testing
tests:
- it: "queue is configured correctly for valkey-cluster"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: true
valkey:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=redis+cluster://:@gitea-unittests-valkey-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
TYPE=redis
- it: "queue is configured correctly for valkey"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=redis://:changeme@gitea-unittests-valkey-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
TYPE=redis
- it: "queue is configured correctly for 'levelDB' when valkey (and valkey-cluster) is disabled"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=
TYPE=level
- it: "queue can be customized when valkey (and valkey-cluster) are disabled"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: false
gitea.config.queue.TYPE: custom-type
gitea.config.queue.CONN_STR: custom-connection-string
asserts:
- documentIndex: 0
equal:
path: stringData.queue
value: |-
CONN_STR=custom-connection-string
TYPE=custom-type


@ -1,66 +0,0 @@
suite: config template | session config
release:
name: gitea-unittests
namespace: testing
tests:
- it: "session is configured correctly for valkey-cluster"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: true
valkey:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=redis
PROVIDER_CONFIG=redis+cluster://:@gitea-unittests-valkey-cluster-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "session is configured correctly for valkey"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: true
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=redis
PROVIDER_CONFIG=redis://:changeme@gitea-unittests-valkey-headless.testing.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s&
- it: "session is configured correctly for 'memory' when valkey (and valkey-cluster) is disabled"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: false
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=memory
PROVIDER_CONFIG=
- it: "session can be customized when valkey (and valkey-cluster) is disabled"
template: templates/gitea/config.yaml
set:
valkey-cluster:
enabled: false
valkey:
enabled: false
gitea.config.session.PROVIDER: custom-provider
gitea.config.session.PROVIDER_CONFIG: custom-provider-config
asserts:
- documentIndex: 0
equal:
path: stringData.session
value: |-
PROVIDER=custom-provider
PROVIDER_CONFIG=custom-provider-config


@ -1,121 +0,0 @@
suite: Dependency checks | Customization integrity | postgresql-ha
release:
name: gitea-unittests
namespace: testing
set:
postgresql:
enabled: false
postgresql-ha:
enabled: true
global:
postgresql:
database: gitea-database
password: gitea-password
username: gitea-username
postgresql:
repmgrPassword: custom-password-repmgr
postgresPassword: custom-password-postgres
password: custom-password-overwritten-by-global-postgresql-password
pgpool:
adminPassword: custom-password-pgpool
service:
ports:
postgresql: 1234
persistence:
size: 1337Mi
tests:
- it: "[postgresql-ha] DB settings are applied as expected"
template: charts/postgresql-ha/templates/postgresql/statefulset.yaml
asserts:
- documentIndex: 0
contains:
path: spec.template.spec.containers[0].env
content:
name: POSTGRES_DB
value: "gitea-database"
- documentIndex: 0
contains:
path: spec.template.spec.containers[0].env
content:
name: POSTGRES_USER
value: "gitea-username"
- it: "[postgresql-ha] DB passwords are applied as expected"
template: charts/postgresql-ha/templates/postgresql/secrets.yaml
asserts:
- documentIndex: 0
equal:
path: data["repmgr-password"]
value: "Y3VzdG9tLXBhc3N3b3JkLXJlcG1ncg=="
- documentIndex: 0
equal:
path: data["postgres-password"]
value: "Y3VzdG9tLXBhc3N3b3JkLXBvc3RncmVz"
- documentIndex: 0
equal:
path: data["password"]
value: "Z2l0ZWEtcGFzc3dvcmQ=" # postgresql-ha.postgresql.password is overwritten by postgresql-ha.global.postgresql.password and should not be referenced here
- it: "[postgresql-ha] pgpool.adminPassword is applied as expected"
template: charts/postgresql-ha/templates/pgpool/secrets.yaml
asserts:
- documentIndex: 0
equal:
path: data["admin-password"]
value: "Y3VzdG9tLXBhc3N3b3JkLXBncG9vbA=="
- it: "[postgresql-ha] pgpool.adminPassword is applied as expected"
template: charts/postgresql-ha/templates/pgpool/secrets.yaml
asserts:
- documentIndex: 0
equal:
path: data["admin-password"]
value: "Y3VzdG9tLXBhc3N3b3JkLXBncG9vbA=="
- it: "[postgresql-ha] pgpool.adminPassword is applied as expected"
template: charts/postgresql-ha/templates/pgpool/secrets.yaml
asserts:
- documentIndex: 0
equal:
path: data["admin-password"]
value: "Y3VzdG9tLXBhc3N3b3JkLXBncG9vbA=="
- it: "[postgresql-ha] persistence.size is applied as expected"
template: charts/postgresql-ha/templates/postgresql/statefulset.yaml
asserts:
- documentIndex: 0
equal:
path: spec.volumeClaimTemplates[0].spec.resources.requests.storage
value: "1337Mi"
- it: "[postgresql-ha] service.ports.postgresql is applied as expected"
template: charts/postgresql-ha/templates/pgpool/service.yaml
asserts:
- documentIndex: 0
equal:
path: spec.ports[0].port
value: 1234
- it: "[postgresql-ha] renders the referenced service"
template: charts/postgresql-ha/templates/pgpool/service.yaml
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-postgresql-ha-pgpool
namespace: testing
- it: "[gitea] connects to pgpool service"
template: templates/gitea/config.yaml
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: HOST=gitea-unittests-postgresql-ha-pgpool.testing.svc.cluster.local:1234
- it: "[gitea] connects to configured database"
template: templates/gitea/config.yaml
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: NAME=gitea-database
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: USER=gitea-username
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: PASSWD=gitea-password


@ -1,88 +0,0 @@
suite: Dependency checks | Customization integrity | postgresql
release:
name: gitea-unittests
namespace: testing
set:
postgresql-ha:
enabled: false
postgresql:
enabled: true
global:
postgresql:
auth:
password: gitea-password
database: gitea-database
username: gitea-username
service:
ports:
postgresql: 1234
primary:
persistence:
size: 1337Mi
tests:
- it: "[postgresql] DB settings are applied as expected"
template: charts/postgresql/templates/primary/statefulset.yaml
asserts:
- documentIndex: 0
contains:
path: spec.template.spec.containers[0].env
content:
name: POSTGRES_DATABASE
value: "gitea-database"
- documentIndex: 0
contains:
path: spec.template.spec.containers[0].env
content:
name: POSTGRES_USER
value: "gitea-username"
- it: "[postgresql] DB password is applied as expected"
template: charts/postgresql/templates/secrets.yaml
asserts:
- documentIndex: 0
equal:
path: data["password"]
value: "Z2l0ZWEtcGFzc3dvcmQ="
- it: "[postgresql] primary.persistence.size is applied as expected"
template: charts/postgresql/templates/primary/statefulset.yaml
asserts:
- documentIndex: 0
equal:
path: spec.volumeClaimTemplates[0].spec.resources.requests.storage
value: "1337Mi"
- it: "[postgresql] global.postgresql.service.ports.postgresql is applied as expected"
template: charts/postgresql/templates/primary/svc.yaml
asserts:
- documentIndex: 0
equal:
path: spec.ports[0].port
value: 1234
- it: "[postgresql] renders the referenced service"
template: charts/postgresql/templates/primary/svc.yaml
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-postgresql
namespace: testing
- it: "[gitea] connects to postgresql service"
template: templates/gitea/config.yaml
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: HOST=gitea-unittests-postgresql.testing.svc.cluster.local:1234
- it: "[gitea] connects to configured database"
template: templates/gitea/config.yaml
asserts:
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: NAME=gitea-database
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: USER=gitea-username
- documentIndex: 0
matchRegex:
path: stringData.database
pattern: PASSWD=gitea-password


@ -1,90 +0,0 @@
suite: Dependency checks | Customization integrity | valkey-cluster
release:
name: gitea-unittests
namespace: testing
set:
valkey:
enabled: false
valkey-cluster:
enabled: true
usePassword: false
cluster:
nodes: 5
replicas: 2
tests:
- it: "[valkey-cluster] configures correct nodes/replicas"
template: charts/valkey-cluster/templates/valkey-statefulset.yaml
asserts:
- documentIndex: 0
equal:
path: spec.replicas
value: 5
- documentIndex: 0
matchRegex:
path: spec.template.spec.containers[0].args[0]
pattern: VALKEY_CLUSTER_REPLICAS="2"
- it: "[valkey-cluster] support auth-less connections"
asserts:
- template: charts/valkey-cluster/templates/secret.yaml
hasDocuments:
count: 0
- template: charts/valkey-cluster/templates/valkey-statefulset.yaml
documentIndex: 0
contains:
path: spec.template.spec.containers[0].env
content:
name: ALLOW_EMPTY_PASSWORD
value: "yes"
- it: "[valkey-cluster] support auth-full connections"
set:
valkey-cluster:
usePassword: true
asserts:
- template: charts/valkey-cluster/templates/secret.yaml
containsDocument:
kind: Secret
apiVersion: v1
name: gitea-unittests-valkey-cluster
namespace: testing
- template: charts/valkey-cluster/templates/valkey-statefulset.yaml
documentIndex: 0
contains:
path: spec.template.spec.containers[0].env
content:
name: REDISCLI_AUTH
valueFrom:
secretKeyRef:
name: gitea-unittests-valkey-cluster
key: valkey-password
- it: "[valkey-cluster] renders the referenced service"
template: charts/valkey-cluster/templates/headless-svc.yaml
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-valkey-cluster-headless
namespace: testing
- documentIndex: 0
contains:
path: spec.ports
content:
name: tcp-redis
port: 6379
targetPort: tcp-redis
- it: "[gitea] waits for valkey-cluster to be up and running"
template: templates/gitea/init.yaml
asserts:
- documentIndex: 0
matchRegex:
path: stringData["configure_gitea.sh"]
pattern: nc -vz -w2 gitea-unittests-valkey-cluster-headless.testing.svc.cluster.local 6379


@ -1,52 +0,0 @@
suite: Dependency checks | Customization integrity | valkey
release:
name: gitea-unittests
namespace: testing
set:
valkey-cluster:
enabled: false
valkey:
enabled: true
architecture: standalone
global:
valkey:
password: gitea-password
master:
count: 2
tests:
- it: "[valkey] configures correct 'master' nodes"
template: charts/valkey/templates/primary/application.yaml
asserts:
- documentIndex: 0
equal:
path: spec.replicas
value: 1
- it: "[valkey] valkey.global.valkey.password is applied as expected"
template: charts/valkey/templates/secret.yaml
asserts:
- documentIndex: 0
equal:
path: data["valkey-password"]
value: "Z2l0ZWEtcGFzc3dvcmQ="
- it: "[valkey] renders the referenced service"
template: charts/valkey/templates/headless-svc.yaml
asserts:
- containsDocument:
kind: Service
apiVersion: v1
name: gitea-unittests-valkey-headless
namespace: testing
- documentIndex: 0
contains:
path: spec.ports
content:
name: tcp-redis
port: 6379
targetPort: redis
- it: "[gitea] waits for valkey to be up and running"
template: templates/gitea/init.yaml
asserts:
- documentIndex: 0
matchRegex:
path: stringData["configure_gitea.sh"]
pattern: nc -vz -w2 gitea-unittests-valkey-headless.testing.svc.cluster.local 6379


@ -1,59 +0,0 @@
suite: deployment template (HA)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: fails with multiple replicas and "GIT_GC_REPOS" enabled
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
persistence:
accessModes:
- ReadWriteMany
gitea:
config:
cron:
GIT_GC_REPOS:
ENABLED: true
asserts:
- failedTemplate:
errorMessage: "Invoking the garbage collector via CRON is not yet supported when running with multiple replicas. Please set 'gitea.config.cron.GIT_GC_REPOS.enabled = false'."
- it: fails with multiple replicas and RWX file system not set
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
asserts:
- failedTemplate:
errorMessage: "When using multiple replicas, a RWX file system is required and persistence.accessModes[0] must be set to ReadWriteMany."
- it: fails with multiple replicas and bleve issue indexer
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
persistence:
accessModes:
- ReadWriteMany
gitea:
config:
indexer:
ISSUE_INDEXER_TYPE: bleve
asserts:
- failedTemplate:
errorMessage: "When using multiple replicas, the issue indexer (gitea.config.indexer.ISSUE_INDEXER_TYPE) must be set to a HA-ready provider such as 'meilisearch', 'elasticsearch' or 'db' (if the DB is HA-ready)."
- it: fails with multiple replicas and bleve repo indexer
template: templates/gitea/deployment.yaml
set:
replicaCount: 2
persistence:
accessModes:
- ReadWriteMany
gitea:
config:
indexer:
REPO_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true
asserts:
- failedTemplate:
errorMessage: "When using multiple replicas, the repo indexer (gitea.config.indexer.REPO_INDEXER_TYPE) must be set to 'meilisearch' or 'elasticsearch' or disabled."

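The failure messages above encode the chart's HA prerequisites; a hedged example of flags that satisfy them when templating with two replicas (key names are taken from the tests' `set:` blocks and should be verified against values.yaml before use):

#!/usr/bin/env bash
# Illustrative only: two replicas require RWX storage, no CRON-driven GC,
# and HA-capable indexers (or the repo indexer disabled).
helm template gitea . \
  --set replicaCount=2 \
  --set 'persistence.accessModes[0]=ReadWriteMany' \
  --set gitea.config.cron.GIT_GC_REPOS.ENABLED=false \
  --set gitea.config.indexer.ISSUE_INDEXER_TYPE=db \
  --set gitea.config.indexer.REPO_INDEXER_ENABLED=false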

@ -1,95 +0,0 @@
suite: deployment template (basic)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: renders a deployment
template: templates/gitea/deployment.yaml
asserts:
- hasDocuments:
count: 1
- containsDocument:
kind: Deployment
apiVersion: apps/v1
name: gitea-unittests
- it: deployment labels are set
template: templates/gitea/deployment.yaml
set:
deployment.labels:
hello: world
asserts:
- isSubset:
path: metadata.labels
content:
hello: world
- isSubset:
path: spec.template.metadata.labels
content:
hello: world
- it: "injects TMP_EXISTING_ENVS_FILE as environment variable to 'init-app-ini' init container"
template: templates/gitea/deployment.yaml
asserts:
- contains:
path: spec.template.spec.initContainers[1].env
content:
name: TMP_EXISTING_ENVS_FILE
value: /tmp/existing-envs
- it: "injects ENV_TO_INI_MOUNT_POINT as environment variable to 'init-app-ini' init container"
template: templates/gitea/deployment.yaml
asserts:
- contains:
path: spec.template.spec.initContainers[1].env
content:
name: ENV_TO_INI_MOUNT_POINT
value: /env-to-ini-mounts
- it: CPU resources are defined as well as GOMAXPROCS
template: templates/gitea/deployment.yaml
set:
resources:
limits:
cpu: 200ms
memory: 200Mi
requests:
cpu: 100ms
memory: 100Mi
asserts:
- contains:
path: spec.template.spec.containers[0].env
content:
name: GOMAXPROCS
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.cpu
- equal:
path: spec.template.spec.containers[0].resources
value:
limits:
cpu: 200ms
memory: 200Mi
requests:
cpu: 100ms
memory: 100Mi
- it: Init containers have correct volumeMount path
template: templates/gitea/deployment.yaml
set:
initContainersScriptsVolumeMountPath: "/custom/init/path"
asserts:
- equal:
path: spec.template.spec.initContainers[*].volumeMounts[?(@.name=="init")].mountPath
value: "/custom/init/path"
- equal:
path: spec.template.spec.initContainers[*].volumeMounts[?(@.name=="config")].mountPath
value: "/custom/init/path"
- it: Init containers have correct volumeMount path if there is no override
template: templates/gitea/deployment.yaml
asserts:
- equal:
path: spec.template.spec.initContainers[*].volumeMounts[?(@.name=="init")].mountPath
value: "/usr/sbinx"
- equal:
path: spec.template.spec.initContainers[*].volumeMounts[?(@.name=="config")].mountPath
value: "/usr/sbinx"


@ -1,150 +0,0 @@
suite: deployment template
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: Renders a deployment
template: templates/gitea/deployment.yaml
asserts:
- hasDocuments:
count: 1
- containsDocument:
kind: Deployment
apiVersion: apps/v1
name: gitea-unittests
- it: Deployment with empty additionalConfigFromEnvs
template: templates/gitea/deployment.yaml
set:
gitea.additionalConfigFromEnvs: []
asserts:
- hasDocuments:
count: 1
- exists:
path: spec.template.spec.initContainers[1].env
- lengthEqual:
path: spec.template.spec.initContainers[1].env
count: 6
- isSubset:
path: spec.template.spec.initContainers[1]
content:
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TMP_EXISTING_ENVS_FILE
value: /tmp/existing-envs
- name: ENV_TO_INI_MOUNT_POINT
value: /env-to-ini-mounts
- it: Deployment with standard additionalConfigFromEnvs
template: templates/gitea/deployment.yaml
set:
gitea.additionalConfigFromEnvs: [{name: GITEA_database_HOST, value: my-db:123}, {name: GITEA_database_USER, value: my-user}]
asserts:
- hasDocuments:
count: 1
- exists:
path: spec.template.spec.initContainers[1].env
- lengthEqual:
path: spec.template.spec.initContainers[1].env
count: 8
- isSubset:
path: spec.template.spec.initContainers[1]
content:
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TMP_EXISTING_ENVS_FILE
value: /tmp/existing-envs
- name: ENV_TO_INI_MOUNT_POINT
value: /env-to-ini-mounts
- name: GITEA_database_HOST
value: my-db:123
- name: GITEA_database_USER
value: my-user
- it: Deployment with templated additionalConfigFromEnvs
template: templates/gitea/deployment.yaml
set:
gitea.misc.host: my-db-host:321
gitea.misc.user: my-db-user
gitea.additionalConfigFromEnvs: [{name: GITEA_database_HOST, value: "{{ .Values.gitea.misc.host }}"}, {name: GITEA_database_USER, value: "{{ .Values.gitea.misc.user }}"}]
asserts:
- hasDocuments:
count: 1
- exists:
path: spec.template.spec.initContainers[1].env
- lengthEqual:
path: spec.template.spec.initContainers[1].env
count: 8
- isSubset:
path: spec.template.spec.initContainers[1]
content:
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TMP_EXISTING_ENVS_FILE
value: /tmp/existing-envs
- name: ENV_TO_INI_MOUNT_POINT
value: /env-to-ini-mounts
- name: GITEA_database_HOST
value: my-db-host:321
- name: GITEA_database_USER
value: my-db-user
- it: Deployment with additionalConfigFromEnvs templated secret name
template: templates/gitea/deployment.yaml
set:
gitea.misc.existingSecret: my-db-secret
gitea.additionalConfigFromEnvs[0]:
name: GITEA_database_HOST
valueFrom:
secretKeyRef:
name: "{{ .Values.gitea.misc.existingSecret }}"
key: password
asserts:
- hasDocuments:
count: 1
- exists:
path: spec.template.spec.initContainers[1].env
- lengthEqual:
path: spec.template.spec.initContainers[1].env
count: 7
- isSubset:
path: spec.template.spec.initContainers[1]
content:
env:
- name: GITEA_APP_INI
value: /data/gitea/conf/app.ini
- name: GITEA_CUSTOM
value: /data/gitea
- name: GITEA_WORK_DIR
value: /data
- name: GITEA_TEMP
value: /tmp/gitea
- name: TMP_EXISTING_ENVS_FILE
value: /tmp/existing-envs
- name: ENV_TO_INI_MOUNT_POINT
value: /env-to-ini-mounts
- name: GITEA_database_HOST
valueFrom:
secretKeyRef:
name: "my-db-secret"
key: password


@ -1,45 +0,0 @@
suite: Test ingress tpl use
templates:
- templates/gitea/ingress.yaml
tests:
- it: Ingress Class using TPL
set:
global.ingress.className: "ingress-class"
ingress.className: "{{ .Values.global.ingress.className }}"
ingress.enabled: true
ingress.hosts[0].host: "some-host"
ingress.tls:
- secretName: gitea-tls
hosts:
- "some-host"
asserts:
- isKind:
of: Ingress
- equal:
path: spec.tls[0].hosts[0]
value: "some-host"
- equal:
path: spec.rules[0].host
value: "some-host"
- equal:
path: spec.ingressClassName
value: "ingress-class"
- it: hostname using TPL
set:
global.giteaHostName: "gitea.example.com"
ingress.enabled: true
ingress.hosts[0].host: "{{ .Values.global.giteaHostName }}"
ingress.tls:
- secretName: gitea-tls
hosts:
- "{{ .Values.global.giteaHostName }}"
asserts:
- isKind:
of: Ingress
- equal:
path: spec.tls[0].hosts[0]
value: "gitea.example.com"
- equal:
path: spec.rules[0].host
value: "gitea.example.com"


@ -1,188 +0,0 @@
suite: deployment template (probes)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: renders default liveness probe
template: templates/gitea/deployment.yaml
asserts:
- notExists:
path: spec.template.spec.containers[0].livenessProbe.enabled
- isSubset:
path: spec.template.spec.containers[0].livenessProbe
content:
failureThreshold: 10
initialDelaySeconds: 200
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
- it: renders default readiness probe
template: templates/gitea/deployment.yaml
asserts:
- notExists:
path: spec.template.spec.containers[0].readinessProbe.enabled
- isSubset:
path: spec.template.spec.containers[0].readinessProbe
content:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
- it: does not render a default startup probe
template: templates/gitea/deployment.yaml
asserts:
- notExists:
path: spec.template.spec.containers[0].startupProbe
- it: allows enabling a startup probe
template: templates/gitea/deployment.yaml
set:
gitea.startupProbe.enabled: true
asserts:
- notExists:
path: spec.template.spec.containers[0].startupProbe.enabled
- isSubset:
path: spec.template.spec.containers[0].startupProbe
content:
failureThreshold: 10
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 1
- it: allows overwriting the default port of the liveness probe
template: templates/gitea/deployment.yaml
set:
gitea:
livenessProbe:
tcpSocket:
port: my-port
asserts:
- isSubset:
path: spec.template.spec.containers[0].livenessProbe
content:
tcpSocket:
port: my-port
- it: allows overwriting the default port of the readiness probe
template: templates/gitea/deployment.yaml
set:
gitea:
readinessProbe:
tcpSocket:
port: my-port
asserts:
- isSubset:
path: spec.template.spec.containers[0].readinessProbe
content:
tcpSocket:
port: my-port
- it: allows overwriting the default port of the startup probe
template: templates/gitea/deployment.yaml
set:
gitea:
startupProbe:
enabled: true
tcpSocket:
port: my-port
asserts:
- isSubset:
path: spec.template.spec.containers[0].startupProbe
content:
tcpSocket:
port: my-port
- it: allows using a non-default method as liveness probe
template: templates/gitea/deployment.yaml
set:
gitea:
livenessProbe:
httpGet:
path: /api/healthz
port: http
initialDelaySeconds: 13371
timeoutSeconds: 13372
periodSeconds: 13373
successThreshold: 13374
failureThreshold: 13375
asserts:
- notExists:
path: spec.template.spec.containers[0].livenessProbe.tcpSocket
- isSubset:
path: spec.template.spec.containers[0].livenessProbe
content:
failureThreshold: 13375
initialDelaySeconds: 13371
periodSeconds: 13373
successThreshold: 13374
httpGet:
path: /api/healthz
port: http
timeoutSeconds: 13372
- it: allows using a non-default method as readiness probe
template: templates/gitea/deployment.yaml
set:
gitea:
readinessProbe:
httpGet:
path: /api/healthz
port: http
initialDelaySeconds: 13371
timeoutSeconds: 13372
periodSeconds: 13373
successThreshold: 13374
failureThreshold: 13375
asserts:
- notExists:
path: spec.template.spec.containers[0].readinessProbe.tcpSocket
- isSubset:
path: spec.template.spec.containers[0].readinessProbe
content:
failureThreshold: 13375
initialDelaySeconds: 13371
periodSeconds: 13373
successThreshold: 13374
httpGet:
path: /api/healthz
port: http
timeoutSeconds: 13372
- it: allows using a non-default method as startup probe
template: templates/gitea/deployment.yaml
set:
gitea:
startupProbe:
enabled: true
httpGet:
path: /api/healthz
port: http
initialDelaySeconds: 13371
timeoutSeconds: 13372
periodSeconds: 13373
successThreshold: 13374
failureThreshold: 13375
asserts:
- notExists:
path: spec.template.spec.containers[0].startupProbe.tcpSocket
- isSubset:
path: spec.template.spec.containers[0].startupProbe
content:
failureThreshold: 13375
initialDelaySeconds: 13371
periodSeconds: 13373
successThreshold: 13374
httpGet:
path: /api/healthz
port: http
timeoutSeconds: 13372
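
As a usage sketch, switching the liveness probe from the default tcpSocket check to the HTTP method covered above looks like this in values.yaml (the timings mirror the chart defaults asserted in the first test of this suite):

gitea:
  livenessProbe:
    httpGet:
      path: /api/healthz
      port: http
    initialDelaySeconds: 200
    periodSeconds: 10
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 10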

View File

@ -1,21 +0,0 @@
suite: sidecar container
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/deployment.yaml
- templates/gitea/config.yaml
tests:
- it: supports adding a sidecar container
template: templates/gitea/deployment.yaml
set:
extraContainers:
- name: sidecar-bob
image: busybox
asserts:
- equal:
path: spec.template.spec.containers[1].name
value: "sidecar-bob"
- equal:
path: spec.template.spec.containers[1].image
value: "busybox"

View File

@ -1,39 +0,0 @@
# File: tests/gitea-storageclass-tests.yaml
suite: storage class configuration tests
release:
name: gitea-storageclass-tests
namespace: testing
templates:
- templates/gitea/pvc.yaml
tests:
- it: should set storageClassName when persistence.storageClass is defined
template: templates/gitea/pvc.yaml
set:
persistence.storageClass: "my-storage-class"
asserts:
- equal:
path: "spec.storageClassName"
value: "my-storage-class"
  - it: should fall back to global.storageClass when persistence.storageClass is not defined
template: templates/gitea/pvc.yaml
set:
global.storageClass: "default-storage-class"
asserts:
- equal:
path: spec.storageClassName
value: "default-storage-class"
  - it: should prefer persistence.storageClass over global.storageClass when both are defined
template: templates/gitea/pvc.yaml
set:
global.storageClass: "default-storage-class"
persistence.storageClass: "my-storage-class"
asserts:
- equal:
path: spec.storageClassName
value: "my-storage-class"

View File

@ -1,118 +0,0 @@
suite: ssh-svc / http-svc template (Services configuration)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/ssh-svc.yaml
- templates/gitea/http-svc.yaml
tests:
- it: supports adding custom labels to ssh-svc
template: templates/gitea/ssh-svc.yaml
set:
service:
ssh:
labels:
gitea/testkey: testvalue
asserts:
- equal:
path: metadata.labels["gitea/testkey"]
value: "testvalue"
- it: keeps existing labels (ssh)
template: templates/gitea/ssh-svc.yaml
set:
service:
ssh:
labels: {}
asserts:
- exists:
path: metadata.labels["app"]
- it: supports adding custom labels to http-svc
template: templates/gitea/http-svc.yaml
set:
service:
http:
labels:
gitea/testkey: testvalue
asserts:
- equal:
path: metadata.labels["gitea/testkey"]
value: "testvalue"
- it: keeps existing labels (http)
template: templates/gitea/http-svc.yaml
set:
service:
http:
labels: {}
asserts:
- exists:
path: metadata.labels["app"]
  - it: renders service.ssh.loadBalancerClass if set and type is LoadBalancer
template: templates/gitea/ssh-svc.yaml
set:
service:
ssh:
loadBalancerClass: "example.com/class"
type: LoadBalancer
loadBalancerIP: "1.2.3.4"
loadBalancerSourceRanges:
- "1.2.3.4/32"
- "5.6.7.8/32"
asserts:
- equal:
path: spec.loadBalancerClass
value: "example.com/class"
- equal:
path: spec.loadBalancerIP
value: "1.2.3.4"
- equal:
path: spec.loadBalancerSourceRanges
value: ["1.2.3.4/32", "5.6.7.8/32"]
  - it: does not render loadBalancer properties when type is not LoadBalancer
template: templates/gitea/http-svc.yaml
set:
service:
http:
type: ClusterIP
loadBalancerClass: "example.com/class"
loadBalancerIP: "1.2.3.4"
loadBalancerSourceRanges:
- "1.2.3.4/32"
- "5.6.7.8/32"
asserts:
- notExists:
path: spec.loadBalancerClass
- notExists:
path: spec.loadBalancerIP
- notExists:
path: spec.loadBalancerSourceRanges
- it: does not render loadBalancerClass by default even when type is LoadBalancer
template: templates/gitea/http-svc.yaml
set:
service:
http:
type: LoadBalancer
loadBalancerIP: "1.2.3.4"
asserts:
- notExists:
path: spec.loadBalancerClass
- equal:
path: spec.loadBalancerIP
value: "1.2.3.4"
- it: both ssh and http services exist
templates:
- templates/gitea/ssh-svc.yaml
- templates/gitea/http-svc.yaml
asserts:
- matchRegex:
path: metadata.name
pattern: "^gitea-unittests-(?:ssh|http)$"
- matchRegex:
path: spec.ports[0].name
pattern: "^(?:ssh|http)$"

View File

@ -1,93 +0,0 @@
suite: Test ingress.yaml
templates:
- templates/gitea/ingress.yaml
tests:
- it: should enable ingress when ingress.enabled is true
set:
ingress.enabled: true
ingress.apiVersion: networking.k8s.io/v1
ingress.annotations:
kubernetes.io/ingress.class: nginx
ingress.className: nginx
ingress.tls:
- hosts:
- example.com
secretName: tls-secret
ingress.hosts:
- host: example.com
paths: ["/"]
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- equal:
path: metadata.name
value: RELEASE-NAME-gitea
- matchRegex:
path: apiVersion
pattern: networking.k8s.io/v1
- equal:
path: spec.ingressClassName
value: nginx
- equal:
path: spec.rules[0].host
value: "example.com"
- equal:
path: spec.tls[0].hosts[0]
value: "example.com"
- equal:
path: spec.tls[0].secretName
value: tls-secret
- equal:
path: metadata.annotations["kubernetes.io/ingress.class"]
value: nginx
- it: should not create ingress when ingress.enabled is false
set:
ingress.enabled: false
asserts:
- hasDocuments:
count: 0
- it: Ingress Class using TPL
set:
global.ingress.className: "ingress-class"
ingress.className: "{{ .Values.global.ingress.className }}"
ingress.enabled: true
ingress.hosts[0].host: "some-host"
ingress.tls:
- secretName: gitea-tls
hosts:
- "some-host"
asserts:
- isKind:
of: Ingress
- equal:
path: spec.tls[0].hosts[0]
value: "some-host"
- equal:
path: spec.rules[0].host
value: "some-host"
- equal:
path: spec.ingressClassName
value: "ingress-class"
- it: hostname using TPL
set:
global.giteaHostName: "gitea.example.com"
ingress.enabled: true
ingress.hosts[0].host: "{{ .Values.global.giteaHostName }}"
ingress.tls:
- secretName: gitea-tls
hosts:
- "{{ .Values.global.giteaHostName }}"
asserts:
- isKind:
of: Ingress
- equal:
path: spec.tls[0].hosts[0]
value: "gitea.example.com"
- equal:
path: spec.rules[0].host
value: "gitea.example.com"

View File

@ -1,23 +0,0 @@
suite: Test ingress with implicit path defaults
templates:
- templates/gitea/ingress.yaml
tests:
- it: should use default path and pathType when no paths are specified
set:
ingress.enabled: true
ingress.hosts:
- host: git.example.com
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- equal:
path: spec.rules[0].host
value: "git.example.com"
- equal:
path: spec.rules[0].http.paths[0].path
value: "/"
- equal:
path: spec.rules[0].http.paths[0].pathType
value: "Prefix"

View File

@ -1,26 +0,0 @@
suite: Test ingress with structured paths
templates:
- templates/gitea/ingress.yaml
tests:
- it: should work with structured path definitions
set:
ingress.enabled: true
ingress.hosts:
- host: git.devxy.io
paths:
- path: /
pathType: Prefix
asserts:
- hasDocuments:
count: 1
- isKind:
of: Ingress
- equal:
path: spec.rules[0].host
value: "git.devxy.io"
- equal:
path: spec.rules[0].http.paths[0].path
value: "/"
- equal:
path: spec.rules[0].http.paths[0].pathType
value: "Prefix"

View File

@ -1,23 +0,0 @@
suite: Metrics secret template (monitoring disabled)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/metrics-secret.yaml
tests:
- it: renders nothing if monitoring disabled and gitea.metrics.token empty
set:
gitea.metrics.enabled: false
gitea.metrics.serviceMonitor.enabled: false
gitea.metrics.token: ""
asserts:
- hasDocuments:
count: 0
- it: renders nothing if monitoring disabled and gitea.metrics.token not empty
set:
gitea.metrics.enabled: false
gitea.metrics.serviceMonitor.enabled: false
gitea.metrics.token: "test-token"
asserts:
- hasDocuments:
count: 0

View File

@ -1,33 +0,0 @@
suite: Metrics secret template (monitoring enabled)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/metrics-secret.yaml
tests:
- it: renders nothing if monitoring enabled and gitea.metrics.token empty
set:
gitea.metrics.enabled: true
gitea.metrics.serviceMonitor.enabled: true
gitea.metrics.token: ""
asserts:
- hasDocuments:
count: 0
- it: renders Secret if monitoring enabled and gitea.metrics.token not empty
set:
gitea.metrics.enabled: true
gitea.metrics.serviceMonitor.enabled: true
gitea.metrics.token: "test-token"
asserts:
- hasDocuments:
count: 1
- documentIndex: 0
containsDocument:
kind: Secret
apiVersion: v1
name: gitea-unittests-metrics-secret
- isNotNullOrEmpty:
path: metadata.labels
- equal:
path: data.token
value: "dGVzdC10b2tlbg=="

View File

@ -1,19 +0,0 @@
suite: PVC template
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/pvc.yaml
tests:
- it: Storage Class using TPL
set:
global.persistence.storageClass: "storage-class"
persistence.enabled: true
persistence.create: true
persistence.storageClass: "{{ .Values.global.persistence.storageClass }}"
asserts:
- isKind:
of: PersistentVolumeClaim
- equal:
path: spec.storageClassName
value: "storage-class"

View File

@ -1,89 +0,0 @@
suite: ServiceMonitor template (basic)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/servicemonitor.yaml
tests:
- it: skips rendering by default
asserts:
- hasDocuments:
count: 0
  - it: does not render a ServiceMonitor with only gitea.metrics.enabled=true
set:
gitea.metrics.enabled: true
asserts:
- hasDocuments:
count: 0
  - it: does not render a ServiceMonitor with only gitea.metrics.serviceMonitor.enabled=true
set:
gitea.metrics.serviceMonitor.enabled: true
asserts:
- hasDocuments:
count: 0
- it: renders defaults
set:
gitea.metrics.enabled: true
gitea.metrics.serviceMonitor.enabled: true
asserts:
- hasDocuments:
count: 1
- containsDocument:
kind: ServiceMonitor
apiVersion: monitoring.coreos.com/v1
name: gitea-unittests
- notExists:
path: metadata.annotations
- notExists:
path: spec.endpoints[0].interval
- equal:
path: spec.endpoints[0].port
value: http
- notExists:
path: spec.endpoints[0].scheme
- notExists:
path: spec.endpoints[0].scrapeTimeout
- notExists:
path: spec.endpoints[0].tlsConfig
- it: renders custom scrape interval
set:
gitea.metrics.enabled: true
gitea.metrics.serviceMonitor.enabled: true
gitea.metrics.serviceMonitor.interval: 30s
gitea.metrics.serviceMonitor.scrapeTimeout: 5s
asserts:
- equal:
path: spec.endpoints[0].interval
value: 30s
- equal:
path: spec.endpoints[0].scrapeTimeout
value: 5s
- it: renders custom tls config
set:
gitea.metrics.enabled: true
gitea.metrics.serviceMonitor.enabled: true
gitea.metrics.serviceMonitor.scheme: https
gitea.metrics.serviceMonitor.tlsConfig.caFile: /etc/prometheus/tls/ca.crt
gitea.metrics.serviceMonitor.tlsConfig.certFile: /etc/prometheus/tls/tls.crt
gitea.metrics.serviceMonitor.tlsConfig.keyFile: /etc/prometheus/tls/tls.key
gitea.metrics.serviceMonitor.tlsConfig.insecureSkipVerify: false
gitea.metrics.serviceMonitor.tlsConfig.serverName: gitea-unittest
asserts:
- equal:
path: spec.endpoints[0].scheme
value: https
- equal:
path: spec.endpoints[0].tlsConfig.caFile
value: /etc/prometheus/tls/ca.crt
- equal:
path: spec.endpoints[0].tlsConfig.certFile
value: /etc/prometheus/tls/tls.crt
- equal:
path: spec.endpoints[0].tlsConfig.keyFile
value: /etc/prometheus/tls/tls.key
- equal:
path: spec.endpoints[0].tlsConfig.insecureSkipVerify
value: false
- equal:
path: spec.endpoints[0].tlsConfig.serverName
value: gitea-unittest
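
A values.yaml sketch covering the scrape and TLS options asserted above (certificate paths and the server name are the test fixtures):

gitea:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      interval: 30s
      scrapeTimeout: 5s
      scheme: https
      tlsConfig:
        caFile: /etc/prometheus/tls/ca.crt
        certFile: /etc/prometheus/tls/tls.crt
        keyFile: /etc/prometheus/tls/tls.key
        insecureSkipVerify: false
        serverName: gitea-unittest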

View File

@ -1,23 +0,0 @@
suite: ServiceMonitor template (monitoring disabled)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/servicemonitor.yaml
tests:
- it: renders nothing if gitea.metrics.serviceMonitor disabled and gitea.metrics.token empty
set:
gitea.metrics.enabled: false
gitea.metrics.token: ""
gitea.metrics.serviceMonitor.enabled: false
asserts:
- hasDocuments:
count: 0
- it: renders nothing if gitea.metrics.serviceMonitor disabled and gitea.metrics.token not empty
set:
gitea.metrics.enabled: false
gitea.metrics.token: "test-token"
gitea.metrics.serviceMonitor.enabled: false
asserts:
- hasDocuments:
count: 0

View File

@ -1,70 +0,0 @@
suite: ServiceMonitor template (monitoring enabled)
release:
name: gitea-unittests
namespace: testing
templates:
- templates/gitea/servicemonitor.yaml
tests:
  - it: renders an unsecured ServiceMonitor if gitea.metrics.token is nil
set:
gitea.metrics.enabled: true
gitea.metrics.token:
gitea.metrics.serviceMonitor.enabled: true
asserts:
- hasDocuments:
count: 1
- documentIndex: 0
containsDocument:
kind: ServiceMonitor
apiVersion: monitoring.coreos.com/v1
name: gitea-unittests
- isNotNullOrEmpty:
path: metadata.labels
- equal:
path: spec.endpoints
value:
- port: http
  - it: renders an unsecured ServiceMonitor if gitea.metrics.token is empty
set:
gitea.metrics.enabled: true
gitea.metrics.token: ""
gitea.metrics.serviceMonitor.enabled: true
asserts:
- hasDocuments:
count: 1
- documentIndex: 0
containsDocument:
kind: ServiceMonitor
apiVersion: monitoring.coreos.com/v1
name: gitea-unittests
- isNotNullOrEmpty:
path: metadata.labels
- equal:
path: spec.endpoints
value:
- port: http
  - it: renders a secured ServiceMonitor if gitea.metrics.token is not empty
set:
gitea.metrics.enabled: true
gitea.metrics.token: "test-token"
gitea.metrics.serviceMonitor.enabled: true
asserts:
- hasDocuments:
count: 1
- documentIndex: 0
containsDocument:
kind: ServiceMonitor
apiVersion: monitoring.coreos.com/v1
name: gitea-unittests
- isNotNullOrEmpty:
path: metadata.labels
- equal:
path: spec.endpoints
value:
- port: http
authorization:
type: Bearer
credentials:
name: gitea-unittests-metrics-secret
key: token
optional: false

View File

@ -1,14 +0,0 @@
suite: Values conflicting checks
release:
name: gitea-unittests
namespace: testing
tests:
  - it: fails when trying to configure valkey and valkey-cluster at the same time
set:
valkey-cluster:
enabled: true
valkey:
enabled: true
asserts:
- failedTemplate:
errorMessage: valkey and valkey-cluster cannot be enabled at the same time. Please only choose one.
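
In other words, only one of the two bundled sub-charts may be enabled at a time; a valid values.yaml sketch picks one and disables the other:

valkey-cluster:
  enabled: true    # cluster mode
valkey:
  enabled: false   # must stay disabled while valkey-cluster is enabled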

View File

@ -15,11 +15,11 @@ tests:
asserts: asserts:
- equal: - equal:
path: stringData["configure_gpg_environment.sh"] path: stringData["configure_gpg_environment.sh"]
value: | value: |-
#!/usr/bin/env bash #!/usr/bin/env bash
set -eu set -eu
gpg --batch --import "$TMP_RAW_GPG_KEY" gpg --batch --import /raw/private.asc
- it: skips gpg script block for disabled signing - it: skips gpg script block for disabled signing
asserts: asserts:
- equal: - equal:
@ -28,13 +28,15 @@ tests:
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail set -euo pipefail
mkdir -pv /data/git/.ssh
chmod -Rv 700 /data/git/.ssh set -x
[ ! -d /data/gitea/conf ] && mkdir -pv /data/gitea/conf mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure # prepare temp directory structure
mkdir -pv "${GITEA_TEMP}" mkdir -p "${GITEA_TEMP}"
chmod -v ug+rwx "${GITEA_TEMP}" chmod ug+rwx "${GITEA_TEMP}"
- it: adds gpg script block for enabled signing - it: adds gpg script block for enabled signing
set: set:
signing.enabled: true signing.enabled: true
@ -49,23 +51,25 @@ tests:
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail set -euo pipefail
mkdir -pv /data/git/.ssh
chmod -Rv 700 /data/git/.ssh set -x
[ ! -d /data/gitea/conf ] && mkdir -pv /data/gitea/conf mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure # prepare temp directory structure
mkdir -pv "${GITEA_TEMP}" mkdir -p "${GITEA_TEMP}"
chmod -v ug+rwx "${GITEA_TEMP}" chmod ug+rwx "${GITEA_TEMP}"
if [ ! -d "${GNUPGHOME}" ]; then if [ ! -d "${GNUPGHOME}" ]; then
mkdir -pv "${GNUPGHOME}" mkdir -p "${GNUPGHOME}"
chmod -v 700 "${GNUPGHOME}" chmod 700 "${GNUPGHOME}"
chown -v 1000:1000 "${GNUPGHOME}" chown 1000:1000 "${GNUPGHOME}"
fi fi
- it: it does not chown /data even when image.fullOverride is set - it: it does not chown /data even when image.fullOverride is set
template: templates/gitea/init.yaml template: templates/gitea/init.yaml
set: set:
image.fullOverride: docker.gitea.com/gitea:1.20.5 image.fullOverride: gitea/gitea:1.20.5
asserts: asserts:
- equal: - equal:
path: stringData["init_directory_structure.sh"] path: stringData["init_directory_structure.sh"]
@ -73,10 +77,12 @@ tests:
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail set -euo pipefail
mkdir -pv /data/git/.ssh
chmod -Rv 700 /data/git/.ssh set -x
[ ! -d /data/gitea/conf ] && mkdir -pv /data/gitea/conf mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure # prepare temp directory structure
mkdir -pv "${GITEA_TEMP}" mkdir -p "${GITEA_TEMP}"
chmod -v ug+rwx "${GITEA_TEMP}" chmod ug+rwx "${GITEA_TEMP}"

View File

@ -16,11 +16,11 @@ tests:
asserts: asserts:
- equal: - equal:
path: stringData["configure_gpg_environment.sh"] path: stringData["configure_gpg_environment.sh"]
value: | value: |-
#!/usr/bin/env bash #!/usr/bin/env bash
set -eu set -eu
gpg --batch --import "$TMP_RAW_GPG_KEY" gpg --batch --import /raw/private.asc
- it: skips gpg script block for disabled signing - it: skips gpg script block for disabled signing
set: set:
image.rootless: false image.rootless: false
@ -31,15 +31,17 @@ tests:
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail set -euo pipefail
chown -v 1000:1000 /data
mkdir -pv /data/git/.ssh set -x
chmod -Rv 700 /data/git/.ssh chown 1000:1000 /data
[ ! -d /data/gitea/conf ] && mkdir -pv /data/gitea/conf mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure # prepare temp directory structure
mkdir -pv "${GITEA_TEMP}" mkdir -p "${GITEA_TEMP}"
chown -v 1000:1000 "${GITEA_TEMP}" chown 1000:1000 "${GITEA_TEMP}"
chmod -v ug+rwx "${GITEA_TEMP}" chmod ug+rwx "${GITEA_TEMP}"
- it: adds gpg script block for enabled signing - it: adds gpg script block for enabled signing
set: set:
image.rootless: false image.rootless: false
@ -55,18 +57,20 @@ tests:
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail set -euo pipefail
chown -v 1000:1000 /data
mkdir -pv /data/git/.ssh set -x
chmod -Rv 700 /data/git/.ssh chown 1000:1000 /data
[ ! -d /data/gitea/conf ] && mkdir -pv /data/gitea/conf mkdir -p /data/git/.ssh
chmod -R 700 /data/git/.ssh
[ ! -d /data/gitea/conf ] && mkdir -p /data/gitea/conf
# prepare temp directory structure # prepare temp directory structure
mkdir -pv "${GITEA_TEMP}" mkdir -p "${GITEA_TEMP}"
chown -v 1000:1000 "${GITEA_TEMP}" chown 1000:1000 "${GITEA_TEMP}"
chmod -v ug+rwx "${GITEA_TEMP}" chmod ug+rwx "${GITEA_TEMP}"
if [ ! -d "${GNUPGHOME}" ]; then if [ ! -d "${GNUPGHOME}" ]; then
mkdir -pv "${GNUPGHOME}" mkdir -p "${GNUPGHOME}"
chmod -v 700 "${GNUPGHOME}" chmod 700 "${GNUPGHOME}"
chown -v 1000:1000 "${GNUPGHOME}" chown 1000:1000 "${GNUPGHOME}"
fi fi

View File

@ -20,9 +20,6 @@ global:
# hostnames: # hostnames:
# - example.com # - example.com
## @param namespace An explicit namespace to deploy gitea into. Defaults to the release namespace if not specified
namespace: ""
## @param replicaCount number of replicas for the deployment ## @param replicaCount number of replicas for the deployment
replicaCount: 1 replicaCount: 1
@ -48,8 +45,8 @@ clusterDomain: cluster.local
## @param image.rootless Whether or not to pull the rootless version of Gitea, only works on Gitea 1.14.x or higher ## @param image.rootless Whether or not to pull the rootless version of Gitea, only works on Gitea 1.14.x or higher
## @param image.fullOverride Completely overrides the image registry, path/image, tag and digest. **Adjust `image.rootless` accordingly and review [Rootless defaults](#rootless-defaults).** ## @param image.fullOverride Completely overrides the image registry, path/image, tag and digest. **Adjust `image.rootless` accordingly and review [Rootless defaults](#rootless-defaults).**
image: image:
registry: "docker.gitea.com" registry: ""
repository: gitea repository: gitea/gitea
# Overrides the image tag whose default is the chart appVersion. # Overrides the image tag whose default is the chart appVersion.
tag: "" tag: ""
digest: "" digest: ""
@ -76,7 +73,7 @@ containerSecurityContext: {}
# # run pods on nodes that use the container runtime cri-o. Otherwise, you will # # run pods on nodes that use the container runtime cri-o. Otherwise, you will
# # get an error message from the SSH server that it is not possible to read from # # get an error message from the SSH server that it is not possible to read from
# # the repository. # # the repository.
# # https://gitea.com/gitea/helm-gitea/issues/161 # # https://gitea.com/gitea/helm-chart/issues/161
# add: # add:
# - SYS_CHROOT # - SYS_CHROOT
# privileged: false # privileged: false
@ -109,8 +106,6 @@ service:
  ## @param service.http.ipFamilies HTTP service dual-stack family selection, for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/). ## @param service.http.ipFamilies HTTP service dual-stack family selection, for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
## @param service.http.loadBalancerSourceRanges Source range filter for http loadbalancer ## @param service.http.loadBalancerSourceRanges Source range filter for http loadbalancer
## @param service.http.annotations HTTP service annotations ## @param service.http.annotations HTTP service annotations
## @param service.http.labels HTTP service additional labels
## @param service.http.loadBalancerClass Loadbalancer class
http: http:
type: ClusterIP type: ClusterIP
port: 3000 port: 3000
@ -123,8 +118,6 @@ service:
ipFamilies: ipFamilies:
loadBalancerSourceRanges: [] loadBalancerSourceRanges: []
annotations: {} annotations: {}
labels: {}
loadBalancerClass:
## @param service.ssh.type Kubernetes service type for ssh traffic ## @param service.ssh.type Kubernetes service type for ssh traffic
## @param service.ssh.port Port number for ssh traffic ## @param service.ssh.port Port number for ssh traffic
## @param service.ssh.clusterIP ClusterIP setting for ssh autosetup for deployment is None ## @param service.ssh.clusterIP ClusterIP setting for ssh autosetup for deployment is None
@ -137,8 +130,6 @@ service:
## @param service.ssh.hostPort HostPort for ssh service ## @param service.ssh.hostPort HostPort for ssh service
## @param service.ssh.loadBalancerSourceRanges Source range filter for ssh loadbalancer ## @param service.ssh.loadBalancerSourceRanges Source range filter for ssh loadbalancer
## @param service.ssh.annotations SSH service annotations ## @param service.ssh.annotations SSH service annotations
## @param service.ssh.labels SSH service additional labels
## @param service.ssh.loadBalancerClass Loadbalancer class
ssh: ssh:
type: ClusterIP type: ClusterIP
port: 22 port: 22
@ -152,30 +143,36 @@ service:
hostPort: hostPort:
loadBalancerSourceRanges: [] loadBalancerSourceRanges: []
annotations: {} annotations: {}
labels: {}
loadBalancerClass:
## @section Ingress ## @section Ingress
## @param ingress.enabled Enable ingress ## @param ingress.enabled Enable ingress
## @param ingress.className DEPRECATED: Ingress class name. ## @param ingress.className Ingress class name
## @param ingress.pathType Ingress Path Type
## @param ingress.annotations Ingress annotations ## @param ingress.annotations Ingress annotations
## @param ingress.hosts[0].host Default Ingress host ## @param ingress.hosts[0].host Default Ingress host
## @param ingress.hosts[0].paths[0].path Default Ingress path ## @param ingress.hosts[0].paths[0].path Default Ingress path
## @param ingress.hosts[0].paths[0].pathType Ingress path type
## @param ingress.tls Ingress tls settings ## @param ingress.tls Ingress tls settings
## @extra ingress.apiVersion Specify APIVersion of ingress object. Mostly would only be used for argocd.
ingress: ingress:
enabled: false enabled: false
className: "" # className: nginx
pathType: Prefix className:
annotations: {} annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts: hosts:
- host: git.example.com - host: git.example.com
paths: paths:
- path: / - path: /
pathType: Prefix
tls: [] tls: []
# - secretName: chart-example-tls # - secretName: chart-example-tls
# hosts: # hosts:
# - git.example.com # - git.example.com
# Mostly for argocd or any other CI that uses `helm template | kubectl apply` or similar
# If helm doesn't correctly detect your ingress API version you can set it here.
# apiVersion: networking.k8s.io/v1
## @section deployment ## @section deployment
# #
@ -275,12 +272,6 @@ persistence:
annotations: annotations:
helm.sh/resource-policy: keep helm.sh/resource-policy: keep
## @param extraContainers Additional sidecar containers to run in the pod
extraContainers: []
# - name: sidecar-bob
# image: busybox
# command: [/bin/sh, -c, 'echo "Hello world"; sleep 86400']
## @param extraVolumes Additional volumes to mount to the Gitea deployment ## @param extraVolumes Additional volumes to mount to the Gitea deployment
extraVolumes: [] extraVolumes: []
# - name: postgres-ssl-vol # - name: postgres-ssl-vol
@ -306,8 +297,6 @@ extraVolumeMounts: []
## @section Init ## @section Init
## @param initPreScript Bash shell script copied verbatim to the start of the init-container. ## @param initPreScript Bash shell script copied verbatim to the start of the init-container.
initPreScript: "" initPreScript: ""
## @param initContainersScriptsVolumeMountPath Path to mount the scripts consumed from the Secrets
initContainersScriptsVolumeMountPath: "/usr/sbinx"
# #
# initPreScript: | # initPreScript: |
# mkdir -p /data/git/.postgresql # mkdir -p /data/git/.postgresql
@ -330,7 +319,7 @@ initContainers:
# #
## @param signing.enabled Enable commit/action signing ## @param signing.enabled Enable commit/action signing
## @param signing.gpgHome GPG home directory ## @param signing.gpgHome GPG home directory
## @param signing.privateKey Inline private gpg key for signed internal Git activity ## @param signing.privateKey Inline private gpg key for signed Gitea actions
## @param signing.existingSecret Use an existing secret to store the value of `signing.privateKey` ## @param signing.existingSecret Use an existing secret to store the value of `signing.privateKey`
signing: signing:
enabled: false enabled: false
@ -349,35 +338,21 @@ gitea:
## @param gitea.admin.existingSecret Use an existing secret to store admin user credentials ## @param gitea.admin.existingSecret Use an existing secret to store admin user credentials
## @param gitea.admin.password Password for the Gitea admin user ## @param gitea.admin.password Password for the Gitea admin user
## @param gitea.admin.email Email for the Gitea admin user ## @param gitea.admin.email Email for the Gitea admin user
## @param gitea.admin.passwordMode Mode for how to set/update the admin user password. Options are: initialOnlyNoReset, initialOnlyRequireReset, and keepUpdated
admin: admin:
# existingSecret: gitea-admin-secret # existingSecret: gitea-admin-secret
existingSecret: existingSecret:
username: gitea_admin username: gitea_admin
password: r8sA8CPHD9!bt6d password: r8sA8CPHD9!bt6d
email: "gitea@local.domain" email: "gitea@local.domain"
passwordMode: keepUpdated
## @param gitea.metrics.enabled Enable Gitea metrics ## @param gitea.metrics.enabled Enable Gitea metrics
  ## @param gitea.metrics.token Token used for `bearer` token authentication on the metrics endpoint. If not specified or empty, the metrics endpoint is public. ## @param gitea.metrics.serviceMonitor.enabled Enable Gitea metrics service monitor
  ## @param gitea.metrics.serviceMonitor.enabled Enable Gitea metrics service monitor. Requires `gitea.metrics.enabled` to also be set to true to enable metrics generally.
## @param gitea.metrics.serviceMonitor.interval Interval at which metrics should be scraped. If not specified Prometheus' global scrape interval is used.
## @param gitea.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping.
## @param gitea.metrics.serviceMonitor.scheme HTTP scheme to use for scraping. For example `http` or `https`. Default is http.
## @param gitea.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended. If not specified, global Prometheus scrape timeout is used.
## @param gitea.metrics.serviceMonitor.tlsConfig TLS configuration to use when scraping the metric endpoint by Prometheus.
metrics: metrics:
enabled: false enabled: false
token:
serviceMonitor: serviceMonitor:
enabled: false enabled: false
# additionalLabels: # additionalLabels:
# prometheus-release: prom1 # prometheus-release: prom1
interval: ""
relabelings: []
scheme: ""
scrapeTimeout: ""
tlsConfig: {}
## @param gitea.ldap LDAP configuration ## @param gitea.ldap LDAP configuration
ldap: ldap:
@ -501,55 +476,21 @@ gitea:
successThreshold: 1 successThreshold: 1
failureThreshold: 10 failureThreshold: 10
## @section valkey-cluster ## @section redis-cluster
## @param valkey-cluster.enabled Enable valkey cluster ## @param redis-cluster.enabled Enable redis
# ⚠️ The valkey charts do not work well with special characters in the password (<https://gitea.com/gitea/helm-chart/issues/690>). ## @param redis-cluster.usePassword Whether to use password authentication
# Consider omitting them, or open an issue in the Bitnami repo and let us know once this is fixed. ## @param redis-cluster.cluster.nodes Number of redis cluster master nodes
## @param valkey-cluster.usePassword Whether to use password authentication ## @param redis-cluster.cluster.replicas Number of redis cluster master node replicas
## @param valkey-cluster.usePasswordFiles Whether to mount passwords as files instead of environment variables redis-cluster:
## @param valkey-cluster.cluster.nodes Number of valkey cluster master nodes
## @param valkey-cluster.cluster.replicas Number of valkey cluster master node replicas
## @param valkey-cluster.service.ports.valkey Port of Valkey service
## @descriptionStart
## Valkey cluster and [Valkey](#valkey) cannot be enabled at the same time.
## @descriptionEnd
valkey-cluster:
enabled: true enabled: true
usePassword: false usePassword: false
usePasswordFiles: false
cluster: cluster:
nodes: 3 # default: 6 nodes: 3 # default: 6
replicas: 0 # default: 1 replicas: 0 # default: 1
service:
ports:
valkey: 6379
## @section valkey ## @section postgresql-ha
## @param valkey.enabled Enable valkey standalone or replicated
## @param valkey.architecture Whether to use standalone or replication
# ⚠️ The valkey charts do not work well with special characters in the password (<https://gitea.com/gitea/helm-chart/issues/690>).
# Consider omitting them, or open an issue in the Bitnami repo and let us know once this is fixed.
## @param valkey.global.valkey.password Required password
## @param valkey.master.count Number of Valkey master instances to deploy
## @param valkey.master.service.ports.valkey Port of Valkey service
## @descriptionStart
## Valkey and [Valkey cluster](#valkey-cluster) cannot be enabled at the same time.
## @descriptionEnd
valkey:
enabled: false
architecture: standalone
global:
valkey:
password: changeme
master:
count: 1
service:
ports:
valkey: 6379
## @section PostgreSQL HA
# #
## @param postgresql-ha.enabled Enable PostgreSQL HA ## @param postgresql-ha.enabled Enable postgresql-ha
## @param postgresql-ha.postgresql.password Password for the `gitea` user (overrides `auth.password`) ## @param postgresql-ha.postgresql.password Password for the `gitea` user (overrides `auth.password`)
## @param postgresql-ha.global.postgresql.database Name for a custom database to create (overrides `auth.database`) ## @param postgresql-ha.global.postgresql.database Name for a custom database to create (overrides `auth.database`)
## @param postgresql-ha.global.postgresql.username Name for a custom user to create (overrides `auth.username`) ## @param postgresql-ha.global.postgresql.username Name for a custom user to create (overrides `auth.username`)
@ -557,8 +498,8 @@ valkey:
## @param postgresql-ha.postgresql.repmgrPassword Repmgr Password ## @param postgresql-ha.postgresql.repmgrPassword Repmgr Password
## @param postgresql-ha.postgresql.postgresPassword postgres Password ## @param postgresql-ha.postgresql.postgresPassword postgres Password
## @param postgresql-ha.pgpool.adminPassword pgpool adminPassword ## @param postgresql-ha.pgpool.adminPassword pgpool adminPassword
## @param postgresql-ha.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) ## @param postgresql-ha.service.ports.postgresql postgresql service port (overrides `service.ports.postgresql`)
## @param postgresql-ha.persistence.size PVC Storage Request for PostgreSQL HA volume ## @param postgresql-ha.primary.persistence.size PVC Storage Request for postgresql-ha volume
postgresql-ha: postgresql-ha:
global: global:
postgresql: postgresql:
@ -575,8 +516,9 @@ postgresql-ha:
service: service:
ports: ports:
postgresql: 5432 postgresql: 5432
persistence: primary:
size: 10Gi persistence:
size: 10Gi
## @section PostgreSQL ## @section PostgreSQL
# #