commit c975cd40eb9c236b43da86bb12dc9a64540e628b Author: Markus Pesch Date: Wed Jul 19 21:44:02 2023 +0200 Initial Commit diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..86f582a --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +dcmerge \ No newline at end of file diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 0000000..102b180 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,862 @@ +--- +kind: pipeline +type: kubernetes +name: linter + +clone: + disable: true + +platform: + os: linux + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: markdown lint + commands: + - markdownlint *.md + image: git.cryptic.systems/volker.raschek/markdownlint:0.35.0 + resources: + limits: + cpu: 150 + memory: 150M + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + resources: + limits: + cpu: 150 + memory: 150M + when: + status: + - changed + - failure + +trigger: + event: + exclude: + - tag + +--- +kind: pipeline +type: docker +name: dry-run-amd64 + +clone: + disable: true + +depends_on: +- linter + +platform: + os: linux + arch: amd64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: false + dockerfile: Dockerfile + dry_run: true + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + tags: latest-amd64 + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + branch: + exclude: + - master + event: + - pull_request + - push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: dry-run-arm-v7 + +clone: + disable: true + +depends_on: +- linter + +platform: + os: linux + arch: arm + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: false + dockerfile: Dockerfile + dry_run: true + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + tags: latest-arm-v7 + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + branch: + exclude: + 
- master + event: + - pull_request + - push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: dry-run-arm64-v8 + +clone: + disable: true + +depends_on: +- linter + +platform: + os: linux + arch: arm64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: false + dockerfile: Dockerfile + dry_run: true + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + tags: latest-arm64-v8 + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + branch: + exclude: + - master + event: + - pull_request + - push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: latest-amd64 + +clone: + disable: true + +depends_on: +- linter + +platform: + os: linux + arch: amd64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: false + dockerfile: Dockerfile + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + tags: latest-amd64 + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + branch: + - master + event: + - cron + - push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: latest-arm-v7 + +clone: + disable: true + +depends_on: +- linter + +platform: + os: linux + arch: arm + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: false + dockerfile: Dockerfile + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + tags: latest-arm-v7 + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + branch: + - master + event: + - cron + - 
push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: latest-arm64-v8 + +clone: + disable: true + +depends_on: +- linter + +platform: + os: linux + arch: arm64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: false + dockerfile: Dockerfile + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + tags: latest-arm64-v8 + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + branch: + - master + event: + - cron + - push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: kubernetes +name: latest-manifest + +clone: + disable: true + +depends_on: +- latest-amd64 +- latest-arm-v7 +- latest-arm64-v8 + +# docker.io/plugins/manifest only for amd64 architectures available +node_selector: + kubernetes.io/os: linux + kubernetes.io/arch: amd64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build-manifest + image: docker.io/plugins/manifest:1.4.0 + settings: + auto_tag: false + ignore_missing: true + spec: manifest.tmpl + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + resources: + limits: + cpu: 150 + memory: 150M + when: + status: + - changed + - failure + +trigger: + branch: + - master + event: + - cron + - push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: kubernetes +name: latest-sync + +clone: + disable: true + +depends_on: +- latest-manifest + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: latest-sync + commands: + - skopeo sync --all --src=docker --src-creds=$SRC_CRED_USERNAME:$SRC_CRED_PASSWORD --dest=docker --dest-creds=$DEST_CRED_USERNAME:$DEST_CRED_PASSWORD git.cryptic.systems/volker.raschek/dcmerge docker.io/volkerraschek + environment: + SRC_CRED_USERNAME: + from_secret: git_cryptic_systems_container_registry_user + SRC_CRED_PASSWORD: + from_secret: git_cryptic_systems_container_registry_password + DEST_CRED_USERNAME: + from_secret: container_image_registry_user + DEST_CRED_PASSWORD: + from_secret: container_image_registry_password + image: quay.io/skopeo/stable:v1.13.0 + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + resources: 
+ limits: + cpu: 150 + memory: 150M + when: + status: + - changed + - failure + +trigger: + branch: + - master + event: + - cron + - push + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: tagged-amd64 + +clone: + disable: true + +platform: + os: linux + arch: amd64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: true + auto_tag_suffix: amd64 + dockerfile: Dockerfile + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + build_args: + - VERSION=${DRONE_TAG} + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + event: + - tag + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: tagged-arm-v7 + +clone: + disable: true + +platform: + os: linux + arch: arm + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: true + auto_tag_suffix: arm-v7 + dockerfile: Dockerfile + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + build_args: + - VERSION=${DRONE_TAG} + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + +trigger: + event: + - tag + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: docker +name: tagged-arm64-v8 + +clone: + disable: true + +platform: + os: linux + arch: arm64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build + image: docker.io/plugins/docker:20.10.9 + settings: + auto_tag: true + auto_tag_suffix: arm64-v8 + dockerfile: Dockerfile + force_tag: true + no_cache: true + purge: true + mirror: + from_secret: docker_io_mirror + registry: git.cryptic.systems + repo: git.cryptic.systems/volker.raschek/dcmerge + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + build_args: + - VERSION=${DRONE_TAG} + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + when: + status: + - changed + - failure + 
+trigger: + event: + - tag + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: kubernetes +name: tagged-manifest + +clone: + disable: true + +depends_on: +- tagged-amd64 +- tagged-arm-v7 +- tagged-arm64-v8 + +# docker.io/plugins/manifest only for amd64 architectures available +node_selector: + kubernetes.io/os: linux + kubernetes.io/arch: amd64 + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: build-manifest + image: docker.io/plugins/manifest:1.4.0 + settings: + auto_tag: true + ignore_missing: true + spec: manifest.tmpl + username: + from_secret: git_cryptic_systems_container_registry_user + password: + from_secret: git_cryptic_systems_container_registry_password + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + resources: + limits: + cpu: 150 + memory: 150M + when: + status: + - changed + - failure + +trigger: + event: + - tag + repo: + - volker.raschek/dcmerge + +--- +kind: pipeline +type: kubernetes +name: tagged-sync + +clone: + disable: true + +depends_on: +- tagged-manifest + +steps: +- name: clone + image: git.cryptic.systems/volker.raschek/git:1.2.1 + +- name: tagged-sync + commands: + - skopeo sync --all --src=docker --src-creds=$SRC_CRED_USERNAME:$SRC_CRED_PASSWORD --dest=docker --dest-creds=$DEST_CRED_USERNAME:$DEST_CRED_PASSWORD git.cryptic.systems/volker.raschek/dcmerge docker.io/volkerraschek + environment: + SRC_CRED_USERNAME: + from_secret: git_cryptic_systems_container_registry_user + SRC_CRED_PASSWORD: + from_secret: git_cryptic_systems_container_registry_password + DEST_CRED_USERNAME: + from_secret: container_image_registry_user + DEST_CRED_PASSWORD: + from_secret: container_image_registry_password + image: quay.io/skopeo/stable:v1.13.0 + +- name: email-notification + environment: + SMTP_FROM_ADDRESS: + from_secret: smtp_from_address + SMTP_FROM_NAME: + from_secret: smtp_from_name + SMTP_HOST: + from_secret: smtp_host + SMTP_USERNAME: + from_secret: smtp_username + SMTP_PASSWORD: + from_secret: smtp_password + image: git.cryptic.systems/volker.raschek/drone-email:0.1.2 + resources: + limits: + cpu: 150 + memory: 150M + when: + status: + - changed + - failure + +trigger: + event: + - tag + repo: + - volker.raschek/dcmerge diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..86f582a --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +dcmerge \ No newline at end of file diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 0000000..0e98dd1 --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,143 @@ +# markdownlint YAML configuration +# https://github.com/DavidAnson/markdownlint/blob/main/schema/.markdownlint.yaml + +# Default state for all rules +default: true + +# Path to configuration file to extend +extends: null + +# MD003/heading-style/header-style - Heading style +MD003: + # Heading style + style: "atx" + +# MD004/ul-style - Unordered list style +MD004: + style: "dash" + +# MD007/ul-indent - Unordered list indentation +MD007: + # Spaces for indent + indent: 2 + # Whether to indent the first level of the list + start_indented: false + +# MD009/no-trailing-spaces - Trailing spaces +MD009: + # Spaces for line break + br_spaces: 2 + # Allow spaces for empty lines in list items + 
list_item_empty_lines: false + # Include unnecessary breaks + strict: false + +# MD010/no-hard-tabs - Hard tabs +MD010: + # Include code blocks + code_blocks: true + +# MD012/no-multiple-blanks - Multiple consecutive blank lines +MD012: + # Consecutive blank lines + maximum: 1 + +# MD013/line-length - Line length +MD013: + # Number of characters + line_length: 80 + # Number of characters for headings + heading_line_length: 80 + # Number of characters for code blocks + code_block_line_length: 80 + # Include code blocks + code_blocks: false + # Include tables + tables: false + # Include headings + headings: true + # Include headings + headers: true + # Strict length checking + strict: false + # Stern length checking + stern: false + +# MD022/blanks-around-headings/blanks-around-headers - Headings should be surrounded by blank lines +MD022: + # Blank lines above heading + lines_above: 1 + # Blank lines below heading + lines_below: 1 + +# MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content +MD024: + # Only check sibling headings + allow_different_nesting: true + +# MD025/single-title/single-h1 - Multiple top-level headings in the same document +MD025: + # Heading level + level: 1 + # RegExp for matching title in front matter + front_matter_title: "^\\s*title\\s*[:=]" + +# MD026/no-trailing-punctuation - Trailing punctuation in heading +MD026: + # Punctuation characters + punctuation: ".,;:!。,;:!" + +# MD029/ol-prefix - Ordered list item prefix +MD029: + # List style + style: "one_or_ordered" + +# MD030/list-marker-space - Spaces after list markers +MD030: + # Spaces for single-line unordered list items + ul_single: 1 + # Spaces for single-line ordered list items + ol_single: 1 + # Spaces for multi-line unordered list items + ul_multi: 1 + # Spaces for multi-line ordered list items + ol_multi: 1 + +# MD033/no-inline-html - Inline HTML +MD033: + # Allowed elements + allowed_elements: [] + +# MD035/hr-style - Horizontal rule style +MD035: + # Horizontal rule style + style: "---" + +# MD036/no-emphasis-as-heading/no-emphasis-as-header - Emphasis used instead of a heading +MD036: + # Punctuation characters + punctuation: ".,;:!?。,;:!?" 
+ +# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading +MD041: + # Heading level + level: 1 + # RegExp for matching title in front matter + front_matter_title: "^\\s*title\\s*[:=]" + +# MD044/proper-names - Proper names should have the correct capitalization +MD044: + # List of proper names + names: [] + # Include code blocks + code_blocks: false + +# MD046/code-block-style - Code block style +MD046: + # Block style + style: "fenced" + +# MD048/code-fence-style - Code fence style +MD048: + # Code fence syle + style: "backtick" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..82cd1cf --- /dev/null +++ b/Dockerfile @@ -0,0 +1,20 @@ +FROM docker.io/library/golang:1.20.6-alpine3.18 AS build + +RUN apk add git make + +WORKDIR /workspace +ADD ./ /workspace + +RUN make install \ + DESTDIR=/cache \ + PREFIX=/usr \ + VERSION=${VERSION} + +FROM docker.io/library/alpine:3.18.2 + +COPY --from=build /cache / + +WORKDIR /workspace +VOLUME [ "/workspace" ] + +ENTRYPOINT [ "/usr/bin/dcmerge" ] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..08cb636 --- /dev/null +++ b/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2023 Markus Pesch + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..2e7946f --- /dev/null +++ b/Makefile @@ -0,0 +1,114 @@ +EXECUTABLE=dcmerge +VERSION?=$(shell git describe --abbrev=0)+hash.$(shell git rev-parse --short HEAD) + +# Destination directory and prefix to place the compiled binaries, documentaions +# and other files. +DESTDIR?= +PREFIX?=/usr/local + +# CONTAINER_RUNTIME +# The CONTAINER_RUNTIME variable will be used to specified the path to a +# container runtime. This is needed to start and run a container image. +CONTAINER_RUNTIME?=$(shell which podman) + +# DCMERGE_IMAGE_REGISTRY_NAME +# Defines the name of the new container to be built using several variables. 
+DCMERGE_IMAGE_REGISTRY_NAME:=git.cryptic.systems +DCMERGE_IMAGE_REGISTRY_USER:=volker.raschek + +DCMERGE_IMAGE_NAMESPACE?=${DCMERGE_IMAGE_REGISTRY_USER} +DCMERGE_IMAGE_NAME:=${EXECUTABLE} +DCMERGE_IMAGE_VERSION?=latest +DCMERGE_IMAGE_FULLY_QUALIFIED=${DCMERGE_IMAGE_REGISTRY_NAME}/${DCMERGE_IMAGE_NAMESPACE}/${DCMERGE_IMAGE_NAME}:${DCMERGE_IMAGE_VERSION} +DCMERGE_IMAGE_UNQUALIFIED=${DCMERGE_IMAGE_NAMESPACE}/${DCMERGE_IMAGE_NAME}:${DCMERGE_IMAGE_VERSION} + +# BIN +# ============================================================================== +dcmerge: + CGO_ENABLED=0 \ + GOPRIVATE=$(shell go env GOPRIVATE) \ + GOPROXY=$(shell go env GOPROXY) \ + GONOPROXY=$(shell go env GONOPROXY) \ + GONOSUMDB=$(shell go env GONOSUMDB) \ + GOSUMDB=$(shell go env GOSUMDB) \ + go build -ldflags "-X 'main.version=${VERSION}'" -o ${@} main.go + +# CLEAN +# ============================================================================== +PHONY+=clean +clean: + rm --force --recursive dcmerge + +# TESTS +# ============================================================================== +PHONY+=test/unit +test/unit: + go test -v -p 1 -coverprofile=coverage.txt -covermode=count -timeout 1200s ./pkg/... + +PHONY+=test/integration +test/integration: + go test -v -p 1 -count=1 -timeout 1200s ./it/... + +PHONY+=test/coverage +test/coverage: test/unit + go tool cover -html=coverage.txt + +# GOLANGCI-LINT +# ============================================================================== +PHONY+=golangci-lint +golangci-lint: + golangci-lint run --concurrency=$(shell nproc) + +# INSTALL +# ============================================================================== +PHONY+=uninstall +install: dcmerge + install --directory ${DESTDIR}/etc/bash_completion.d + ./dcmerge completion bash > ${DESTDIR}/etc/bash_completion.d/${EXECUTABLE} + + install --directory ${DESTDIR}${PREFIX}/bin + install --mode 0755 ${EXECUTABLE} ${DESTDIR}${PREFIX}/bin/${EXECUTABLE} + + install --directory ${DESTDIR}${PREFIX}/share/licenses/${EXECUTABLE} + install --mode 0644 LICENSE ${DESTDIR}${PREFIX}/share/licenses/${EXECUTABLE}/LICENSE + +# UNINSTALL +# ============================================================================== +PHONY+=uninstall +uninstall: + -rm --force --recursive \ + ${DESTDIR}/etc/bash_completion.d/${EXECUTABLE} \ + ${DESTDIR}${PREFIX}/bin/${EXECUTABLE} \ + ${DESTDIR}${PREFIX}/share/licenses/${EXECUTABLE} + +# BUILD CONTAINER IMAGE +# ============================================================================== +PHONY+=container-image/build +container-image/build: + ${CONTAINER_RUNTIME} build \ + --build-arg VERSION=${VERSION} \ + --file Dockerfile \ + --no-cache \ + --pull \ + --tag ${DCMERGE_IMAGE_FULLY_QUALIFIED} \ + --tag ${DCMERGE_IMAGE_UNQUALIFIED} \ + . 
+
+# DELETE CONTAINER IMAGE
+# ==============================================================================
+PHONY+=container-image/delete
+container-image/delete:
+	- ${CONTAINER_RUNTIME} image rm ${DCMERGE_IMAGE_FULLY_QUALIFIED} ${DCMERGE_IMAGE_UNQUALIFIED}
+
+# PUSH CONTAINER IMAGE
+# ==============================================================================
+PHONY+=container-image/push
+container-image/push:
+	echo ${DCMERGE_IMAGE_REGISTRY_PASSWORD} | ${CONTAINER_RUNTIME} login ${DCMERGE_IMAGE_REGISTRY_NAME} --username ${DCMERGE_IMAGE_REGISTRY_USER} --password-stdin
+	${CONTAINER_RUNTIME} push ${DCMERGE_IMAGE_FULLY_QUALIFIED}
+
+# PHONY
+# ==============================================================================
+# Declare the contents of the PHONY variable as phony. We keep that information
+# in a variable so we can use it in if_changed.
+.PHONY: ${PHONY}
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8cea04c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,129 @@
+# dcmerge
+
+[![Build Status](https://drone.cryptic.systems/api/badges/volker.raschek/dcmerge/status.svg)](https://drone.cryptic.systems/volker.raschek/dcmerge)
+[![Docker Pulls](https://img.shields.io/docker/pulls/volkerraschek/dcmerge)](https://hub.docker.com/r/volkerraschek/dcmerge)
+
+`dcmerge` is a small program to merge docker-compose files from multiple
+sources. It is available via RPM and as a Docker image.
+
+The dynamic patterns of a docker-compose file, for example that `environments`
+can be specified either as a string slice or as a list of objects, are
+currently not supported. `dcmerge` expects a strict layout: `environments`,
+`ports` and `volumes` must be declared as slices of strings.
+
+Docker-compose files can be read in from different sources. Currently, the
+following sources are supported:
+
+- File
+- HTTP/HTTPS
+
+Furthermore, `dcmerge` supports different ways to merge multiple
+docker-compose files:
+
+- The default merge adds missing secrets, services, networks and volumes.
+- The existing-win merge adds missing attributes and protects existing ones.
+- The last-win merge adds missing attributes and overwrites existing ones.
+
+## default
+
+Merges only missing secrets, services, networks and volumes, without
+considering their attributes. For example, when the service `app` is already
+declared, it is not possible to add the service `app` a second time; the
+second declaration is skipped completely.
+
+```yaml
+---
+# cat ~/docker-compose-A.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=HelloWorld123
+    image: example.local/app/name:0.1.0
+---
+# cat ~/docker-compose-B.yaml
+services:
+  app:
+    image: app/name:2.3.0
+    volumes:
+      - /etc/localtime:/etc/localtime
+      - /dev/urandom:/etc/urandom
+  db:
+    image: postgres
+    volumes:
+      - /etc/localtime:/etc/localtime
+      - /dev/urandom:/etc/urandom
+---
+# dcmerge ~/docker-compose-A.yaml ~/docker-compose-B.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=HelloWorld123
+    image: example.local/app/name:0.1.0
+  db:
+    image: postgres
+    volumes:
+      - /etc/localtime:/etc/localtime
+      - /dev/urandom:/etc/urandom
+```
+
+## existing-win
+
+The existing-win merge protects existing attributes. For example, there are
+two different docker-compose files and both define the same environment
+variable `CLIENT_SECRET` with different values. The first declaration of the
+attribute wins and is protected from being overwritten.
+
+```yaml
+---
+# cat ~/docker-compose-A.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=HelloWorld123
+    image: example.local/app/name:0.1.0
+---
+# cat ~/docker-compose-B.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=FooBar123
+    image: example.local/app/name:0.1.0
+---
+# dcmerge --existing-win ~/docker-compose-A.yaml ~/docker-compose-B.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=HelloWorld123
+    image: example.local/app/name:0.1.0
+```
+
+## last-win
+
+The last-win merge recursively overwrites existing attributes. For example,
+there are two different docker-compose files and both define the same
+environment variable `CLIENT_SECRET` with different values. The last
+docker-compose file passed that contains this environment variable wins.
+
+```yaml
+---
+# cat ~/docker-compose-A.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=HelloWorld123
+    image: example.local/app/name:0.1.0
+---
+# cat ~/docker-compose-B.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=FooBar123
+    image: example.local/app/name:0.1.0
+---
+# dcmerge --last-win ~/docker-compose-A.yaml ~/docker-compose-B.yaml
+services:
+  app:
+    environments:
+      - CLIENT_SECRET=FooBar123
+    image: example.local/app/name:0.1.0
+```
diff --git a/cmd/root.go b/cmd/root.go
new file mode 100644
index 0000000..926735d
--- /dev/null
+++ b/cmd/root.go
@@ -0,0 +1,75 @@
+package cmd
+
+import (
+	"fmt"
+	"os"
+
+	"git.cryptic.systems/volker.raschek/dcmerge/pkg/domain/dockerCompose"
+	"git.cryptic.systems/volker.raschek/dcmerge/pkg/fetcher"
+	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+)
+
+func Execute(version string) error {
+	completionCmd := &cobra.Command{
+		Use:                   "completion [bash|zsh|fish|powershell]",
+		Short:                 "Generate completion script",
+		Long:                  "To load completions",
+		DisableFlagsInUseLine: true,
+		ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
+		Args:                  cobra.ExactValidArgs(1),
+		Run: func(cmd *cobra.Command, args []string) {
+			switch args[0] {
+			case "bash":
+				cmd.Root().GenBashCompletion(os.Stdout)
+			case "zsh":
+				cmd.Root().GenZshCompletion(os.Stdout)
+			case "fish":
+				cmd.Root().GenFishCompletion(os.Stdout, true)
+			case "powershell":
+				cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
+			}
+		},
+	}
+
+	rootCmd := &cobra.Command{
+		Use:   "dcmerge",
+		Args:  cobra.MinimumNArgs(2),
+		Short: "Merge docker-compose files from multiple resources",
+		Example: `dcmerge docker-compose.yml ./integration-test/docker-compose.yml
+dcmerge docker-compose.yml https://git.example.local/user/repo/docker-compose.yml`,
+		RunE:    run,
+		Version: version,
+	}
+	rootCmd.Flags().BoolP("merge-last-win", "l", true, "Overwrite existing attributes")
+	rootCmd.AddCommand(completionCmd)
+
+	return rootCmd.Execute()
+}
+
+func run(cmd *cobra.Command, args []string) error {
+	mergeLastWin, err := cmd.Flags().GetBool("merge-last-win")
+	if err != nil {
+		return fmt.Errorf("failed to parse flag merge-last-win: %w", err)
+	}
+
+	dockerComposeConfig := dockerCompose.NewConfig()
+
+	dockerComposeConfigs, err := fetcher.Fetch(args...)
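+	// Each fetched document is decoded into a *dockerCompose.Config. Based on
+	// the README and the usage below, sources may be local file paths or
+	// HTTP/HTTPS URLs; the exact behaviour of fetcher.Fetch is assumed here.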
+ if err != nil { + return err + } + + for _, config := range dockerComposeConfigs { + switch { + case mergeLastWin: + dockerComposeConfig.MergeLastWin(config) + default: + dockerComposeConfig.Merge(config) + } + + } + + yamlEncoder := yaml.NewEncoder(os.Stdout) + return yamlEncoder.Encode(dockerComposeConfig) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..be3e42f --- /dev/null +++ b/go.mod @@ -0,0 +1,17 @@ +module git.cryptic.systems/volker.raschek/dcmerge + +go 1.20 + +require ( + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.4 + gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..44aa0ef --- /dev/null +++ b/go.sum @@ -0,0 +1,20 @@ +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main.go b/main.go new file mode 100644 index 0000000..7785ce0 --- /dev/null +++ b/main.go @@ -0,0 +1,9 @@ +package main + +import "git.cryptic.systems/volker.raschek/dcmerge/cmd" + +var version string + +func main() { + cmd.Execute(version) +} diff --git a/manifest.tmpl b/manifest.tmpl new file mode 100644 index 0000000..2c11b5b --- /dev/null +++ b/manifest.tmpl @@ -0,0 +1,26 @@ +image: git.cryptic.systems/volker.raschek/dcmerge:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}} +{{#if build.tags}} +tags: +{{#each build.tags}} + - {{this}} +{{/each}} + - "latest" +{{/if}} +manifests: + - + image: git.cryptic.systems/volker.raschek/dcmerge:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}-amd64 + platform: + architecture: amd64 + os: linux + - + image: 
git.cryptic.systems/volker.raschek/dcmerge:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}-arm-v7 + platform: + architecture: arm + os: linux + variant: v7 + - + image: git.cryptic.systems/volker.raschek/dcmerge:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}-arm64-v8 + platform: + architecture: arm64 + os: linux + variant: v8 diff --git a/pkg/domain/dockerCompose/config.go b/pkg/domain/dockerCompose/config.go new file mode 100644 index 0000000..87d25c5 --- /dev/null +++ b/pkg/domain/dockerCompose/config.go @@ -0,0 +1,1210 @@ +package dockerCompose + +import ( + "fmt" + "strings" +) + +const ( + environmentDelimiter string = "=" + labelDelimiter string = "=" + volumeDelimiter string = ":" + portDelimiter string = ":" + portProtocolDelimiter string = "/" +) + +type Config struct { + Networks map[string]*Network `json:"networks,omitempty" yaml:"networks,omitempty"` + Secrets map[string]*Secret `json:"secrets,omitempty" yaml:"secrets,omitempty"` + Services map[string]*Service `json:"services,omitempty" yaml:"services,omitempty"` + Version string `json:"version,omitempty" yaml:"version,omitempty"` + Volumes map[string]*Volume `json:"volumes,omitempty" yaml:"volumes,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (c *Config) Equal(equalable Equalable) bool { + config, ok := equalable.(*Config) + if !ok { + return false + } + + switch { + case c == nil && config == nil: + return true + case c != nil && config == nil: + fallthrough + case c == nil && config != nil: + return false + default: + return EqualStringMap(c.Networks, config.Networks) && + EqualStringMap(c.Secrets, config.Secrets) && + EqualStringMap(c.Services, config.Services) && + c.Version == config.Version && + EqualStringMap(c.Volumes, config.Volumes) + } +} + +// ExistsNetwork returns true if a network with the passed named exists. +func (c *Config) ExistsNetwork(name string) bool { + return ExistsInMap(c.Networks, name) +} + +// ExistsSecret returns true if a secret with the passed named exists. +func (c *Config) ExistsSecret(name string) bool { + return ExistsInMap(c.Secrets, name) +} + +// ExistsService returns true if a service with the passed named exists. +func (c *Config) ExistsService(name string) bool { + return ExistsInMap(c.Services, name) +} + +// ExistsVolumes returns true if a volume with the passed named exists. +func (c *Config) ExistsVolume(name string) bool { + return ExistsInMap(c.Volumes, name) +} + +// Merge adds only a missing network, secret, service and volume. +func (c *Config) Merge(config *Config) { + for name, network := range c.Networks { + if !c.ExistsNetwork(name) { + c.Networks[name] = network + } + } + + for name, secret := range c.Secrets { + if !c.ExistsSecret(name) { + c.Secrets[name] = secret + } + } + + for name, service := range c.Services { + if !c.ExistsService(name) { + c.Services[name] = service + } + } + + for name, volume := range c.Volumes { + if !c.ExistsVolume(name) { + c.Volumes[name] = volume + } + } +} + +// MergeLastWin merges a config and overwrite already existing properties +func (c *Config) MergeLastWin(config *Config) { + switch { + case c == nil && config == nil: + fallthrough + case c != nil && config == nil: + return + + // WARN: It's not possible to change the memory pointer c *Config + // to a new initialized config without returning the Config + // it self. 
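+	// (Go passes the receiver pointer by value, so assigning a freshly
+	// constructed Config to c inside this method would not be visible to the
+	// caller; only the data the pointer refers to can be modified in place.)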
+ // + // case c == nil && config != nil: + // c = NewConfig() + // fallthrough + + default: + c.mergeLastWinNetworks(config.Networks) + c.mergeLastWinSecrets(config.Secrets) + c.mergeLastWinServices(config.Services) + c.mergeLastWinVersion(config.Version) + c.mergeLastWinVolumes(config.Volumes) + } +} + +func (c *Config) mergeLastWinVersion(version string) { + if c.Version != version { + c.Version = version + } +} + +func (c *Config) mergeLastWinNetworks(networks map[string]*Network) { + for networkName, network := range networks { + if network == nil { + continue + } + + if c.ExistsNetwork(networkName) { + c.Networks[networkName].MergeLastWin(network) + } else { + c.Networks[networkName] = network + } + } +} + +func (c *Config) mergeLastWinSecrets(secrets map[string]*Secret) { + for secretName, secret := range secrets { + if secret == nil { + continue + } + + if c.ExistsNetwork(secretName) { + c.Secrets[secretName].MergeLastWin(secret) + } else { + c.Secrets[secretName] = secret + } + } +} + +func (c *Config) mergeLastWinServices(services map[string]*Service) { + for serviceName, service := range services { + if service == nil { + continue + } + + if c.ExistsService(serviceName) { + c.Services[serviceName].MergeLastWin(service) + } else { + c.Services[serviceName] = service + } + } +} + +func (c *Config) mergeLastWinVolumes(volumes map[string]*Volume) { + for volumeName, volume := range volumes { + if volume == nil { + continue + } + + if c.ExistsNetwork(volumeName) { + c.Volumes[volumeName].MergeLastWin(volume) + } else { + c.Volumes[volumeName] = volume + } + } +} + +func NewConfig() *Config { + return &Config{ + Services: make(map[string]*Service), + Networks: make(map[string]*Network), + Secrets: make(map[string]*Secret), + Volumes: make(map[string]*Volume), + } +} + +type Network struct { + External bool `json:"external,omitempty" yaml:"external,omitempty"` + Driver string `json:"driver,omitempty" yaml:"driver,omitempty"` + IPAM *NetworkIPAM `json:"ipam,omitempty" yaml:"ipam,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (n *Network) Equal(equalable Equalable) bool { + network, ok := equalable.(*Network) + if !ok { + return false + } + + switch { + case n == nil && network == nil: + return true + case n != nil && network == nil: + fallthrough + case n == nil && network != nil: + return false + default: + return n.External == network.External && + n.Driver == network.Driver && + n.IPAM.Equal(network.IPAM) + } +} + +func (n *Network) MergeLastWin(network *Network) { + switch { + case n == nil && network == nil: + fallthrough + case n != nil && network == nil: + return + + // WARN: It's not possible to change the memory pointer n *Network + // to a new initialized network without returning the Network + // it self. 
+ // + // case n == nil && network != nil: + // c = NewCNetwork() + // fallthrough + + default: + n.mergeLastWinIPAM(network.IPAM) + } +} + +func (n *Network) mergeLastWinIPAM(networkIPAM *NetworkIPAM) { + if !n.IPAM.Equal(networkIPAM) { + n.IPAM.MergeLastWin(networkIPAM) + } +} + +func NewNetwork() *Network { + return &Network{ + External: false, + IPAM: new(NetworkIPAM), + } +} + +type NetworkIPAM struct { + Configs []*NetworkIPAMConfig `json:"config,omitempty" yaml:"config,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (nIPAM *NetworkIPAM) Equal(equalable Equalable) bool { + networkIPAM, ok := equalable.(*NetworkIPAM) + if !ok { + return false + } + + switch { + case nIPAM == nil && networkIPAM == nil: + return true + case nIPAM != nil && networkIPAM == nil: + fallthrough + case nIPAM == nil && networkIPAM != nil: + return false + default: + return Equal(nIPAM.Configs, networkIPAM.Configs) + } +} + +func (nIPAM *NetworkIPAM) MergeLastWin(networkIPAM *NetworkIPAM) { + switch { + case nIPAM == nil && networkIPAM == nil: + fallthrough + case nIPAM != nil && networkIPAM == nil: + return + + // WARN: It's not possible to change the memory pointer n *NetworkIPAM + // to a new initialized networkIPAM without returning the NetworkIPAM + // it self. + // + // case nIPAM == nil && networkIPAM != nil: + // c = NewNetworkIPAM() + // fallthrough + + default: + nIPAM.mergeLastWinConfig(networkIPAM.Configs) + } +} + +func (nIPAM *NetworkIPAM) mergeLastWinConfig(networkIPAMConfigs []*NetworkIPAMConfig) { + for _, networkIPAMConfig := range networkIPAMConfigs { + if !existsInSlice(nIPAM.Configs, networkIPAMConfig) { + nIPAM.Configs = append(nIPAM.Configs, networkIPAMConfig) + } + } +} + +func NewNetworkIPAM() *NetworkIPAM { + return &NetworkIPAM{ + Configs: make([]*NetworkIPAMConfig, 0), + } +} + +type NetworkIPAMConfig struct { + Subnet string `json:"subnet,omitempty" yaml:"subnet,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (nIPAMConfig *NetworkIPAMConfig) Equal(equalable Equalable) bool { + networkIPAMConfig, ok := equalable.(*NetworkIPAMConfig) + if !ok { + return false + } + + switch { + case nIPAMConfig == nil && networkIPAMConfig == nil: + return true + case nIPAMConfig != nil && networkIPAMConfig == nil: + fallthrough + case nIPAMConfig == nil && networkIPAMConfig != nil: + return false + default: + return nIPAMConfig.Subnet == networkIPAMConfig.Subnet + } +} + +func NewNetworkIPAMConfig() *NetworkIPAMConfig { + return &NetworkIPAMConfig{} +} + +type Secret struct { + File string `json:"file,omitempty" yaml:"file,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (s *Secret) Equal(equalable Equalable) bool { + secret, ok := equalable.(*Secret) + if !ok { + return false + } + + switch { + case s == nil && secret == nil: + return true + case s != nil && secret == nil: + fallthrough + case s == nil && secret != nil: + return false + default: + return s.File == secret.File + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed secret +// with the existing one. 
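+// In practice this copies secret.File over s.File whenever the two secrets
+// differ.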
+func (s *Secret) MergeLastWin(secret *Secret) { + if !s.Equal(secret) { + s.File = secret.File + } +} + +func NewSecret() *Secret { + return &Secret{} +} + +type Service struct { + CapabilitiesAdd []string `json:"cap_add,omitempty" yaml:"cap_add,omitempty"` + CapabilitiesDrop []string `json:"cap_drop,omitempty" yaml:"cap_drop,omitempty"` + Deploy *ServiceDeploy `json:"deploy,omitempty" yaml:"deploy,omitempty"` + Environments []string `json:"environment,omitempty" yaml:"environment,omitempty"` + ExtraHosts []string `json:"extra_hosts,omitempty" yaml:"extra_hosts,omitempty"` + Image string `json:"image,omitempty" yaml:"image,omitempty"` + Labels []string `json:"labels,omitempty" yaml:"labels,omitempty"` + Networks map[string]*ServiceNetwork `json:"networks,omitempty" yaml:"networks,omitempty"` + Ports []string `json:"ports,omitempty" yaml:"ports,omitempty"` + Secrets []string `json:"secrets,omitempty" yaml:"secrets,omitempty"` + ULimits *ServiceULimits `json:"ulimits,omitempty" yaml:"ulimits,omitempty"` + Volumes []string `json:"volumes,omitempty" yaml:"volumes,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (s *Service) Equal(equalable Equalable) bool { + service, ok := equalable.(*Service) + if !ok { + return false + } + + switch { + case s == nil && service == nil: + return true + case s != nil && service == nil: + fallthrough + case s == nil && service != nil: + return false + default: + return equalSlice(s.CapabilitiesAdd, service.CapabilitiesAdd) && + equalSlice(s.CapabilitiesDrop, service.CapabilitiesDrop) && + s.Deploy.Equal(service.Deploy) && + equalSlice(s.Environments, service.Environments) && + equalSlice(s.ExtraHosts, service.ExtraHosts) && + s.Image == service.Image && + equalSlice(s.Labels, service.Labels) && + EqualStringMap(s.Networks, service.Networks) && + equalSlice(s.Ports, service.Ports) && + equalSlice(s.Secrets, service.Secrets) && + s.ULimits.Equal(service.ULimits) && + equalSlice(s.Volumes, service.Volumes) + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed secret +// with the existing one. +func (s *Service) MergeLastWin(service *Service) { + switch { + case s == nil && service == nil: + fallthrough + case s != nil && service == nil: + return + + // WARN: It's not possible to change the memory pointer s *Service + // to a new initialized service without returning the Service + // it self. 
+ // + // case s == nil && service != nil: + // s = NewService() + // fallthrough + + default: + s.mergeLastWinCapabilitiesAdd(service.CapabilitiesAdd) + s.mergeLastWinCapabilitiesDrop(service.CapabilitiesDrop) + s.mergeLastWinDeploy(service.Deploy) + s.mergeLastWinEnvironments(service.Environments) + s.mergeLastWinExtraHosts(service.ExtraHosts) + s.mergeLastWinImage(service.Image) + s.mergeLastWinLabels(service.Labels) + s.mergeLastWinNetworks(service.Networks) + s.mergeLastWinPorts(service.Ports) + s.mergeLastWinSecrets(service.Secrets) + s.mergeLastWinULimits(service.ULimits) + s.mergeLastWinVolumes(service.Volumes) + } +} + +func (s *Service) mergeLastWinCapabilitiesAdd(capabilitiesAdd []string) { + for _, capabilityAdd := range capabilitiesAdd { + if !existsInSlice(s.CapabilitiesAdd, capabilityAdd) { + s.CapabilitiesAdd = append(s.CapabilitiesAdd, capabilityAdd) + } + } +} + +func (s *Service) mergeLastWinCapabilitiesDrop(capabilitiesDrop []string) { + for _, capabilityDrop := range capabilitiesDrop { + if !existsInSlice(s.CapabilitiesAdd, capabilityDrop) { + s.CapabilitiesDrop = append(s.CapabilitiesDrop, capabilityDrop) + } + } +} + +func (s *Service) mergeLastWinDeploy(deploy *ServiceDeploy) { + switch { + case s.Deploy == nil && deploy != nil: + s.Deploy = deploy + case s.Deploy != nil && deploy == nil: + fallthrough + case s.Deploy == nil && deploy == nil: + return + default: + s.Deploy.MergeLastWin(deploy) + } +} + +func (s *Service) mergeLastWinEnvironments(environments []string) { + switch { + case s.Environments == nil && environments != nil: + s.Environments = environments + case s.Environments != nil && environments == nil: + fallthrough + case s.Environments == nil && environments == nil: + return + default: + for _, environment := range environments { + key, value := splitStringInKeyValue(environment, environmentDelimiter) + s.SetEnvironment(key, value) + } + } +} + +func (s *Service) mergeLastWinImage(image string) { + if s.Image != image { + s.Image = image + } +} + +func (s *Service) mergeLastWinExtraHosts(extraHosts []string) { + for _, extraHost := range extraHosts { + if !existsInSlice(s.ExtraHosts, extraHost) { + s.ExtraHosts = append(s.ExtraHosts, extraHost) + } + } +} + +func (s *Service) mergeLastWinLabels(labels []string) { + switch { + case s.Labels == nil && labels != nil: + s.Labels = labels + case s.Labels != nil && labels == nil: + fallthrough + case s.Labels == nil && labels == nil: + return + default: + for _, label := range labels { + key, value := splitStringInKeyValue(label, labelDelimiter) + s.SetLabel(key, value) + } + } +} + +func (s *Service) mergeLastWinNetworks(networks map[string]*ServiceNetwork) { + switch { + case s.Networks == nil && networks != nil: + s.Networks = networks + case s.Networks != nil && networks == nil: + fallthrough + case s.Networks == nil && networks == nil: + return + default: + for name, network := range networks { + if _, exists := s.Networks[name]; exists { + s.Networks[name].MergeLastWin(network) + } else { + s.Networks[name] = network + } + } + } +} + +func (s *Service) mergeLastWinPorts(ports []string) { + switch { + case s.Ports == nil && ports != nil: + s.Ports = ports + case s.Ports != nil && ports == nil: + fallthrough + case s.Ports == nil && ports == nil: + return + default: + for _, port := range ports { + src, dest, protocol := splitStringInPort(port) + s.SetPort(src, dest, protocol) + } + } +} + +func (s *Service) mergeLastWinSecrets(secrets []string) { + for _, secret := range secrets { + if 
!existsInSlice(s.Secrets, secret) { + s.Secrets = append(s.Secrets, secret) + } + } +} + +func (s *Service) mergeLastWinULimits(uLimits *ServiceULimits) { + switch { + case s.ULimits == nil && uLimits != nil: + s.ULimits = uLimits + case s.ULimits != nil && uLimits == nil: + fallthrough + case s.ULimits == nil && uLimits == nil: + return + default: + s.ULimits.MergeLastWin(uLimits) + } +} + +func (s *Service) mergeLastWinVolumes(volumes []string) { + switch { + case s.Volumes == nil && volumes != nil: + s.Volumes = volumes + case s.Volumes != nil && volumes == nil: + fallthrough + case s.Volumes == nil && volumes == nil: + return + default: + for _, volume := range volumes { + src, dest, perm := splitStringInVolume(volume) + s.SetVolume(src, dest, perm) + } + } +} + +// RemoveEnvironment remove all found environment variable from the internal +// slice matching by the passed name. +func (s *Service) RemoveEnvironment(name string) { + environments := make([]string, 0) + for _, environment := range s.Environments { + key, value := splitStringInKeyValue(environment, environmentDelimiter) + if key != name { + environments = append(environments, fmt.Sprintf("%s%s%s", key, environmentDelimiter, value)) + } + } + s.Environments = environments +} + +// RemoveLabel remove all found labels from the internal slice matching by the +// passed name. +func (s *Service) RemoveLabel(name string) { + labels := make([]string, 0) + for _, label := range s.Labels { + key, value := splitStringInKeyValue(label, labelDelimiter) + if key != name { + labels = append(labels, fmt.Sprintf("%s%s%s", key, labelDelimiter, value)) + } + } + s.Labels = labels +} + +// RemovePort remove all found ports from the internal slice matching by the +// passed dest port. +func (s *Service) RemovePort(dest string) { + ports := make([]string, 0) + for _, port := range s.Ports { + srcPort, destPort, protocol := splitStringInPort(port) + + switch { + case destPort == dest && len(protocol) <= 0: + s.Ports = append(s.Ports, fmt.Sprintf("%s%s%s", srcPort, portDelimiter, destPort)) + case destPort == dest && len(protocol) > 0: + s.Ports = append(s.Ports, fmt.Sprintf("%s%s%s%s%s", srcPort, portDelimiter, destPort, portProtocolDelimiter, protocol)) + } + } + s.Ports = ports +} + +// RemoveVolume remove all found volumes from the internal slice matching by the +// dest path. +func (s *Service) RemoveVolume(dest string) { + volumes := make([]string, 0) + for _, volume := range s.Volumes { + srcPath, destPath, perm := splitStringInVolume(volume) + + switch { + case destPath == dest && len(perm) <= 0: + s.Volumes = append(s.Volumes, fmt.Sprintf("%s%s%s", srcPath, volumeDelimiter, destPath)) + case destPath == dest && len(perm) > 0: + s.Volumes = append(s.Volumes, fmt.Sprintf("%s%s%s%s%s", srcPath, volumeDelimiter, destPath, volumeDelimiter, perm)) + } + } + s.Volumes = volumes +} + +// SetEnvironment add or overwrite an existing environment variable. +func (s *Service) SetEnvironment(name string, value string) { + s.RemoveEnvironment(name) + s.Environments = append(s.Environments, fmt.Sprintf("%s%s%s", name, environmentDelimiter, value)) +} + +// SetLabel add or overwrite an existing label. +func (s *Service) SetLabel(name string, value string) { + s.RemoveLabel(name) + s.Labels = append(s.Labels, fmt.Sprintf("%s%s%s", name, labelDelimiter, value)) +} + +// SetPort add or overwrite an existing port. 
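+// RemovePort(dest) is called first, so the destination port acts as the key;
+// the new entry is then appended as "src:dest" or "src:dest/protocol".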
+func (s *Service) SetPort(src string, dest string, protocol string) { + s.RemovePort(dest) + if len(protocol) <= 0 { + s.Ports = append(s.Ports, fmt.Sprintf("%s%s%s", src, volumeDelimiter, dest)) + } else { + s.Ports = append(s.Ports, fmt.Sprintf("%s%s%s%s%s", src, portDelimiter, dest, portProtocolDelimiter, protocol)) + } +} + +// SetVolume add or overwrite an existing volume. +func (s *Service) SetVolume(src string, dest string, perm string) { + s.RemoveVolume(dest) + if len(perm) <= 0 { + s.Volumes = append(s.Volumes, fmt.Sprintf("%s%s%s", src, volumeDelimiter, dest)) + } else { + s.Volumes = append(s.Volumes, fmt.Sprintf("%s%s%s%s%s", src, volumeDelimiter, dest, volumeDelimiter, perm)) + } +} + +// NewService returns an empty initialized Service. +func NewService() *Service { + return &Service{ + CapabilitiesAdd: make([]string, 0), + CapabilitiesDrop: make([]string, 0), + Deploy: new(ServiceDeploy), + Environments: make([]string, 0), + ExtraHosts: make([]string, 0), + Labels: make([]string, 0), + Networks: make(map[string]*ServiceNetwork), + Ports: make([]string, 0), + Secrets: make([]string, 0), + ULimits: new(ServiceULimits), + Volumes: make([]string, 0), + } +} + +type ServiceDeploy struct { + Resources *ServiceDeployResources `json:"resources" yaml:"resources"` +} + +// Equal returns true if the passed equalable is equal +func (sd *ServiceDeploy) Equal(equalable Equalable) bool { + serviceDeploy, ok := equalable.(*ServiceDeploy) + if !ok { + return false + } + + switch { + case sd == nil && serviceDeploy == nil: + return true + case sd != nil && serviceDeploy == nil: + fallthrough + case sd == nil && serviceDeploy != nil: + return false + default: + return sd.Resources.Equal(serviceDeploy.Resources) + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed +// serviceDeploy with the existing one. +func (sd *ServiceDeploy) MergeLastWin(serviceDeploy *ServiceDeploy) { + switch { + case sd == nil && serviceDeploy == nil: + fallthrough + case sd != nil && serviceDeploy == nil: + return + + // WARN: It's not possible to change the memory pointer sd *ServiceDeploy + // to a new initialized serviceDeploy without returning the ServiceDeploy + // it self. 
+ // + // case sd == nil && serviceDeploy != nil: + // sd = NewServiceDeploy() + // fallthrough + + default: + sd.mergeLastWinDeployResources(serviceDeploy.Resources) + } +} + +func (sd *ServiceDeploy) mergeLastWinDeployResources(resources *ServiceDeployResources) { + switch { + case sd.Resources == nil && resources != nil: + sd.Resources = resources + case sd.Resources != nil && resources == nil: + fallthrough + case sd.Resources == nil && resources == nil: + return + default: + sd.Resources.MergeLastWin(resources) + } +} + +func NewServiceDeploy() *ServiceDeploy { + return &ServiceDeploy{ + Resources: new(ServiceDeployResources), + } +} + +type ServiceDeployResources struct { + Limits *ServiceDeployResourcesLimits `json:"limits,omitempty" yaml:"limits,omitempty"` + Reservations *ServiceDeployResourcesLimits `json:"reservations,omitempty" yaml:"reservations,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (sdr *ServiceDeployResources) Equal(equalable Equalable) bool { + serviceDeployResources, ok := equalable.(*ServiceDeployResources) + if !ok { + return false + } + + switch { + case sdr == nil && serviceDeployResources == nil: + return true + case sdr != nil && serviceDeployResources == nil: + fallthrough + case sdr == nil && serviceDeployResources != nil: + return false + default: + return sdr.Limits.Equal(serviceDeployResources.Limits) && + sdr.Reservations.Equal(serviceDeployResources.Reservations) + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed +// serviceDeployResources with the existing one. +func (sdr *ServiceDeployResources) MergeLastWin(serviceDeployResources *ServiceDeployResources) { + switch { + case sdr == nil && serviceDeployResources == nil: + fallthrough + case sdr != nil && serviceDeployResources == nil: + return + + // WARN: It's not possible to change the memory pointer sdr *ServiceDeployResources + // to a new initialized serviceDeployResources without returning the + // serviceDeployResources it self. 
+ case sdr == nil && serviceDeployResources != nil: + sdr = NewServiceDeployResources() + fallthrough + default: + sdr.mergeLastWinLimits(serviceDeployResources.Limits) + sdr.mergeLastWinReservations(serviceDeployResources.Reservations) + } +} + +func (sdr *ServiceDeployResources) mergeLastWinLimits(limits *ServiceDeployResourcesLimits) { + switch { + case sdr.Limits == nil && limits != nil: + sdr.Limits = limits + case sdr.Limits != nil && limits == nil: + fallthrough + case sdr.Limits == nil && limits == nil: + return + default: + sdr.Limits.MergeLastWin(limits) + } +} + +func (sdr *ServiceDeployResources) mergeLastWinReservations(reservations *ServiceDeployResourcesLimits) { + switch { + case sdr.Reservations == nil && reservations != nil: + sdr.Reservations = reservations + case sdr.Reservations != nil && reservations == nil: + fallthrough + case sdr.Reservations == nil && reservations == nil: + return + default: + sdr.Reservations.MergeLastWin(reservations) + } +} + +func NewServiceDeployResources() *ServiceDeployResources { + return &ServiceDeployResources{ + Limits: new(ServiceDeployResourcesLimits), + Reservations: new(ServiceDeployResourcesLimits), + } +} + +type ServiceDeployResourcesLimits struct { + CPUs string `json:"cpus,omitempty" yaml:"cpus,omitempty"` + Memory string `json:"memory,omitempty" yaml:"memory,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (sdrl *ServiceDeployResourcesLimits) Equal(equalable Equalable) bool { + serviceDeployResourcesLimits, ok := equalable.(*ServiceDeployResourcesLimits) + if !ok { + return false + } + + switch { + case sdrl == nil && serviceDeployResourcesLimits == nil: + return true + case sdrl != nil && serviceDeployResourcesLimits == nil: + fallthrough + case sdrl == nil && serviceDeployResourcesLimits != nil: + return false + default: + return sdrl.CPUs == serviceDeployResourcesLimits.CPUs && + sdrl.Memory == serviceDeployResourcesLimits.Memory + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed +// serviceDeployResourcesLimits with the existing one. +func (sdrl *ServiceDeployResourcesLimits) MergeLastWin(serviceDeployResourcesLimits *ServiceDeployResourcesLimits) { + switch { + case sdrl == nil && serviceDeployResourcesLimits == nil: + fallthrough + case sdrl != nil && serviceDeployResourcesLimits == nil: + return + + // WARN: It's not possible to change the memory pointer sdrl *ServiceDeployResourcesLimits + // to a new initialized serviceDeployResourcesLimits without returning the + // serviceDeployResourcesLimits it self. 
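Editor's note: a usage sketch of the last-win rule for scalar attributes, assuming the exported API shown above (the import path comes from the tests below; the values are made up). Note that the passed value wins even when one of its fields is left empty.

package main

import (
	"fmt"

	"git.cryptic.systems/volker.raschek/dcmerge/pkg/domain/dockerCompose"
)

func main() {
	base := &dockerCompose.ServiceDeployResourcesLimits{CPUs: "1", Memory: "500"}
	override := &dockerCompose.ServiceDeployResourcesLimits{CPUs: "2"} // Memory left empty

	// Last win: every attribute of the passed value replaces the existing one,
	// including zero values such as the empty Memory string.
	base.MergeLastWin(override)

	fmt.Printf("cpus=%q memory=%q\n", base.CPUs, base.Memory) // cpus="2" memory=""
}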
+ // + // case sdrl == nil && serviceDeployResourcesLimits != nil: + // sdrl = NewServiceDeployResourcesLimits() + // fallthrough + + default: + sdrl.mergeLastWinCPUs(serviceDeployResourcesLimits.CPUs) + sdrl.mergeLastWinMemory(serviceDeployResourcesLimits.Memory) + } +} + +func (sdrl *ServiceDeployResourcesLimits) mergeLastWinCPUs(cpus string) { + if sdrl.CPUs != cpus { + sdrl.CPUs = cpus + } +} + +func (sdrl *ServiceDeployResourcesLimits) mergeLastWinMemory(memory string) { + if sdrl.Memory != memory { + sdrl.Memory = memory + } +} + +func NewServiceDeployResourcesLimits() *ServiceDeployResourcesLimits { + return &ServiceDeployResourcesLimits{} +} + +type ServiceNetwork struct { + Aliases []string `json:"aliases,omitempty" yaml:"aliases,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (sn *ServiceNetwork) Equal(equalable Equalable) bool { + serviceNetwork, ok := equalable.(*ServiceNetwork) + if !ok { + return false + } + + switch { + case sn == nil && serviceNetwork == nil: + return true + case sn != nil && serviceNetwork == nil: + fallthrough + case sn == nil && serviceNetwork != nil: + return false + default: + return equalSlice(sn.Aliases, serviceNetwork.Aliases) + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed +// serviceNetwork with the existing one. +func (sn *ServiceNetwork) MergeLastWin(serviceNetwork *ServiceNetwork) { + switch { + case sn == nil && serviceNetwork == nil: + fallthrough + case sn != nil && serviceNetwork == nil: + return + + // WARN: It's not possible to change the memory pointer sn *ServiceNetwork to a new + // initialized ServiceNetwork without returning the serviceNetwork it self. + // + // case l == nil && serviceULimits != nil: + // l = NewServiceULimits() + // fallthrough + + case sn == nil && serviceNetwork != nil: + sn = NewServiceNetwork() + fallthrough + default: + sn.mergeLastWinAliases(serviceNetwork.Aliases) + } +} + +func (sn *ServiceNetwork) mergeLastWinAliases(aliases []string) { + for _, alias := range aliases { + if !existsInSlice(sn.Aliases, alias) { + sn.Aliases = append(sn.Aliases, alias) + } + } +} + +func NewServiceNetwork() *ServiceNetwork { + return &ServiceNetwork{ + Aliases: make([]string, 0), + } +} + +type ServiceULimits struct { + NProc uint `json:"nproc,omitempty" yaml:"nproc,omitempty"` + NoFile *ServiceULimitsNoFile `json:"nofile,omitempty" yaml:"nofile,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (l *ServiceULimits) Equal(equalable Equalable) bool { + serviceULimits, ok := equalable.(*ServiceULimits) + if !ok { + return false + } + + switch { + case l == nil && serviceULimits == nil: + return true + case l != nil && serviceULimits == nil: + fallthrough + case l == nil && serviceULimits != nil: + return false + default: + return l.NProc == serviceULimits.NProc && + l.NoFile.Equal(serviceULimits.NoFile) + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed +// ServiceULimits with the existing one. +func (l *ServiceULimits) MergeLastWin(serviceULimits *ServiceULimits) { + switch { + case l == nil && serviceULimits == nil: + fallthrough + case l != nil && serviceULimits == nil: + return + + // WARN: It's not possible to change the memory pointer l *ServiceULimits to a new + // initialized ServiceULimits without returning the serviceULimits it self. 
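Editor's note: in contrast to the scalar fields shown earlier, slice attributes such as network aliases are merged as a union: existing entries are kept and only unknown entries are appended. A short sketch under the same assumptions as above (the alias names are made up):

package main

import (
	"fmt"

	"git.cryptic.systems/volker.raschek/dcmerge/pkg/domain/dockerCompose"
)

func main() {
	a := &dockerCompose.ServiceNetwork{Aliases: []string{"app.proxy.network"}}
	b := &dockerCompose.ServiceNetwork{Aliases: []string{"app.proxy.network", "app.internal"}}

	// Aliases already present are skipped; new ones are appended.
	a.MergeLastWin(b)

	fmt.Println(a.Aliases) // [app.proxy.network app.internal]
}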
+ // + // case l == nil && serviceULimits != nil: + // l = NewServiceULimits() + // fallthrough + + default: + l.mergeLastWinNProc(serviceULimits.NProc) + l.mergeLastWinNoFile(serviceULimits.NoFile) + } +} + +func (l *ServiceULimits) mergeLastWinNProc(nproc uint) { + if l.NProc != nproc { + l.NProc = nproc + } +} + +func (l *ServiceULimits) mergeLastWinNoFile(noFile *ServiceULimitsNoFile) { + if !l.NoFile.Equal(noFile) { + l.NoFile.MergeLastWin(noFile) + } +} + +func NewServiceULimits() *ServiceULimits { + return &ServiceULimits{ + NoFile: new(ServiceULimitsNoFile), + } +} + +type ServiceULimitsNoFile struct { + Hard uint `json:"hard" yaml:"hard"` + Soft uint `json:"soft" yaml:"soft"` +} + +// Equal returns true if the passed equalable is equal +func (nf *ServiceULimitsNoFile) Equal(equalable Equalable) bool { + serviceULimitsNoFile, ok := equalable.(*ServiceULimitsNoFile) + if !ok { + return false + } + + switch { + case nf == nil && serviceULimitsNoFile == nil: + return true + case nf != nil && serviceULimitsNoFile == nil: + fallthrough + case nf == nil && serviceULimitsNoFile != nil: + return false + default: + return nf.Hard == serviceULimitsNoFile.Hard && + nf.Soft == serviceULimitsNoFile.Soft + } +} + +// MergeLastWin merges adds or overwrite the attributes of the passed +// ServiceULimits with the existing one. +func (nf *ServiceULimitsNoFile) MergeLastWin(serviceULimitsNoFile *ServiceULimitsNoFile) { + switch { + case nf == nil && serviceULimitsNoFile == nil: + fallthrough + case nf != nil && serviceULimitsNoFile == nil: + return + + // WARN: It's not possible to change the memory pointer nf *ServiceULimitsNoFile + // to a new initialized ServiceULimitsNoFile without returning the serviceULimitsNoFile + // it self. + // + // case nf == nil && serviceULimitsNoFile != nil: + // nf = NewServiceULimitsNoFile() + // fallthrough + + default: + nf.mergeLastWinHard(serviceULimitsNoFile.Hard) + nf.mergeLastWinSoft(serviceULimitsNoFile.Soft) + } +} + +func (nf *ServiceULimitsNoFile) mergeLastWinHard(hard uint) { + if nf.Hard != hard { + nf.Hard = hard + } +} + +func (nf *ServiceULimitsNoFile) mergeLastWinSoft(soft uint) { + if nf.Soft != soft { + nf.Soft = soft + } +} + +func NewServiceULimitsNoFile() *ServiceULimitsNoFile { + return &ServiceULimitsNoFile{} +} + +type Volume struct { + External bool `json:"external,omitempty" yaml:"external,omitempty"` +} + +// Equal returns true if the passed equalable is equal +func (v *Volume) Equal(equalable Equalable) bool { + volume, ok := equalable.(*Volume) + if !ok { + return false + } + + switch { + case v == nil && volume == nil: + return true + case v != nil && volume == nil: + fallthrough + case v == nil && volume != nil: + return false + default: + return v.External == volume.External + } +} + +func (v *Volume) MergeLastWin(volume *Volume) { + switch { + case v == nil && volume == nil: + fallthrough + case v != nil && volume == nil: + return + + // WARN: It's not possible to change the memory pointer v *Volume to a new + // initialized Volume without returning the volume it self. + // + // case v == nil && volume != nil: + // v = NewVolume() + // fallthrough + + default: + v.mergeLastWinExternal(volume.External) + } +} + +func (v *Volume) mergeLastWinExternal(external bool) { + if v.External != external { + v.External = external + } +} + +func NewVolume() *Volume { + return &Volume{ + External: false, + } +} + +// existsInSlice returns true when the passed comparable K exists in slice of +// comparables []K. 
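Editor's note: the two unexported helpers defined directly below compare slices as sets: order does not matter and duplicates are not counted, which differs from the exported Equal helper in equalable.go that also compares lengths. Because the helpers are unexported, the illustration is written as a hypothetical internal test in the same package.

package dockerCompose

import "testing"

// TestEqualSlice_Semantics is a hypothetical internal test; it only documents
// the comparison semantics of the unexported slice helpers.
func TestEqualSlice_Semantics(t *testing.T) {
	if !equalSlice([]string{"a", "b"}, []string{"b", "a"}) {
		t.Error("order must not matter")
	}
	if !equalSlice([]string{"a", "a"}, []string{"a"}) {
		t.Error("duplicates are not counted, so these slices compare as equal")
	}
	if existsInSlice([]string{"a", "b"}, "c") {
		t.Error("c is not part of the slice")
	}
}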
+func existsInSlice[K comparable](comparables []K, k K) bool { + for _, c := range comparables { + if c == k { + return true + } + } + return false +} + +func equalSlice[K comparable](sliceA []K, sliceB []K) bool { + equalFunc := func(sliceA []K, sliceB []K) bool { + LOOP: + for i := range sliceA { + for j := range sliceB { + if sliceA[i] == sliceB[j] { + continue LOOP + } + } + return false + } + return true + } + + return equalFunc(sliceA, sliceB) && equalFunc(sliceB, sliceA) +} + +func splitStringInKeyValue(s, sep string) (string, string) { + key := strings.Split(s, sep)[0] + value := strings.TrimPrefix(s, fmt.Sprintf("%s%s", key, sep)) + return key, value +} + +func splitStringInPort(s string) (string, string, string) { + parts := strings.Split(s, portDelimiter) + src := parts[0] + rest := parts[1] + + parts = strings.Split(rest, portProtocolDelimiter) + if len(parts) == 2 { + return src, parts[0], parts[1] + } + + return src, parts[0], "" +} + +func splitStringInVolume(s string) (string, string, string) { + parts := strings.Split(s, volumeDelimiter) + src := parts[0] + dest := parts[1] + if len(parts) == 3 && len(parts[2]) > 0 { + perm := parts[2] + return src, dest, perm + } + return src, dest, "" +} diff --git a/pkg/domain/dockerCompose/config_test.go b/pkg/domain/dockerCompose/config_test.go new file mode 100644 index 0000000..4fe949b --- /dev/null +++ b/pkg/domain/dockerCompose/config_test.go @@ -0,0 +1,2093 @@ +package dockerCompose_test + +import ( + "testing" + + "git.cryptic.systems/volker.raschek/dcmerge/pkg/domain/dockerCompose" + "github.com/stretchr/testify/require" +) + +func TestNetwork_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.Network{ + External: true, + }, + equalableB: &dockerCompose.NetworkIPAM{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Network{ + External: true, + }, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Network{ + External: false, + Driver: "bridge", + IPAM: nil, + }, + equalableB: &dockerCompose.Network{ + External: false, + Driver: "bridge", + IPAM: nil, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Network{ + External: false, + Driver: "host", + IPAM: nil, + }, + equalableB: &dockerCompose.Network{ + External: false, + Driver: "bride", + IPAM: nil, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Network{ + External: true, + Driver: "bridge", + IPAM: nil, + }, + equalableB: &dockerCompose.Network{ + External: false, + Driver: "bridge", + IPAM: nil, + }, + expectedResult: false, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestNetworkIPAM_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.NetworkIPAM{}, + equalableB: &dockerCompose.Service{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.NetworkIPAM{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.NetworkIPAM{ + Configs: make([]*dockerCompose.NetworkIPAMConfig, 0), + }, + equalableB: &dockerCompose.NetworkIPAM{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.NetworkIPAM{ + Configs: 
make([]*dockerCompose.NetworkIPAMConfig, 0), + }, + equalableB: &dockerCompose.NetworkIPAM{ + Configs: make([]*dockerCompose.NetworkIPAMConfig, 0), + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestNetworkIPAMConfig_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.NetworkIPAMConfig{}, + equalableB: &dockerCompose.Service{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.NetworkIPAMConfig{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.NetworkIPAMConfig{ + Subnet: "10.12.13.14/15", + }, + equalableB: &dockerCompose.NetworkIPAMConfig{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.NetworkIPAMConfig{ + Subnet: "10.12.13.14/15", + }, + equalableB: &dockerCompose.NetworkIPAMConfig{ + Subnet: "10.12.13.14/15", + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestSecret_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.Secret{}, + equalableB: &dockerCompose.Service{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Secret{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Secret{ + File: "/var/run/docker/app/secret", + }, + equalableB: &dockerCompose.Secret{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Secret{ + File: "/var/run/docker/app/secret", + }, + equalableB: &dockerCompose.Secret{ + File: "/var/run/docker/app/secret", + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestService_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.Service{}, + equalableB: &dockerCompose.Secret{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{}, + equalableB: &dockerCompose.Service{}, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + CapabilitiesAdd: []string{}, + CapabilitiesDrop: []string{}, + Deploy: nil, + Environments: []string{}, + ExtraHosts: []string{}, + Image: "", + Labels: []string{}, + Networks: map[string]*dockerCompose.ServiceNetwork{}, + Ports: []string{}, + Secrets: []string{}, + ULimits: nil, + Volumes: []string{}, + }, + equalableB: &dockerCompose.Service{ + CapabilitiesAdd: []string{}, + CapabilitiesDrop: []string{}, + Deploy: nil, + Environments: []string{}, + ExtraHosts: []string{}, + Image: "", + Labels: []string{}, + Networks: map[string]*dockerCompose.ServiceNetwork{}, + Ports: []string{}, + Secrets: []string{}, + ULimits: nil, + Volumes: []string{}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + CapabilitiesAdd: 
[]string{"NET_ADMIN"}, + }, + equalableB: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_ADMIN"}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_ADMIN"}, + }, + equalableB: &dockerCompose.Service{ + CapabilitiesAdd: []string{}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_ADMIN"}, + }, + equalableB: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_ADMIN"}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_ADMIN"}, + }, + equalableB: &dockerCompose.Service{ + CapabilitiesDrop: []string{}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Deploy: &dockerCompose.ServiceDeploy{}, + }, + equalableB: &dockerCompose.Service{ + Deploy: nil, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=localhost.localdomain"}, + }, + equalableB: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=localhost.localdomain"}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=localhost.localdomain"}, + }, + equalableB: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=localhost"}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=localhost.localdomain"}, + }, + equalableB: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=localdomain.localhost"}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + ExtraHosts: []string{"my-app.u.orbis-healthcare.com"}, + }, + equalableB: &dockerCompose.Service{ + ExtraHosts: []string{"my-app.u.orbis-healthcare.com"}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + ExtraHosts: []string{"my-app.u.orbis-healthcare.com"}, + }, + equalableB: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Image: "registry.example.local/my/app:latest", + }, + equalableB: &dockerCompose.Service{ + Image: "registry.example.local/my/app:latest", + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + Image: "registry.example.local/my/app:latest", + }, + equalableB: &dockerCompose.Service{ + Image: "", + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Labels: []string{"keyA=valueA"}, + }, + equalableB: &dockerCompose.Service{ + Labels: []string{"keyA=valueA"}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + Labels: []string{"keyA=valueA", "keyA=valueB"}, + }, + equalableB: &dockerCompose.Service{ + Labels: []string{"keyA=valueA"}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + equalableB: &dockerCompose.Service{ + Networks: nil, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + equalableB: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + Ports: []string{"80:80/tcp"}, + }, + equalableB: &dockerCompose.Service{ + Ports: []string{"80:80/tcp"}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + Ports: []string{"80:80/tcp"}, + 
}, + equalableB: &dockerCompose.Service{ + Ports: []string{"80:80/udp"}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Secrets: make([]string, 0), + }, + equalableB: &dockerCompose.Service{ + Secrets: make([]string, 0), + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + equalableB: &dockerCompose.Service{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + equalableB: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.Service{ + Volumes: []string{"/var/run/docker/volume/mountA"}, + }, + equalableB: &dockerCompose.Service{ + Volumes: []string{"/var/run/docker/volume/mountB"}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Service{ + Volumes: []string{"/var/run/docker/volume/mountA"}, + }, + equalableB: &dockerCompose.Service{ + Volumes: []string{"/var/run/docker/volume/mountA"}, + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestService_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + serviceDeploymentA *dockerCompose.Service + serviceDeploymentB *dockerCompose.Service + expectedService *dockerCompose.Service + }{ + { + serviceDeploymentA: nil, + serviceDeploymentB: nil, + expectedService: nil, + }, + { + serviceDeploymentA: &dockerCompose.Service{}, + serviceDeploymentB: &dockerCompose.Service{}, + expectedService: &dockerCompose.Service{}, + }, + + // CapabilitiesAdd + { + serviceDeploymentA: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_RAW"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + CapabilitiesAdd: []string{}, + }, + expectedService: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_RAW"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + CapabilitiesAdd: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_RAW"}, + }, + expectedService: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_RAW"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_RAW"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_RAW"}, + }, + expectedService: &dockerCompose.Service{ + CapabilitiesAdd: []string{"NET_RAW"}, + }, + }, + + // CapabilitiesDrop + { + serviceDeploymentA: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_RAW"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + CapabilitiesDrop: []string{}, + }, + expectedService: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_RAW"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + CapabilitiesDrop: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_RAW"}, + }, + expectedService: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_RAW"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_RAW"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_RAW"}, + }, + expectedService: &dockerCompose.Service{ + CapabilitiesDrop: []string{"NET_RAW"}, + }, + }, + + // Deploy + { + serviceDeploymentA: &dockerCompose.Service{ + Deploy: nil, + }, + 
serviceDeploymentB: &dockerCompose.Service{ + Deploy: nil, + }, + expectedService: &dockerCompose.Service{ + Deploy: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Deploy: dockerCompose.NewServiceDeploy(), + }, + serviceDeploymentB: &dockerCompose.Service{ + Deploy: nil, + }, + expectedService: &dockerCompose.Service{ + Deploy: dockerCompose.NewServiceDeploy(), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Deploy: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Deploy: dockerCompose.NewServiceDeploy(), + }, + expectedService: &dockerCompose.Service{ + Deploy: dockerCompose.NewServiceDeploy(), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Deploy: dockerCompose.NewServiceDeploy(), + }, + serviceDeploymentB: &dockerCompose.Service{ + Deploy: dockerCompose.NewServiceDeploy(), + }, + expectedService: &dockerCompose.Service{ + Deploy: dockerCompose.NewServiceDeploy(), + }, + }, + + // Environments + { + serviceDeploymentA: &dockerCompose.Service{ + Environments: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Environments: nil, + }, + expectedService: &dockerCompose.Service{ + Environments: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Environments: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Environments: nil, + }, + expectedService: &dockerCompose.Service{ + Environments: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Environments: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Environments: []string{}, + }, + expectedService: &dockerCompose.Service{ + Environments: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Environments: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Environments: []string{}, + }, + expectedService: &dockerCompose.Service{ + Environments: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=u.example.com"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=u.example.local"}, + }, + expectedService: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=u.example.local"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=u.example.com", "PROXY_HOST=u.example.de"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=u.example.local"}, + }, + expectedService: &dockerCompose.Service{ + Environments: []string{"PROXY_HOST=u.example.local"}, + }, + }, + + // ExtraHosts + { + serviceDeploymentA: &dockerCompose.Service{ + ExtraHosts: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + ExtraHosts: nil, + }, + expectedService: &dockerCompose.Service{ + ExtraHosts: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + ExtraHosts: nil, + }, + expectedService: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ExtraHosts: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + expectedService: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + expectedService: &dockerCompose.Service{ + ExtraHosts: []string{}, + }, + }, + { + serviceDeploymentA: 
&dockerCompose.Service{ + ExtraHosts: []string{"extra.host.local"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + ExtraHosts: []string{"extra.host.local"}, + }, + expectedService: &dockerCompose.Service{ + ExtraHosts: []string{"extra.host.local"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ExtraHosts: []string{"extra.host.local"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + ExtraHosts: []string{"extra.host.com"}, + }, + expectedService: &dockerCompose.Service{ + ExtraHosts: []string{"extra.host.com", "extra.host.local"}, + }, + }, + + // Labels + { + serviceDeploymentA: &dockerCompose.Service{ + Labels: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Labels: nil, + }, + expectedService: &dockerCompose.Service{ + Labels: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Labels: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Labels: nil, + }, + expectedService: &dockerCompose.Service{ + Labels: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Labels: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Labels: []string{}, + }, + expectedService: &dockerCompose.Service{ + Labels: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Labels: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Labels: []string{}, + }, + expectedService: &dockerCompose.Service{ + Labels: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Labels: []string{"prometheus.io/scrape=true"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Labels: []string{"prometheus.io/scrape=true"}, + }, + expectedService: &dockerCompose.Service{ + Labels: []string{"prometheus.io/scrape=true"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Labels: []string{"prometheus.io/scrape=true", "prometheus.io/scrape=false"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Labels: []string{"prometheus.io/scrape=true"}, + }, + expectedService: &dockerCompose.Service{ + Labels: []string{"prometheus.io/scrape=true"}, + }, + }, + + // Networks + { + serviceDeploymentA: &dockerCompose.Service{ + Networks: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Networks: nil, + }, + expectedService: &dockerCompose.Service{ + Networks: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + serviceDeploymentB: &dockerCompose.Service{ + Networks: nil, + }, + expectedService: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Networks: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + expectedService: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + serviceDeploymentB: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + expectedService: &dockerCompose.Service{ + Networks: make(map[string]*dockerCompose.ServiceNetwork), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Networks: map[string]*dockerCompose.ServiceNetwork{ + "proxy": {Aliases: []string{"app.proxy.network"}}, + }, + }, + serviceDeploymentB: &dockerCompose.Service{ + Networks: nil, + }, + expectedService: &dockerCompose.Service{ + Networks: 
map[string]*dockerCompose.ServiceNetwork{ + "proxy": {Aliases: []string{"app.proxy.network"}}, + }, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Networks: map[string]*dockerCompose.ServiceNetwork{ + "proxy": {Aliases: []string{"app.proxy.network"}}, + }, + }, + serviceDeploymentB: &dockerCompose.Service{ + Networks: map[string]*dockerCompose.ServiceNetwork{ + "db": {Aliases: []string{"app.db.network"}}, + }, + }, + expectedService: &dockerCompose.Service{ + Networks: map[string]*dockerCompose.ServiceNetwork{ + "db": {Aliases: []string{"app.db.network"}}, + "proxy": {Aliases: []string{"app.proxy.network"}}, + }, + }, + }, + + // Ports + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: nil, + }, + expectedService: &dockerCompose.Service{ + Ports: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: nil, + }, + expectedService: &dockerCompose.Service{ + Ports: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: []string{}, + }, + expectedService: &dockerCompose.Service{ + Ports: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: []string{}, + }, + expectedService: &dockerCompose.Service{ + Ports: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: []string{"80:80"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: []string{"80:80"}, + }, + expectedService: &dockerCompose.Service{ + Ports: []string{"80:80"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: []string{"80:80"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: []string{"10080:80"}, + }, + expectedService: &dockerCompose.Service{ + Ports: []string{"10080:80"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: []string{"80:80/tcp"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: []string{"80:80"}, + }, + expectedService: &dockerCompose.Service{ + Ports: []string{"80:80"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Ports: []string{"80:80"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Ports: []string{"10080:80/udp"}, + }, + expectedService: &dockerCompose.Service{ + Ports: []string{"10080:80/udp"}, + }, + }, + + // Secrets + { + serviceDeploymentA: &dockerCompose.Service{ + Secrets: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Secrets: nil, + }, + expectedService: &dockerCompose.Service{ + Secrets: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Secrets: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Secrets: nil, + }, + expectedService: &dockerCompose.Service{ + Secrets: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Secrets: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Secrets: []string{}, + }, + expectedService: &dockerCompose.Service{ + Secrets: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Secrets: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Secrets: []string{}, + }, + expectedService: &dockerCompose.Service{ + Secrets: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Secrets: []string{"db_pass_credentials"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + 
Secrets: []string{"db_pass_credentials"}, + }, + expectedService: &dockerCompose.Service{ + Secrets: []string{"db_pass_credentials"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Secrets: []string{"db_pass_credentials"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Secrets: []string{"oauth2_pass_credentials"}, + }, + expectedService: &dockerCompose.Service{ + Secrets: []string{"db_pass_credentials", "oauth2_pass_credentials"}, + }, + }, + + // ULimits + { + serviceDeploymentA: &dockerCompose.Service{ + ULimits: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + ULimits: nil, + }, + expectedService: &dockerCompose.Service{ + ULimits: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + serviceDeploymentB: &dockerCompose.Service{ + ULimits: nil, + }, + expectedService: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ULimits: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + expectedService: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + serviceDeploymentB: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + expectedService: &dockerCompose.Service{ + ULimits: dockerCompose.NewServiceULimits(), + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ULimits: &dockerCompose.ServiceULimits{ + NProc: 10, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + }, + }, + serviceDeploymentB: &dockerCompose.Service{ + ULimits: &dockerCompose.ServiceULimits{ + NProc: 10, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + }, + }, + expectedService: &dockerCompose.Service{ + ULimits: &dockerCompose.ServiceULimits{ + NProc: 10, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + }, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + ULimits: &dockerCompose.ServiceULimits{ + NProc: 10, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + }, + }, + serviceDeploymentB: &dockerCompose.Service{ + ULimits: &dockerCompose.ServiceULimits{ + NProc: 15, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 25, + Soft: 20, + }, + }, + }, + expectedService: &dockerCompose.Service{ + ULimits: &dockerCompose.ServiceULimits{ + NProc: 15, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 25, + Soft: 20, + }, + }, + }, + }, + + // Volumes + { + serviceDeploymentA: &dockerCompose.Service{ + Volumes: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Volumes: nil, + }, + expectedService: &dockerCompose.Service{ + Volumes: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Volumes: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Volumes: nil, + }, + expectedService: &dockerCompose.Service{ + Volumes: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Volumes: nil, + }, + serviceDeploymentB: &dockerCompose.Service{ + Volumes: []string{}, + }, + expectedService: &dockerCompose.Service{ + Volumes: []string{}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Volumes: []string{}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Volumes: []string{}, + }, + expectedService: &dockerCompose.Service{ + Volumes: []string{}, + }, + }, + { + 
serviceDeploymentA: &dockerCompose.Service{ + Volumes: []string{"/etc/localtime:/etc/localtime"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Volumes: []string{"/etc/localtime:/etc/localtime"}, + }, + expectedService: &dockerCompose.Service{ + Volumes: []string{"/etc/localtime:/etc/localtime"}, + }, + }, + { + serviceDeploymentA: &dockerCompose.Service{ + Volumes: []string{"/etc/localtime:/etc/localtime"}, + }, + serviceDeploymentB: &dockerCompose.Service{ + Volumes: []string{"/usr/share/zoneinfo/Europe/Berlin:/etc/localtime"}, + }, + expectedService: &dockerCompose.Service{ + Volumes: []string{"/usr/share/zoneinfo/Europe/Berlin:/etc/localtime"}, + }, + }, + } + + for i, testCase := range testCases { + testCase.serviceDeploymentA.MergeLastWin(testCase.serviceDeploymentB) + require.True(testCase.expectedService.Equal(testCase.serviceDeploymentA), "Failed test case %v", i) + } +} + +func TestSecretDeploy_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.ServiceDeploy{}, + equalableB: &dockerCompose.Service{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeploy{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeploy{ + Resources: dockerCompose.NewServiceDeployResources(), + }, + equalableB: &dockerCompose.ServiceDeploy{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeploy{ + Resources: dockerCompose.NewServiceDeployResources(), + }, + equalableB: &dockerCompose.ServiceDeploy{ + Resources: dockerCompose.NewServiceDeployResources(), + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestServiceDeploy_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + serviceDeploymentA *dockerCompose.ServiceDeploy + serviceDeploymentB *dockerCompose.ServiceDeploy + expectedServiceDeployment *dockerCompose.ServiceDeploy + }{ + { + serviceDeploymentA: nil, + serviceDeploymentB: nil, + expectedServiceDeployment: nil, + }, + { + serviceDeploymentA: &dockerCompose.ServiceDeploy{ + Resources: nil, + }, + serviceDeploymentB: &dockerCompose.ServiceDeploy{ + Resources: nil, + }, + expectedServiceDeployment: &dockerCompose.ServiceDeploy{ + Resources: nil, + }, + }, + { + serviceDeploymentA: &dockerCompose.ServiceDeploy{ + Resources: &dockerCompose.ServiceDeployResources{ + Limits: nil, + }, + }, + serviceDeploymentB: &dockerCompose.ServiceDeploy{ + Resources: nil, + }, + expectedServiceDeployment: &dockerCompose.ServiceDeploy{ + Resources: &dockerCompose.ServiceDeployResources{ + Limits: nil, + }, + }, + }, + { + serviceDeploymentA: &dockerCompose.ServiceDeploy{ + Resources: nil, + }, + serviceDeploymentB: &dockerCompose.ServiceDeploy{ + Resources: &dockerCompose.ServiceDeployResources{ + Limits: nil, + }, + }, + expectedServiceDeployment: &dockerCompose.ServiceDeploy{ + Resources: &dockerCompose.ServiceDeployResources{ + Limits: nil, + }, + }, + }, + } + + for i, testCase := range testCases { + testCase.serviceDeploymentA.MergeLastWin(testCase.serviceDeploymentB) + require.True(testCase.expectedServiceDeployment.Equal(testCase.serviceDeploymentA), "Failed test case %v", i) + } +} + +func TestSecretDeployResources_Equal(t *testing.T) { + require := require.New(t) + + 
testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.ServiceDeployResources{}, + equalableB: &dockerCompose.Service{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeployResources{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeployResources{ + Limits: dockerCompose.NewServiceDeployResourcesLimits(), + }, + equalableB: &dockerCompose.ServiceDeployResources{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeployResources{ + Limits: dockerCompose.NewServiceDeployResourcesLimits(), + }, + equalableB: &dockerCompose.ServiceDeployResources{ + Limits: dockerCompose.NewServiceDeployResourcesLimits(), + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.ServiceDeployResources{ + Reservations: dockerCompose.NewServiceDeployResourcesLimits(), + }, + equalableB: &dockerCompose.ServiceDeployResources{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeployResources{ + Reservations: dockerCompose.NewServiceDeployResourcesLimits(), + }, + equalableB: &dockerCompose.ServiceDeployResources{ + Reservations: dockerCompose.NewServiceDeployResourcesLimits(), + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestServiceDeployResources_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + serviceDeploymentResourcesA *dockerCompose.ServiceDeployResources + serviceDeploymentResourcesB *dockerCompose.ServiceDeployResources + expectedServiceDeploymentResources *dockerCompose.ServiceDeployResources + }{ + { + serviceDeploymentResourcesA: nil, + serviceDeploymentResourcesB: nil, + expectedServiceDeploymentResources: nil, + }, + { + serviceDeploymentResourcesA: &dockerCompose.ServiceDeployResources{ + Limits: nil, + }, + serviceDeploymentResourcesB: &dockerCompose.ServiceDeployResources{ + Limits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + expectedServiceDeploymentResources: &dockerCompose.ServiceDeployResources{ + Limits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + }, + { + serviceDeploymentResourcesA: &dockerCompose.ServiceDeployResources{ + Limits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + serviceDeploymentResourcesB: &dockerCompose.ServiceDeployResources{ + Limits: nil, + }, + expectedServiceDeploymentResources: &dockerCompose.ServiceDeployResources{ + Limits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + }, + { + serviceDeploymentResourcesA: &dockerCompose.ServiceDeployResources{ + Limits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + serviceDeploymentResourcesB: &dockerCompose.ServiceDeployResources{ + Limits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "2", + Memory: "1000", + }, + }, + expectedServiceDeploymentResources: &dockerCompose.ServiceDeployResources{ + Limits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "2", + Memory: "1000", + }, + }, + }, + { + serviceDeploymentResourcesA: &dockerCompose.ServiceDeployResources{ + Reservations: nil, + }, + serviceDeploymentResourcesB: &dockerCompose.ServiceDeployResources{ + Reservations: 
&dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + expectedServiceDeploymentResources: &dockerCompose.ServiceDeployResources{ + Reservations: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + }, + { + serviceDeploymentResourcesA: &dockerCompose.ServiceDeployResources{ + Reservations: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + serviceDeploymentResourcesB: &dockerCompose.ServiceDeployResources{ + Reservations: nil, + }, + expectedServiceDeploymentResources: &dockerCompose.ServiceDeployResources{ + Reservations: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + }, + { + serviceDeploymentResourcesA: &dockerCompose.ServiceDeployResources{ + Reservations: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + serviceDeploymentResourcesB: &dockerCompose.ServiceDeployResources{ + Reservations: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "2", + Memory: "1000", + }, + }, + expectedServiceDeploymentResources: &dockerCompose.ServiceDeployResources{ + Reservations: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "2", + Memory: "1000", + }, + }, + }, + } + + for i, testCase := range testCases { + testCase.serviceDeploymentResourcesA.MergeLastWin(testCase.serviceDeploymentResourcesB) + require.True(testCase.expectedServiceDeploymentResources.Equal(testCase.serviceDeploymentResourcesA), "Failed test case %v", i) + } +} + +func TestServiceDeployResourcesLimits_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + equalableB: &dockerCompose.NetworkIPAM{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + equalableB: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + }, + equalableB: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "2", + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceDeployResourcesLimits{ + Memory: "500", + }, + equalableB: &dockerCompose.ServiceDeployResourcesLimits{ + Memory: "1000", + }, + expectedResult: false, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestServiceDeployResourcesLimits_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + serviceDeploymentResourcesLimitsA *dockerCompose.ServiceDeployResourcesLimits + serviceDeploymentResourcesLimitsB *dockerCompose.ServiceDeployResourcesLimits + expectedServiceDeploymentResourcesLimits *dockerCompose.ServiceDeployResourcesLimits + }{ + { + serviceDeploymentResourcesLimitsA: nil, + serviceDeploymentResourcesLimitsB: nil, + expectedServiceDeploymentResourcesLimits: nil, + }, + { + serviceDeploymentResourcesLimitsA: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + serviceDeploymentResourcesLimitsB: nil, + 
expectedServiceDeploymentResourcesLimits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + { + serviceDeploymentResourcesLimitsA: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + serviceDeploymentResourcesLimitsB: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + expectedServiceDeploymentResourcesLimits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + Memory: "500", + }, + }, + { + serviceDeploymentResourcesLimitsA: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "1", + }, + serviceDeploymentResourcesLimitsB: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "2", + }, + expectedServiceDeploymentResourcesLimits: &dockerCompose.ServiceDeployResourcesLimits{ + CPUs: "2", + }, + }, + { + serviceDeploymentResourcesLimitsA: &dockerCompose.ServiceDeployResourcesLimits{ + Memory: "500", + }, + serviceDeploymentResourcesLimitsB: &dockerCompose.ServiceDeployResourcesLimits{ + Memory: "1000", + }, + expectedServiceDeploymentResourcesLimits: &dockerCompose.ServiceDeployResourcesLimits{ + Memory: "1000", + }, + }, + } + + for i, testCase := range testCases { + testCase.serviceDeploymentResourcesLimitsA.MergeLastWin(testCase.serviceDeploymentResourcesLimitsB) + require.True(testCase.expectedServiceDeploymentResourcesLimits.Equal(testCase.serviceDeploymentResourcesLimitsA), "Failed test case %v", i) + } +} + +func TestServiceNetwork_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.ServiceNetwork{ + Aliases: []string{}, + }, + equalableB: &dockerCompose.NetworkIPAM{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceNetwork{ + Aliases: []string{}, + }, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceNetwork{ + Aliases: []string{}, + }, + equalableB: &dockerCompose.ServiceNetwork{ + Aliases: []string{}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.ServiceNetwork{ + Aliases: []string{"HelloWorld"}, + }, + equalableB: &dockerCompose.ServiceNetwork{ + Aliases: []string{"HelloWorld"}, + }, + expectedResult: true, + }, + { + equalableA: &dockerCompose.ServiceNetwork{ + Aliases: []string{"HelloWorld"}, + }, + equalableB: &dockerCompose.ServiceNetwork{ + Aliases: []string{"FooBar"}, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceNetwork{ + Aliases: []string{"Hello", "World"}, + }, + equalableB: &dockerCompose.ServiceNetwork{ + Aliases: []string{"FooBar"}, + }, + expectedResult: false, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestServiceNetwork_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + ServiceNetworkA *dockerCompose.ServiceNetwork + ServiceNetworkB *dockerCompose.ServiceNetwork + expectedServiceNetwork *dockerCompose.ServiceNetwork + }{ + { + ServiceNetworkA: nil, + ServiceNetworkB: nil, + expectedServiceNetwork: nil, + }, + { + ServiceNetworkA: &dockerCompose.ServiceNetwork{}, + ServiceNetworkB: nil, + expectedServiceNetwork: &dockerCompose.ServiceNetwork{}, + }, + { + ServiceNetworkA: &dockerCompose.ServiceNetwork{ + Aliases: []string{"my-app.example.com"}, + }, + ServiceNetworkB: &dockerCompose.ServiceNetwork{ + Aliases: 
[]string{"my-app.example.com"}, + }, + expectedServiceNetwork: &dockerCompose.ServiceNetwork{ + Aliases: []string{"my-app.example.com"}, + }, + }, + { + ServiceNetworkA: &dockerCompose.ServiceNetwork{ + Aliases: []string{"my-app.example.com"}, + }, + ServiceNetworkB: &dockerCompose.ServiceNetwork{ + Aliases: []string{"my-app.example.local"}, + }, + expectedServiceNetwork: &dockerCompose.ServiceNetwork{ + Aliases: []string{"my-app.example.com", "my-app.example.local"}, + }, + }, + } + + for i, testCase := range testCases { + testCase.ServiceNetworkA.MergeLastWin(testCase.ServiceNetworkB) + require.True(testCase.expectedServiceNetwork.Equal(testCase.ServiceNetworkA), "Failed test case %v", i) + } +} + +func TestServiceULimits_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.ServiceULimits{}, + equalableB: &dockerCompose.NetworkIPAM{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceULimits{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceULimits{ + NProc: 0, + NoFile: dockerCompose.NewServiceULimitsNoFile(), + }, + equalableB: &dockerCompose.ServiceULimits{ + NProc: 0, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceULimits{ + NProc: 0, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + }, + }, + equalableB: &dockerCompose.ServiceULimits{ + NProc: 0, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Soft: 10, + }, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceULimits{ + NProc: 20, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + }, + equalableB: &dockerCompose.ServiceULimits{ + NProc: 20, + NoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestServiceULimits_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + ServiceULimitsA *dockerCompose.ServiceULimits + ServiceULimitsB *dockerCompose.ServiceULimits + expectedServiceULimits *dockerCompose.ServiceULimits + }{ + { + ServiceULimitsA: nil, + ServiceULimitsB: nil, + expectedServiceULimits: nil, + }, + { + ServiceULimitsA: &dockerCompose.ServiceULimits{}, + ServiceULimitsB: nil, + expectedServiceULimits: &dockerCompose.ServiceULimits{}, + }, + { + ServiceULimitsA: &dockerCompose.ServiceULimits{ + NProc: 10, + }, + ServiceULimitsB: &dockerCompose.ServiceULimits{ + NProc: 10, + }, + expectedServiceULimits: &dockerCompose.ServiceULimits{ + NProc: 10, + }, + }, + { + ServiceULimitsA: &dockerCompose.ServiceULimits{ + NProc: 10, + }, + ServiceULimitsB: &dockerCompose.ServiceULimits{ + NProc: 20, + }, + expectedServiceULimits: &dockerCompose.ServiceULimits{ + NProc: 20, + }, + }, + } + + for i, testCase := range testCases { + testCase.ServiceULimitsA.MergeLastWin(testCase.ServiceULimitsB) + require.True(testCase.expectedServiceULimits.Equal(testCase.ServiceULimitsA), "Failed test case %v", i) + } +} + +func TestServiceULimitsNoFile_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.ServiceULimitsNoFile{}, + 
equalableB: &dockerCompose.NetworkIPAM{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceULimitsNoFile{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: dockerCompose.NewServiceULimitsNoFile(), + equalableB: dockerCompose.NewServiceULimitsNoFile(), + expectedResult: true, + }, + { + equalableA: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + }, + equalableB: &dockerCompose.ServiceULimitsNoFile{ + Soft: 10, + }, + expectedResult: false, + }, + { + equalableA: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + equalableB: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestServiceULimitsNoFile_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + ServiceULimitsNoFileA *dockerCompose.ServiceULimitsNoFile + ServiceULimitsNoFileB *dockerCompose.ServiceULimitsNoFile + expectedServiceULimitsNoFile *dockerCompose.ServiceULimitsNoFile + }{ + { + ServiceULimitsNoFileA: nil, + ServiceULimitsNoFileB: nil, + expectedServiceULimitsNoFile: nil, + }, + { + ServiceULimitsNoFileA: &dockerCompose.ServiceULimitsNoFile{}, + ServiceULimitsNoFileB: nil, + expectedServiceULimitsNoFile: &dockerCompose.ServiceULimitsNoFile{}, + }, + { + ServiceULimitsNoFileA: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + ServiceULimitsNoFileB: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + expectedServiceULimitsNoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + }, + { + ServiceULimitsNoFileA: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + ServiceULimitsNoFileB: &dockerCompose.ServiceULimitsNoFile{ + Hard: 20, + Soft: 10, + }, + expectedServiceULimitsNoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 20, + Soft: 10, + }, + }, + { + ServiceULimitsNoFileA: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + ServiceULimitsNoFileB: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 20, + }, + expectedServiceULimitsNoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 20, + }, + }, + { + ServiceULimitsNoFileA: &dockerCompose.ServiceULimitsNoFile{ + Hard: 10, + Soft: 10, + }, + ServiceULimitsNoFileB: &dockerCompose.ServiceULimitsNoFile{ + Hard: 20, + Soft: 20, + }, + expectedServiceULimitsNoFile: &dockerCompose.ServiceULimitsNoFile{ + Hard: 20, + Soft: 20, + }, + }, + } + + for i, testCase := range testCases { + testCase.ServiceULimitsNoFileA.MergeLastWin(testCase.ServiceULimitsNoFileB) + require.True(testCase.expectedServiceULimitsNoFile.Equal(testCase.ServiceULimitsNoFileA), "Failed test case %v", i) + } +} + +func TestVolume_Equal(t *testing.T) { + require := require.New(t) + + testCases := []struct { + equalableA dockerCompose.Equalable + equalableB dockerCompose.Equalable + expectedResult bool + }{ + { + equalableA: &dockerCompose.Volume{}, + equalableB: &dockerCompose.NetworkIPAM{}, + expectedResult: false, + }, + { + equalableA: &dockerCompose.Volume{}, + equalableB: nil, + expectedResult: false, + }, + { + equalableA: dockerCompose.NewVolume(), + equalableB: dockerCompose.NewVolume(), + expectedResult: true, + }, + { + equalableA: &dockerCompose.Volume{ + External: true, + }, + equalableB: &dockerCompose.Volume{ + External: false, + }, + expectedResult: false, + }, + { + equalableA: 
&dockerCompose.Volume{ + External: true, + }, + equalableB: &dockerCompose.Volume{ + External: true, + }, + expectedResult: true, + }, + } + + for i, testCase := range testCases { + require.Equal(testCase.expectedResult, testCase.equalableA.Equal(testCase.equalableB), "Failed test case %v", i) + } +} + +func TestVolume_MergeLastWin(t *testing.T) { + require := require.New(t) + + testCases := []struct { + volumeA *dockerCompose.Volume + volumeB *dockerCompose.Volume + expectedVolume *dockerCompose.Volume + }{ + { + volumeA: nil, + volumeB: nil, + expectedVolume: nil, + }, + { + volumeA: &dockerCompose.Volume{}, + volumeB: nil, + expectedVolume: &dockerCompose.Volume{}, + }, + { + volumeA: &dockerCompose.Volume{ + External: true, + }, + volumeB: &dockerCompose.Volume{ + External: true, + }, + expectedVolume: &dockerCompose.Volume{ + External: true, + }, + }, + { + volumeA: &dockerCompose.Volume{ + External: true, + }, + volumeB: &dockerCompose.Volume{ + External: false, + }, + expectedVolume: &dockerCompose.Volume{ + External: false, + }, + }, + } + + for i, testCase := range testCases { + testCase.volumeA.MergeLastWin(testCase.volumeB) + require.True(testCase.expectedVolume.Equal(testCase.volumeA), "Failed test case %v", i) + } +} diff --git a/pkg/domain/dockerCompose/equalable.go b/pkg/domain/dockerCompose/equalable.go new file mode 100644 index 0000000..81fec27 --- /dev/null +++ b/pkg/domain/dockerCompose/equalable.go @@ -0,0 +1,65 @@ +package dockerCompose + +type Equalable interface { + Equal(equalable Equalable) bool +} + +// Contains returns true when every element of sliceA is also present in sliceB. +func Contains[R Equalable](sliceA, sliceB []R) bool { + switch { + case sliceA == nil && sliceB == nil: + return true + case sliceA != nil && sliceB == nil: + return false + case sliceA == nil && sliceB != nil: + return false + default: + LOOP: + for i := range sliceA { + for j := range sliceB { + if sliceA[i].Equal(sliceB[j]) { + continue LOOP + } + } + return false + } + return true + } +} + +// Equal returns true when sliceA and sliceB contain the same elements and have the same length, regardless of order. +func Equal[R Equalable](sliceA, sliceB []R) bool { + return Contains(sliceA, sliceB) && + Contains(sliceB, sliceA) && + len(sliceA) == len(sliceB) +} + +// EqualStringMap returns true when both string maps of Equalable are equal. +func EqualStringMap[R Equalable](mapA, mapB map[string]R) bool { + equalFunc := func(mapA, mapB map[string]R) bool { + LOOP: + for keyA, valueA := range mapA { + for keyB, valueB := range mapB { + if keyA == keyB && + valueA.Equal(valueB) { + continue LOOP + } + } + return false + } + return true + } + + return equalFunc(mapA, mapB) && equalFunc(mapB, mapA) +} + +// ExistsInMap returns true if an entry with the passed name exists in the map.
+func ExistsInMap[T any](m map[string]T, name string) bool { + switch { + case m == nil: + return false + default: + _, present := m[name] + return present + } +} diff --git a/pkg/fetcher/fetcher.go b/pkg/fetcher/fetcher.go new file mode 100644 index 0000000..fd69792 --- /dev/null +++ b/pkg/fetcher/fetcher.go @@ -0,0 +1,99 @@ +package fetcher + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "os" + + "git.cryptic.systems/volker.raschek/dcmerge/pkg/domain/dockerCompose" + "gopkg.in/yaml.v3" +) + +func Fetch(urls ...string) ([]*dockerCompose.Config, error) { + dockerComposeConfigs := make([]*dockerCompose.Config, 0) + + for _, rawURL := range urls { + dockerComposeURL, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + switch { + case dockerComposeURL.Scheme == "http" || dockerComposeURL.Scheme == "https": + dockerComposeConfig, err := getDockerComposeViaHTTP(dockerComposeURL.String()) + if err != nil { + return nil, err + } + + dockerComposeConfigs = append(dockerComposeConfigs, dockerComposeConfig) + case dockerComposeURL.Scheme == "file": + fallthrough + default: + dockerComposeConfig, err := readDockerComposeFromFile(dockerComposeURL.Path) + if err != nil { + return nil, err + } + + dockerComposeConfigs = append(dockerComposeConfigs, dockerComposeConfig) + } + } + + return dockerComposeConfigs, nil +} + +var ErrorPathIsDir error = errors.New("path is a directory") + +func getDockerComposeViaHTTP(url string) (*dockerCompose.Config, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("received unexpected HTTP status code %v", resp.StatusCode) + } + + config := dockerCompose.NewConfig() + + yamlDecoder := yaml.NewDecoder(resp.Body) + err = yamlDecoder.Decode(config) + if err != nil { + return nil, err + } + + return config, nil +} + +func readDockerComposeFromFile(name string) (*dockerCompose.Config, error) { + fileStat, err := os.Stat(name) + switch { + case err != nil: + return nil, err + case fileStat.IsDir(): + return nil, fmt.Errorf("%w: %s", ErrorPathIsDir, name) + } + + file, err := os.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + config := dockerCompose.NewConfig() + + yamlDecoder := yaml.NewDecoder(file) + err = yamlDecoder.Decode(config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/renovate.json b/renovate.json new file mode 100644 index 0000000..be6138d --- /dev/null +++ b/renovate.json @@ -0,0 +1,31 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "assignees": [ "volker.raschek" ], + "automergeStrategy": "merge-commit", + "automergeType": "pr", + "labels": [ "renovate" ], + "packageRules": [ + { + "addLabels": [ "renovate/droneci", "renovate/automerge" ], + "automerge": true, + "matchManagers": [ "droneci" ], + "matchUpdateTypes": [ "minor", "patch" ] + }, + { + "description": "Automatically update patch version of used container images in docker files", + "addLabels": [ "renovate/container-image", "renovate/automerge" ], + "automerge": true, + "matchBaseBranches": [ "master" ], + "matchManagers": [ "dockerfile" ], + "matchUpdateTypes": [ "patch" ] + }, + { + "addLabels": [ "renovate/dcmerge", "renovate/automerge" ], + "automerge": false, + "matchPackageNames": [ "dcmerge" ],
"matchManagers": [ "regex" ] + } + ], + "rebaseLabel": "renovate/rebase", + "rebaseWhen": "behind-base-branch" +}