diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..a3949ba5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,9 @@
+/.git
+/.github
+/bin
+/build
+/docs
+/*.md
+.dockerignore
+.gitignore
+Dockerfile
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 00000000..e0bc9712
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,30 @@
+name: dagger
+on: [push, pull_request]
+
+jobs:
+ lint:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
+ with:
+ go-version: '1.21'
+ cache: false
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v3
+
+ build:
+ name: build
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+      - uses: actions/setup-go@v4
+ with:
+ go-version: '1.21'
+ - name: Vendor go dependencies
+ run: go mod vendor
+ - name: Install Dagger CLI
+ run: cd /usr/local && { curl -L https://dl.dagger.io/dagger/install.sh | sh; cd -; }
+ - name: Run Dagger pipeline
+ run: dagger run go run build/main.go github Dockerfile "${GITHUB_REF}" ${{ vars.OSX_GITHUB_FLAGS }}
diff --git a/.gitignore b/.gitignore
index f50999f3..d484c874 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
-/.twgit_features_subject
-/.twgit
+/bin
+/vendor
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 497391e3..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,115 +0,0 @@
-language: bash
-
-services:
- - docker
-env:
- global:
- - NAME="osixia/light-baseimage"
- - VERSION="${TRAVIS_BRANCH}-dev"
- matrix:
- - TARGET_ARCH=amd64 QEMU_ARCH=x86_64
- - TARGET_ARCH=i386 QEMU_ARCH=i386
- - TARGET_ARCH=arm32v7 QEMU_ARCH=arm
- - TARGET_ARCH=arm64v8 QEMU_ARCH=aarch64
-
-addons:
- apt:
- # The docker manifest command was added in docker-ee version 18.x
- # So update our current installation and we also have to enable the experimental features.
- sources:
- - sourceline: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- key_url: "https://download.docker.com/linux/ubuntu/gpg"
- packages:
- - docker-ce
-
-before_install:
- - docker --version
- - mkdir $HOME/.docker
- - 'echo "{" > $HOME/.docker/config.json'
- - 'echo " \"experimental\": \"enabled\"" >> $HOME/.docker/config.json'
- - 'echo "}" >> $HOME/.docker/config.json'
- - sudo service docker restart
- # To have `DOCKER_USER` and `DOCKER_PASS`
- # use `travis env set`.
- - echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin
-
-install:
- # For cross buidling our images
- # This is necessary because travis-ci.org has only x86_64 machines.
- # If travis-ci.org gets native arm builds, probably this step is not
- # necessary any more.
- - docker run --rm --privileged multiarch/qemu-user-static:register --reset
- # Bats is necessary for the UT
- - curl -o bats.tar.gz -SL https://github.com/bats-core/bats-core/archive/v1.1.0.tar.gz
- - mkdir bats-core && tar -xf bats.tar.gz -C bats-core --strip-components=1
- - cd bats-core/
- - sudo ./install.sh /usr/local
- - cd ..
-
-before_script:
- # Injecting the necessary information and binaries for cross-compiling the images.
- # In native builds this information and binaries are not necessary and that is why
- # we are injecting them in the build scripts and we do not include them in the Dockerfiles
- - if [[ "${TARGET_ARCH}" != 'amd64' ]]; then
- sed -i "s/FROM debian/FROM ${TARGET_ARCH}\/debian/" image/Dockerfile;
- fi
- - if [[ "${TARGET_ARCH}" != 'amd64' ]]; then
- sed -i "/${TARGET_ARCH}\/debian/a COPY \
- --from=multiarch/qemu-user-static:x86_64-${QEMU_ARCH} \
- /usr/bin/qemu-${QEMU_ARCH}-static /usr/bin/" image/Dockerfile;
- fi
- - cat image/Dockerfile;
- # If this is a tag then change the VERSION variable to only have the
- # tag name and not also the commit hash.
- - if [ -n "$TRAVIS_TAG" ]; then
- VERSION=$(echo "${TRAVIS_TAG}" | sed -e 's/\(.*\)[-v]\(.*\)/\1\2/g');
- fi
- - if [ "${TRAVIS_BRANCH}" == 'master' ]; then
- VERSION="stable";
- fi
- # replace / with - in version
- - VERSION=$(echo "${VERSION}" | sed 's|/|-|g');
-
-script:
- - make build-nocache NAME=${NAME} VERSION=${VERSION}-${TARGET_ARCH}
- # Run the test and if the test fails mark the build as failed.
- - make test NAME=${NAME} VERSION=${VERSION}-${TARGET_ARCH}
-
-before_deploy:
- - docker run -d --name test_image ${NAME}:${VERSION}-${TARGET_ARCH} sleep 10
- - sleep 5
- - sudo docker ps | grep -q test_image
- - make tag NAME=${NAME} VERSION=${VERSION}-${TARGET_ARCH}
-
-deploy:
- provider: script
- on:
- all_branches: true
- script: make push NAME=${NAME} VERSION=${VERSION}-${TARGET_ARCH}
-
-jobs:
- include:
- - stage: Manifest creation
- install: skip
- script: skip
- after_deploy:
- - echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin
- - docker manifest create ${NAME}:${VERSION} ${NAME}:${VERSION}-amd64 ${NAME}:${VERSION}-i386 ${NAME}:${VERSION}-arm32v7 ${NAME}:${VERSION}-arm64v8;
- docker manifest annotate ${NAME}:${VERSION} ${NAME}:${VERSION}-amd64 --os linux --arch amd64;
- docker manifest annotate ${NAME}:${VERSION} ${NAME}:${VERSION}-i386 --os linux --arch 386;
- docker manifest annotate ${NAME}:${VERSION} ${NAME}:${VERSION}-arm32v7 --os linux --arch arm --variant v7;
- docker manifest annotate ${NAME}:${VERSION} ${NAME}:${VERSION}-arm64v8 --os linux --arch arm64 --variant v8;
-
- # The latest tag is coming from the stable branch of the repo
- - if [ "${TRAVIS_BRANCH}" == 'master' ]; then
- docker manifest create ${NAME}:latest ${NAME}:${VERSION}-amd64 ${NAME}:${VERSION}-i386 ${NAME}:${VERSION}-arm32v7 ${NAME}:${VERSION}-arm64v8;
- docker manifest annotate ${NAME}:latest ${NAME}:${VERSION}-amd64 --os linux --arch amd64;
- docker manifest annotate ${NAME}:latest ${NAME}:${VERSION}-i386 --os linux --arch 386;
- docker manifest annotate ${NAME}:latest ${NAME}:${VERSION}-arm32v7 --os linux --arch arm --variant v7;
- docker manifest annotate ${NAME}:latest ${NAME}:${VERSION}-arm64v8 --os linux --arch arm64 --variant v8;
- fi
-
- - docker manifest push ${NAME}:${VERSION};
- if [ "${TRAVIS_BRANCH}" == 'master' ]; then
- docker manifest push ${NAME}:latest;
- fi
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0e22d147..ed01780c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,27 @@
# Changelog
+## [2.0.0] - Unreleased
+### Added
+ - light-baseimage can now be used by containers with a read-only filesystem
+ - light-baseimage containers can now be run with a non-root user
+ - envsubst-templates tool
+ - gettext-base package
+ - --pre-startup-cmd, --pre-process-cmd, --pre-finish-cmd and --pre-exit-cmd commands
+ - .env file support
+
+### Changed
+ - Use debian bullseye-slim as baseimage
+ - Rename environment variable KILL_PROCESS_TIMEOUT and KILL_ALL_PROCESSES_TIMEOUT to CONTAINER_KILL_PROCESS_TIMEOUT and CONTAINER_KILL_ALL_PROCESSES_TIMEOUT
+
+### Removed
+ - -c,--cmd command
+ - --wait-state and --wait-first-startup
+ - .yaml and .json environment file support in favor of .env files
+ - .startup.yaml and .startup.json environment files support (kubernetes initContainers can be used as replacement)
+ - complex-bash-env support and associated "#PYTHON2BASH:" and "#JSON2BASH:" tags for environment variables
+ - /container/run/environment directory and files, formerly used to dump container environment
+ - ssl-tools and cfssl (a dedicated docker image can be used as replacement)
+
## [1.3.3] - 2021-03-13
### Changed
- Multiple log line message are now split and log line by line for a nicest display.
@@ -9,7 +31,7 @@
## [1.3.2] - 2021-02-18
### Fixed
- - Remove -x bash flag on log-helper tool
+ - Remove -x bash flag on container-logger tool
# [1.3.1] - 2021-01-24
### Fixed
@@ -118,7 +140,7 @@
- Rename my_init to run (delete previous run script)
- Add run tool option --copy-service that copy /container/service to /container/run/service on startup
- Add run tool option --loglevel (default : info) with possible values : none, error, warning, info, debug.
- - Add bash log-helper
+ - Add bash container-logger
### Changed
- Container environment config directory /etc/container_environment moved to /container/environment
@@ -129,7 +151,7 @@
- Container startup script directory /etc/my_init.d/ moved to /container/run/startup
- Container final startup script /etc/rc.local moved to /container/run/startup.sh
- Rename install-multiple-process-stack to add-multiple-process-stack
- - Rename install-service-available to add-service-available
+ - Rename install-services-available to add-service-available
### Removed
- ssl-helper ssl-helper-openssl and ssl-helper-gnutls
@@ -179,24 +201,24 @@
## 0.1.0 - 2015-07-23
Initial release
-[1.3.3]: https://github.com/osixia/docker-light-baseimage/compare/v1.3.2...v1.3.3
-[1.3.2]: https://github.com/osixia/docker-light-baseimage/compare/v1.3.1...v1.3.2
-[1.3.1]: https://github.com/osixia/docker-light-baseimage/compare/v1.3.0...v1.3.1
-[1.3.0]: https://github.com/osixia/docker-light-baseimage/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/osixia/docker-light-baseimage/compare/v1.1.2...v1.2.0
-[1.1.2]: https://github.com/osixia/docker-light-baseimage/compare/v1.1.1...v1.1.2
-[1.1.1]: https://github.com/osixia/docker-light-baseimage/compare/v1.1.0...v1.1.1
-[1.1.0]: https://github.com/osixia/docker-light-baseimage/compare/v1.0.0...v1.1.0
-[1.0.0]: https://github.com/osixia/docker-light-baseimage/compare/v0.2.2...v1.0.0
-[0.2.6]: https://github.com/osixia/docker-light-baseimage/compare/v0.2.5...v0.2.6
-[0.2.5]: https://github.com/osixia/docker-light-baseimage/compare/v0.2.4...v0.2.5
-[0.2.4]: https://github.com/osixia/docker-light-baseimage/compare/v0.2.3...v0.2.4
-[0.2.3]: https://github.com/osixia/docker-light-baseimage/compare/v0.2.2...v0.2.3
-[0.2.2]: https://github.com/osixia/docker-light-baseimage/compare/v0.2.1...v0.2.2
-[0.2.1]: https://github.com/osixia/docker-light-baseimage/compare/v0.2.0...v0.2.1
-[0.2.0]: https://github.com/osixia/docker-light-baseimage/compare/v0.1.5...v0.2.0
-[0.1.5]: https://github.com/osixia/docker-light-baseimage/compare/v0.1.4...v0.1.5
-[0.1.4]: https://github.com/osixia/docker-light-baseimage/compare/v0.1.3...v0.1.4
-[0.1.3]: https://github.com/osixia/docker-light-baseimage/compare/v0.1.2...v0.1.3
-[0.1.2]: https://github.com/osixia/docker-light-baseimage/compare/v0.1.1...v0.1.2
-[0.1.1]: https://github.com/osixia/docker-light-baseimage/compare/v0.1.0...v0.1.1
+[1.3.3]: https://github.com/osixia/container-baseimage/compare/v1.3.2...v1.3.3
+[1.3.2]: https://github.com/osixia/container-baseimage/compare/v1.3.1...v1.3.2
+[1.3.1]: https://github.com/osixia/container-baseimage/compare/v1.3.0...v1.3.1
+[1.3.0]: https://github.com/osixia/container-baseimage/compare/v1.2.0...v1.3.0
+[1.2.0]: https://github.com/osixia/container-baseimage/compare/v1.1.2...v1.2.0
+[1.1.2]: https://github.com/osixia/container-baseimage/compare/v1.1.1...v1.1.2
+[1.1.1]: https://github.com/osixia/container-baseimage/compare/v1.1.0...v1.1.1
+[1.1.0]: https://github.com/osixia/container-baseimage/compare/v1.0.0...v1.1.0
+[1.0.0]: https://github.com/osixia/container-baseimage/compare/v0.2.2...v1.0.0
+[0.2.6]: https://github.com/osixia/container-baseimage/compare/v0.2.5...v0.2.6
+[0.2.5]: https://github.com/osixia/container-baseimage/compare/v0.2.4...v0.2.5
+[0.2.4]: https://github.com/osixia/container-baseimage/compare/v0.2.3...v0.2.4
+[0.2.3]: https://github.com/osixia/container-baseimage/compare/v0.2.2...v0.2.3
+[0.2.2]: https://github.com/osixia/container-baseimage/compare/v0.2.1...v0.2.2
+[0.2.1]: https://github.com/osixia/container-baseimage/compare/v0.2.0...v0.2.1
+[0.2.0]: https://github.com/osixia/container-baseimage/compare/v0.1.5...v0.2.0
+[0.1.5]: https://github.com/osixia/container-baseimage/compare/v0.1.4...v0.1.5
+[0.1.4]: https://github.com/osixia/container-baseimage/compare/v0.1.3...v0.1.4
+[0.1.3]: https://github.com/osixia/container-baseimage/compare/v0.1.2...v0.1.3
+[0.1.2]: https://github.com/osixia/container-baseimage/compare/v0.1.1...v0.1.2
+[0.1.1]: https://github.com/osixia/container-baseimage/compare/v0.1.0...v0.1.1
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..3591a634
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+contact@osixia.net.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..0581cca9
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,44 @@
+# Note: this Dockerfile is actually used and extended by dagger to build all container images
+# see build directory
+ARG GOLANG_IMAGE="golang:1.21"
+ARG ROOT_IMAGE="debian:bookworm-slim"
+
+# step 1: build container-baseimage
+FROM ${GOLANG_IMAGE} AS build
+
+ARG BUILD_VERSION="develop"
+ARG BUILD_CONTRIBUTORS="🐒✨🌴"
+
+ARG BUILD_IMAGE_NAME="osixia/baseimage"
+ARG BUILD_IMAGE_TAG="develop"
+
+ARG GOARCH="amd64"
+
+ENV GOOS="linux" \
+ GOARCH="${GOARCH}" \
+ CGO_ENABLED=0
+
+RUN mkdir /build
+WORKDIR /build
+
+COPY . .
+
+RUN go build \
+ -ldflags="-w -s -X 'github.com/osixia/container-baseimage/config.BuildVersion=${BUILD_VERSION}' -X 'github.com/osixia/container-baseimage/config.BuildContributors=${BUILD_CONTRIBUTORS}' -X 'github.com/osixia/container-baseimage/config.BuildImageName=${BUILD_IMAGE_NAME}' -X 'github.com/osixia/container-baseimage/config.BuildImageTag=${BUILD_IMAGE_TAG}'" \
+ -o container-baseimage \
+ main.go
+
+# step 2: create image
+FROM ${ROOT_IMAGE}
+
+ARG BUILD_LOG_LEVEL="info"
+
+COPY --from=build /build/container-baseimage /usr/sbin/container-baseimage
+RUN container-baseimage install --log-level ${BUILD_LOG_LEVEL}
+
+ENV LANG="en_US.UTF-8" \
+ LANGUAGE="en_US:en" \
+ LC_ALL="en_US.UTF-8" \
+ LC_CTYPE="en_US.UTF-8"
+
+ENTRYPOINT ["/usr/sbin/container-baseimage", "entrypoint"]
diff --git a/LICENSE b/LICENSE.md
similarity index 95%
rename from LICENSE
rename to LICENSE.md
index 20efd1b3..0fc7b970 100644
--- a/LICENSE
+++ b/LICENSE.md
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+MIT License
-Copyright (c) 2015
+Copyright (c) 2015 Osixia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-
diff --git a/Makefile b/Makefile
deleted file mode 100644
index ddf4f672..00000000
--- a/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-NAME = osixia/light-baseimage
-VERSION = 1.3.3
-
-.PHONY: build build-nocache test tag-latest push push-latest release git-tag-version
-
-build:
- docker build -f image/Dockerfile -t $(NAME):$(VERSION) --rm image
-
-build-nocache:
- docker build -f image/Dockerfile -t $(NAME):$(VERSION) --no-cache --rm image
-
-test:
- env NAME=$(NAME) VERSION=$(VERSION) bats test/test.bats
-
-tag:
- docker tag $(NAME):$(VERSION) $(NAME):$(VERSION)
-
-tag-latest:
- docker tag $(NAME):$(VERSION) $(NAME):latest
-
-push:
- docker push $(NAME):$(VERSION)
-
-push-latest:
- docker push $(NAME):latest
-
-release: build test tag-latest push push-latest
-
-git-tag-version: release
- git tag -a v$(VERSION) -m "v$(VERSION)"
- git push origin v$(VERSION)
diff --git a/README.md b/README.md
index afaebf3b..1ca3059e 100644
--- a/README.md
+++ b/README.md
@@ -1,867 +1,207 @@
-# osixia/light-baseimage
+# osixia/baseimage:2.0.0 🐳✨🌴
-[![Docker Pulls](https://img.shields.io/docker/pulls/osixia/light-baseimage.svg)][hub]
-[![Docker Stars](https://img.shields.io/docker/stars/osixia/light-baseimage.svg)][hub]
-[![](https://images.microbadger.com/badges/image/osixia/light-baseimage.svg)](http://microbadger.com/images/osixia/light-baseimage "Get your own image badge on microbadger.com")
+[docker hub]: https://hub.docker.com/r/osixia/light-baseimage
+[github]: https://github.com/osixia/container-baseimage
-[hub]: https://hub.docker.com/r/osixia/light-baseimage/
+[![Docker Pulls](https://img.shields.io/docker/pulls/osixia/light-baseimage.svg?style=flat-square)][docker hub]
+[![Docker Stars](https://img.shields.io/docker/stars/osixia/light-baseimage.svg?style=flat-square)][docker hub]
+[![GitHub Stars](https://img.shields.io/github/stars/osixia/container-baseimage?label=github%20stars&style=flat-square)][github]
+[![Contributors](https://img.shields.io/github/contributors/osixia/container-baseimage?style=flat-square)](https://github.com/osixia/container-baseimage/graphs/contributors)
-Latest release: 1.3.3 [Changelog](CHANGELOG.md)
- | [Docker Hub](https://hub.docker.com/r/osixia/light-baseimage/)
+Debian, Alpine and Ubuntu container base images to build reliable images quickly.
-A **Debian 10 (Buster)** based docker image to build reliable image quickly. This image provide a simple opinionated solution to build multiple or single process image with minimum of layers and an optimized build.
+**This image provides a simple opinionated solution to build single or multiprocess container images with a minimum of layers and an optimized build.**
-The aims of this image is to be used as a base for your own Docker images. It's base on the awesome work of: [phusion/baseimage-docker](https://github.com/phusion/baseimage-docker)
+It helps speed up image development and CI/CD pipelines by providing:
-Other base distribution are available:
-- [Alpine](https://github.com/osixia/docker-light-baseimage/tree/alpine) | [Docker Hub](https://hub.docker.com/r/osixia/alpine-light-baseimage/) | [![](https://images.microbadger.com/badges/image/osixia/alpine-light-baseimage.svg)](http://microbadger.com/images/osixia/alpine-light-baseimage "Get your own image badge on microbadger.com")
+ - Great build tools to minimize the image's number of layers and make the best use of the image cache.
+ - A nice init process as image entrypoint that adds helpful extensions and options to quickly run and debug containers.
+ - A simple way to create multiprocess images.
+ Run either all the processes together in a single container or execute them one by one with multiple containers.
-Table of Contents
-- [osixia/light-baseimage](#osixialight-baseimage)
- - [Contributing](#contributing)
- - [Overview](#overview)
- - [Quick Start](#quick-start)
- - [Image directories structure](#image-directories-structure)
- - [Service directory structure](#service-directory-structure)
- - [Create a single process image](#create-a-single-process-image)
- - [Overview](#overview-1)
- - [Dockerfile](#dockerfile)
- - [Service files](#service-files)
- - [install.sh](#installsh)
- - [startup.sh](#startupsh)
- - [process.sh](#processsh)
- - [Environment files](#environment-files)
- - [default.yaml](#defaultyaml)
- - [default.startup.yaml](#defaultstartupyaml)
- - [Build and test](#build-and-test)
- - [Overriding default environment files at run time:](#overriding-default-environment-files-at-run-time)
- - [Create a multiple process image](#create-a-multiple-process-image)
- - [Overview](#overview-2)
- - [Dockerfile](#dockerfile-1)
- - [Service files](#service-files-1)
- - [install.sh](#installsh)
- - [process.sh](#processsh-1)
- - [Build and test](#build-and-test-1)
- - [Images Based On Light-Baseimage](#images-based-on-light-baseimage)
- - [Image Assets](#image-assets)
- - [Tools](#tools)
- - [Services available](#services-available)
- - [Advanced User Guide](#advanced-user-guide)
- - [Service available](#service-available)
- - [Fix docker mounted file problems](#fix-docker-mounted-file-problems)
- - [Distribution packages documentation and locales](#distribution-packages-documentation-and-locales)
- - [Mastering image tools](#mastering-image-tools)
- - [run](#run)
- - [Run command line options](#run-command-line-options)
- - [Run directory setup](#run-directory-setup)
- - [Startup files environment setup](#startup-files-environment-setup)
- - [Startup files execution](#startup-files-execution)
- - [Process execution](#process-execution)
- - [Single process image](#single-process-image)
- - [Multiple process image](#multiple-process-image)
- - [No process image](#no-process-image)
- - [Extra environment variables](#extra-environment-variables)
- - [log-helper](#log-helper)
- - [complex-bash-env](#complex-bash-env)
- - [Tests](#tests)
- - [Changelog](#changelog)
-
-## Contributing
-
-If you find this image useful here's how you can help:
-
-- Send a pull request with your kickass new features and bug fixes
-- Help new users with [issues](https://github.com/osixia/docker-openldap/issues) they may encounter
-- Support the development of this image and star this repo!
-
-## Overview
-
-This image takes all the advantages of [phusion/baseimage-docker](https://github.com/phusion/baseimage-docker) but makes programs optional which allow more lightweight images and single process images. It also define simple directory structure and files to quickly set how a program (here called service) is installed, setup and run.
-
-So major features are:
- - Greats building tools to minimize the image number of layers and optimize image build.
- - Simple way to install services and multiple process image stacks (runit, cron, syslog-ng-core and logrotate) if needed.
- - Getting environment variables from **.yaml** and **.json** files.
- - Special environment files **.startup.yaml** and **.startup.json** deleted after image startup files first execution to keep the image setup secret.
-
-
-## Quick Start
-
-### Image directories structure
-
-This image use four directories:
-
-- **/container/environment**: for environment files.
-- **/container/service**: for services to install, setup and run.
-- **/container/service-available**: for service that may be on demand downloaded, installed, setup and run.
-- **/container/tool**: for image tools.
-
-By the way at run time another directory is created:
-- **/container/run**: To store container run environment, state, startup files and process to run based on files in /container/environment and /container/service directories.
-
-But this will be dealt with in the following section.
-
-### Service directory structure
-
-This section define a service directory that can be added in /container/service or /container/service-available.
-
-- **my-service**: root directory
-- **my-service/install.sh**: install script (not mandatory).
-- **my-service/startup.sh**: startup script to setup the service when the container start (not mandatory).
-- **my-service/process.sh**: process to run (not mandatory).
-- **my-service/finish.sh**: finish script run when the process script exit (not mandatory).
-- **my-service/...** add whatever you need!
-
-Ok that's pretty all to know to start building our first images!
-
-### Create a single process image
-
-#### Overview
-For this example we are going to perform a basic nginx install.
-
-See complete example in: [example/single-process-image](example/single-process-image)
-
-First we create the directory structure of the image:
-
- - **single-process-image**: root directory
- - **single-process-image/service**: directory to store the nginx service.
- - **single-process-image/environment**: environment files directory.
- - **single-process-image/Dockerfile**: the Dockerfile to build this image.
-
-**service** and **environment** directories name are arbitrary and can be changed but make sure to adapt their name everywhere and especially in the Dockerfile.
-
-Let's now create the nginx service directory:
-
- - **single-process-image/service/nginx**: service root directory
- - **single-process-image/service/nginx/install.sh**: service installation script.
- - **single-process-image/service/nginx/startup.sh**: startup script to setup the service when the container start.
- - **single-process-image/service/nginx/process.sh**: process to run.
-
-
-#### Dockerfile
-
-In the Dockerfile we are going to:
- - Download nginx from apt-get.
- - Add the service directory to the image.
- - Install service and clean up.
- - Add the environment directory to the image.
- - Define ports exposed and volumes if needed.
-
-
- # Use osixia/light-baseimage
- # https://github.com/osixia/docker-light-baseimage
- FROM osixia/light-baseimage:1.3.3
-
- # Download nginx from apt-get and clean apt-get files
- RUN apt-get -y update \
- && LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- nginx \
- && apt-get clean \
- && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
- # Add service directory to /container/service
- ADD service /container/service
-
- # Use baseimage install-service script
- # https://github.com/osixia/docker-light-baseimage/blob/stable/image/tool/install-service
- RUN /container/tool/install-service
-
- # Add default env directory
- ADD environment /container/environment/99-default
-
- # Set /var/www/ in a data volume
- VOLUME /var/www/
-
- # Expose default http and https ports
- EXPOSE 80 443
-
-
-The Dockerfile contains directives to download nginx from apt-get but all the initial setup will take place in install.sh file (called by /container/tool/install-service tool) for a better build experience. The time consuming download task is decoupled from the initial setup to make great use of docker build cache. If install.sh file is changed the builder won't have to download again nginx, and will just run install scripts.
-
-#### Service files
-
-##### install.sh
-
-This file must only contain directives for the service initial setup. Files download and apt-get command takes place in the Dockerfile for a better image building experience (see [Dockerfile](#dockerfile)).
-
-In this example, for the initial setup we just delete the default nginx debian index file and create a custom index.html:
-
- #!/bin/bash -e
- # this script is run during the image build
-
- rm -rf /var/www/html/index.nginx-debian.html
- echo "Hi!" > /var/www/html/index.html
-
-Make sure install.sh can be executed (chmod +x install.sh).
-
-Note: The install.sh script is run during the docker build so run time environment variables can't be used to customize the setup. This is done in the startup.sh file.
-
-
-##### startup.sh
-
-This file is used to make process.sh ready to be run and customize the service setup based on run time environment.
-
-For example at run time we would like to introduce ourselves so we will use an environment variable WHO_AM_I set by command line with --env. So we add WHO_AM_I value to index.html file but we want to do that only on the first container start because on restart the index.html file will already contains our name:
-
- #!/bin/bash -e
- FIRST_START_DONE="${CONTAINER_STATE_DIR}/nginx-first-start-done"
-
- # container first start
- if [ ! -e "$FIRST_START_DONE" ]; then
- echo "I'm ${WHO_AM_I}." >> /var/www/html/index.html
- touch $FIRST_START_DONE
- fi
-
- exit 0
-
-Make sure startup.sh can be executed (chmod +x startup.sh).
-
-As you can see we use CONTAINER_STATE_DIR variable, it contains the directory where container state is saved, this variable is automatically set by run tool. Refer to the [Advanced User Guide](#extra-environment-variables) for more information.
-
-##### process.sh
-
-This file define the command to run:
-
- #!/bin/bash -e
- exec /usr/sbin/nginx -g "daemon off;"
-
-Make sure process.sh can be executed (chmod +x process.sh).
-
-*Caution: The command executed must start a foreground process otherwise the container will immediately stops.*
-
-That why we run nginx with `-g "daemon off;"`
-
-That's it we have a single process image that run nginx!
-We could already build and test this image but two more minutes to take advantage of environment files!
-
-#### Environment files
-
-Let's create two files:
- - single-process-image/environment/default.yaml
- - single-process-image/environment/default.startup.yaml
-
-File name *default*.yaml and *default*.startup.yaml can be changed as you want. Also in this example we are going to use yaml files but json files works too.
-
-##### default.yaml
-default.yaml file define variables that can be used at any time in the container environment:
-
- WHO_AM_I: We are Anonymous. We are Legion. We do not forgive. We do not forget. Expect us.
-
-##### default.startup.yaml
-default.startup.yaml define variables that are only available during the container **first start** in **startup files**.
-\*.startup.yaml are deleted right after startup files are processed for the first time,
-then all variables they contains will not be available in the container environment.
-
-This helps to keep the container configuration secret. If you don't care all environment variables can be defined in **default.yaml** and everything will work fine.
-
-But for this tutorial we will add a variable to this file:
-
- FIRST_START_SETUP_ONLY_SECRET: The database password is KawaaahBounga
-
-And try to get its value in **startup.sh** script:
-
- #!/bin/bash -e
- FIRST_START_DONE="${CONTAINER_STATE_DIR}/nginx-first-start-done"
-
- # container first start
- if [ ! -e "$FIRST_START_DONE" ]; then
- echo ${WHO_AM_I} >> /var/www/html/index.html
- touch $FIRST_START_DONE
- fi
-
- echo "The secret is: $FIRST_START_SETUP_ONLY_SECRET"
-
- exit 0
-
-And in **process.sh** script:
-
- #!/bin/bash -e
- echo "The secret is: $FIRST_START_SETUP_ONLY_SECRET"
- exec /usr/sbin/nginx -g "daemon off;"
-
-Ok it's time for the show!
-
-#### Build and test
-
-Build the image:
-
- docker build -t example/single-process --rm .
-
-Start a new container:
-
- docker run -p 8080:80 example/single-process
-
-Inspect the output and you should see that the secret is present in startup script:
-> \*\*\* Running /container/run/startup/nginx...
-
-> The secret is: The database password is Baw0unga!
-
-And the secret is not defined in the process:
-> \*\*\* Remove file /container/environment/99-default/default.startup.yaml [...]
-
-> \*\*\* Running /container/run/process/nginx/run...
-
-> The secret is:
-
-Yes in this case it's not really useful to have a secret variable like this, but a concrete example can be found in [osixia/openldap](https://github.com/osixia/docker-openldap) image.
-The admin password is available in clear text during the container first start to create a new ldap database where it is saved encrypted. After that the admin password is not available in clear text in the container environment.
-
-Ok let's check our name now, go to [http://localhost:8080/](http://localhost:8080/)
-
-You should see:
-> Hi! We are Anonymous. We are Legion. We do not forgive. We do not forget. Expect us.
-
-And finally, let's say who we really are, stop the previous container (ctrl+c or ctrl+d) and start a new one:
-
- docker run --env WHO_AM_I="I'm Jon Snow, what?! i'm dead?" \
- -p 8080:80 example/single-process
-
-Refresh [http://localhost:8080/](http://localhost:8080/) and you should see:
-> Hi! I'm Jon Snow, what?! i'm dead?
-
-
-##### Overriding default environment files at run time:
-Let's create two new environment files:
- - single-process-image/test-custom-env/env.yaml
- - single-process-image/test-custom-env/env.startup.yaml
-
-env.yaml:
-
- WHO_AM_I: I'm bobby.
-
-env.startup.yaml:
-
- FIRST_START_SETUP_ONLY_SECRET: The database password is KawaaahB0unga!!!
-
-And we mount them at run time:
-
- docker run --volume $PWD/test-custom-env:/container/environment/01-custom \
- -p 8080:80 example/single-process
-
-Take care to link your environment files folder to `/container/environment/XX-somedir` (with XX < 99 so they will be processed before default environment files) and not directly to `/container/environment` because this directory contains predefined baseimage environment files to fix container environment (INITRD, LANG, LANGUAGE and LC_CTYPE).
-
-In the output:
-> \*\*\* Running /container/run/startup/nginx...
-
-> The secret is: The database password is KawaaahB0unga!!!
-
-Refresh [http://localhost:8080/](http://localhost:8080/) and you should see:
-> Hi! I'm bobby.
-
-### Create a multiple process image
-
-#### Overview
-
-This example takes back the single process image example and add php7.0-fpm to run php scripts.
-
-See complete example in: [example/multiple-process-image](example/multiple-process-image)
-
-Note: it would have been ♪ ~~harder~~, faster, better, stronger ♪ to extends the previous image but to make things easier we just copied files.
-
-So here the image directory structure:
-
- - **multiple-process-image**: root directory
- - **multiple-process-image/service**: directory to store the nginx and php7.0-fpm service.
- - **multiple-process-image/environment**: environment files directory.
- - **multiple-process-image/Dockerfile**: the Dockerfile to build this image.
-
-**service** and **environment** directories name are arbitrary and can be changed but make sure to adapt their name in the Dockerfile.
-
-Let's now create the nginx and php directories:
-
- - **multiple-process-image/service/nginx**: nginx root directory
- - **multiple-process-image/service/nginx/install.sh**: service installation script.
- - **multiple-process-image/service/nginx/startup.sh**: startup script to setup the service when the container start.
- - **multiple-process-image/service/nginx/process.sh**: process to run.
-
- - **multiple-process-image/service/php**: php root directory
- - **multiple-process-image/service/php/install.sh**: service installation script.
- - **multiple-process-image/service/php/process.sh**: process to run.
- - **multiple-process-image/service/php/config/default**: default nginx server config with
-
-#### Dockerfile
-
-In the Dockerfile we are going to:
- - Add the multiple process stack
- - Download nginx and php7.0-fpm from apt-get.
- - Add the service directory to the image.
- - Install service and clean up.
- - Add the environment directory to the image.
- - Define ports exposed and volumes if needed.
-
-
- # Use osixia/light-baseimage
- # https://github.com/osixia/docker-light-baseimage
- FROM osixia/light-baseimage:1.3.3
-
- # Install multiple process stack, nginx and php7.0-fpm and clean apt-get files
- # https://github.com/osixia/docker-light-baseimage/blob/stable/image/tool/add-multiple-process-stack
- RUN apt-get -y update \
- && /container/tool/add-multiple-process-stack \
- && LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- nginx \
- php7.0-fpm \
- && apt-get clean \
- && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
- # Add service directory to /container/service
- ADD service /container/service
-
- # Use baseimage install-service script
- # https://github.com/osixia/docker-light-baseimage/blob/stable/image/tool/install-service
- RUN /container/tool/install-service
-
- # Add default env directory
- ADD environment /container/environment/99-default
-
- # Set /var/www/ in a data volume
- VOLUME /var/www/
-
- # Expose default http and https ports
- EXPOSE 80 443
-
-
-The Dockerfile contains directives to download nginx and php7.0-fpm from apt-get but all the initial setup will take place in install.sh file (called by /container/tool/install-service tool) for a better build experience. The time consuming download task is decoupled from the initial setup to make great use of docker build cache. If an install.sh file is changed the builder will not have to download again nginx and php7.0-fpm add will just run install scripts.
-
-Maybe you already read that in the previous example ?Sorry.
-
-#### Service files
+Compatible with non-root and read-only containers.
-Please refer to [single process image](#create-a-single-process-image) for the nginx service files description. Here just php service files are described.
-
-##### install.sh
-
-This file must only contains directives for the service initial setup. Files download and apt-get command takes place in the Dockerfile for a better image building experience (see [Dockerfile](#dockerfile-1) ).
-
-In this example, for the initial setup we set some php default configuration, replace the default nginx server config and add phpinfo.php file:
-
- #!/bin/bash -e
- # this script is run during the image build
-
- # config
- sed -i -e "s/expose_php = On/expose_php = Off/g" /etc/php/7.0/fpm/php.ini
- sed -i -e "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/g" /etc/php/7.0/fpm/php.ini
- sed -i -e "s/;listen.owner = www-data/listen.owner = www-data/g" /etc/php/7.0/fpm/php.ini
- sed -i -e "s/;listen.group = www-data/listen.group = www-data/g" /etc/php/7.0/fpm/php.ini
-
- # create php socket directory
- mkdir -p /run/php
-
- # replace default website with php service default website
- cp -f /container/service/php/config/default /etc/nginx/sites-available/default
-
- # create phpinfo.php
- echo " /var/www/html/phpinfo.php
-
-
-
-Make sure install.sh can be executed (chmod +x install.sh).
-
-##### process.sh
-
-This file define the command to run:
-
- #!/bin/bash -e
- exec /usr/sbin/php-fpm7.0 --nodaemonize
-
-Make sure process.sh can be executed (chmod +x process.sh).
-
-*Caution: The command executed must start a foreground process otherwise runit (use to supervise multiple process images) will keep restarting php-fpm7.0.*
-
-That why we run php with `--nodaemonize"`
-
-##### config/default
-nginx server configuration:
-
- server {
- listen 80 default_server;
- listen [::]:80 default_server;
-
- root /var/www/html;
-
- # Add index.php to the list if you are using PHP
- index index.html index.htm index.nginx-debian.html;
-
- server_name _;
-
- location / {
- # First attempt to serve request as file, then
- # as directory, then fall back to displaying a 404.
- try_files $uri $uri/ =404;
- }
-
- location ~ \.php$ {
- fastcgi_split_path_info ^(.+\.php)(/.+)$;
- # With php fpm:
- fastcgi_pass unix:/run/php/php7.0-fpm.sock;
- fastcgi_index index.php;
- include fastcgi_params;
- include fastcgi.conf;
- }
- }
-
-That's it we have a multiple process image that run nginx and php!
-
-#### Build and test
-
-
-Build the image:
-
- docker build -t example/multiple-process --rm .
-
-Start a new container:
-
- docker run -p 8080:80 example/multiple-process
-
-Go to [http://localhost:8080/phpinfo.php](http://localhost:8080/phpinfo.php)
-
-> phpinfo should be printed
-
-So we have a container with two process supervised by runit running in our container!
-
-
-## Images Based On Light-Baseimage
-
-Single process images:
-- [osixia/openldap](https://github.com/osixia/docker-openldap)
-- [osixia/keepalived](https://github.com/osixia/docker-keepalived)
-- [osixia/tinc](https://github.com/osixia/docker-tinc)
-- [osixia/registry-ldap-auth](https://github.com/osixia/docker-registry-ldap-auth)
-- [osixia/cfssl-multirootca](https://github.com/osixia/docker-cfssl-multirootca)
-- [osixia/backup](https://github.com/osixia/docker-backup)
-- [osixia/backup-manager](https://github.com/osixia/docker-backup-manager)
-- [osixia/mmc-agent](https://github.com/osixia/docker-mmc-agent)
-
-Multiple process images:
-- [osixia/openldap-backup](https://github.com/osixia/docker-openldap-backup)
-- [osixia/mariadb](https://github.com/osixia/docker-mariadb)
-- [osixia/wordpress](https://github.com/osixia/docker-wordpress)
-- [osixia/roundcube](https://github.com/osixia/docker-roundcube)
-- [osixia/piwik](https://github.com/osixia/docker-piwik)
-- [osixia/phpMyAdmin](https://github.com/osixia/docker-phpMyAdmin)
-- [osixia/phpLDAPadmin](https://github.com/osixia/docker-phpLDAPadmin)
-- [osixia/keepalived-confd](https://github.com/osixia/docker-keepalived-confd)
-- [osixia/tinc-etcd](https://github.com/osixia/docker-tinc-etcd)
-- [osixia/postfix-gateway-confd](https://github.com/osixia/docker-postfix-gateway-confd)
-- [osixia/mmc-mail](https://github.com/osixia/docker-mmc-mail)
-- [osixia/mmc-web](https://github.com/osixia/docker-mmc-web)
-
-Image adding light-baseimage tools to an existing image
-- [osixia/gitlab](https://github.com/osixia/docker-gitlab)
-
-Send me a message to add your image in this list.
-
-## Image Assets
-
-### Tools
-
-All container tools are available in `/container/tool` directory and are linked in `/sbin/` so they belong to the container PATH.
-
-
-| Filename | Description |
-| ---------------- | ------------------- |
-| run | The run tool is defined as the image ENTRYPOINT (see [Dockerfile](image/Dockerfile)). It set environment and run startup scripts and images process. More information in the [Advanced User Guide](#run). |
-| setuser | A tool for running a command as another user. Easier to use than su, has a smaller attack vector than sudo, and unlike chpst this tool sets $HOME correctly.|
-| log-helper | A simple bash tool to print message base on the log level. |
-| add-service-available | A tool to download and add services in service-available directory to the regular service directory. |
-| add-multiple-process-stack | A tool to add the multiple process stack: runit, cron syslog-ng-core and logrotate. |
-| install-service | A tool that execute /container/service/install.sh and /container/service/\*/install.sh scripts. |
-| complex-bash-env | A tool to iterate trough complex bash environment variables created by the run tool when a table or a list was set in environment files or in environment command line argument. |
-
-### Services available
-
-| Name | Description |
-| ---------------- | ------------------- |
-| :runit | Replaces Debian's Upstart. Used for service supervision and management. Much easier to use than SysV init and supports restarting daemons when they crash. Much easier to use and more lightweight than Upstart.
*This service is part of the multiple-process-stack.*|
-| :cron | Cron daemon.
*This service is part of the multiple-process-stack.*|
-| :syslog-ng-core | Syslog daemon so that many services - including the kernel itself - can correctly log to /var/log/syslog. If no syslog daemon is running, a lot of important messages are silently swallowed.
Only listens locally. All syslog messages are forwarded to "docker logs".
*This service is part of the multiple-process-stack.* |
-| :logrotate | Rotates and compresses logs on a regular basis.
*This service is part of the multiple-process-stack.*|
-| :ssl-tools | Add CFSSL a CloudFlare PKI/TLS swiss army knife. It's a command line tool for signing, verifying, and bundling TLS certificates. Comes with cfssl-helper tool that make it docker friendly by taking command line parameters from environment variables.
Also add jsonssl-helper to get certificates from json files, parameters are set by environment variables. |
-
-
-## Advanced User Guide
-
-### Service available
-
-A service-available is basically a normal service expect that it is in the `service-available` directory and have a `download.sh` file.
-
-To add a service-available to the current image use the `add-service-available` tool. It will process the download.sh file of services given in argument and move them to the regular service directory (/container/service).
-
-After that the service-available will be process like regular services.
-
-Here simple Dockerfile example how to add a service-available to an image:
-
- # Use osixia/light-baseimage
- # https://github.com/osixia/docker-light-baseimage
- FROM osixia/light-baseimage:1.3.3
-
- # Add cfssl and cron service-available
- # https://github.com/osixia/docker-light-baseimage/blob/stable/image/tool/add-service-available
- # https://github.com/osixia/docker-light-baseimage/blob/stable/image/service-available/:ssl-tools/download.sh
- # https://github.com/osixia/docker-light-baseimage/blob/stable/image/service-available/:cron/download.sh
- RUN apt-get -y update \
- && /container/tool/add-service-available :ssl-tools :cron \
- && LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- nginx \
- php7.0-fpm
- ...
-
-
-Note: Most of predefined service available start with a `:` to make sure they are installed before regular services (so they can be used by regular services). The install-service tool process services in /container/service in alphabetical order.
-
-To create a service-available just create a regular service, add a download.sh file to set how the needed content is downloaded and add it to /container/service-available directory. The download.sh script is not mandatory if nothing need to be downloaded.
-
-For example a simple image example that add service-available to this baseimage: [osixia/web-baseimage](https://github.com/osixia/docker-web-baseimage)
-
-
-### Fix docker mounted file problems
-
-For some reasons you will probably have to mount custom files to your container. For example in the *mutliple process image example* you can customise the nginx config by mounting your custom config to "/container/service/php/config/default" :
-
- docker run -v /data/my-nginx-config:/container/service/php/config/default example/multiple-process
-
-In this case every thing should work fine, but if the startup script makes some `sed` replacement or change file owner and permissions this can results in "Device or resource busy" error. See [Docker documentation](https://docs.docker.com/v1.4/userguide/dockervolumes/#mount-a-host-file-as-a-data-volume).
-
- sed -i "s|listen 80|listen 8080|g" /container/service/php/config/default
-
-To prevent that king of error light-baseimage provide *--copy-service* command argument :
-
- docker run -v /data/my-nginx-config:/container/service/php/config/default example/multiple-process --copy-service
-
-On startup this will copy all /container/service directory to /container/run/service.
-
-
-At run time you can get the container service directory with `CONTAINER_SERVICE_DIR` environment variable.
-If *--copy-service* is used *CONTAINER_SERVICE_DIR=/container/run/service* otherwise *CONTAINER_SERVICE_DIR=/container/service*
-
-So to always apply sed on the correct file in the startup script the command becomes :
-
- sed -i "s|listen 80|listen 8080|g" ${CONTAINER_SERVICE_DIR}/php/config/default
-
-
-### Distribution packages documentation and locales
-
-This image has a configuration to prevent documentation and locales to be installed from base distribution packages repositories to make it more lightweight as possible. If you need the doc and locales remove the following files :
-**/etc/dpkg/dpkg.cfg.d/01_nodoc** and **/etc/dpkg/dpkg.cfg.d/01_nolocales**
-
-
-### Mastering image tools
-
-#### run
-
-The *run tool* is defined as the image ENTRYPOINT (see [Dockerfile](image/Dockerfile)). It's the core tool of this image.
-
-What it does:
-- Setup the run directory
-- Set the startup files environment
-- Run startup files
-- Set process environment
-- Run process
-
-##### Run command line options
-
-*Run tool* takes several options, to list them:
-
- docker run osixia/light-baseimage:1.3.3 --help
- usage: run [-h] [-e] [-s] [-p] [-f] [-o {startup,process,finish}]
- [-c COMMAND [WHEN={startup,process,finish} ...]] [-k]
- [--wait-state FILENAME] [--wait-first-startup] [--keep-startup-env]
- [--copy-service] [--dont-touch-etc-hosts] [--keepalive]
- [--keepalive-force] [-l {none,error,warning,info,debug,trace}]
- [MAIN_COMMAND [MAIN_COMMAND ...]]
-
- Initialize the system.
-
- positional arguments:
- MAIN_COMMAND The main command to run, leave empty to only run
- container process.
-
- optional arguments:
- -h, --help show this help message and exit
- -e, --skip-env-files Skip getting environment values from environment
- file(s).
- -s, --skip-startup-files
- Skip running /container/run/startup/* and
- /container/run/startup.sh file(s).
- -p, --skip-process-files
- Skip running container process file(s).
- -f, --skip-finish-files
- Skip running container finish file(s).
- -o {startup,process,finish}, --run-only {startup,process,finish}
- Run only this file type and ignore others.
- -c COMMAND [WHEN={startup,process,finish} ...], --cmd COMMAND [WHEN={startup,process,finish} ...]
- Run this command before WHEN file(s). Default before
- startup file(s).
- -k, --no-kill-all-on-exit
- Don't kill all processes on the system upon exiting.
- --wait-state FILENAME
- Wait until the container state file exists in
- /container/run/state directory before starting.
- Usefull when 2 containers share /container/run
- directory via volume.
- --wait-first-startup Wait until the first startup is done before starting.
- Usefull when 2 containers share /container/run
- directory via volume.
- --keep-startup-env Don't remove ('.startup.yaml', '.startup.json')
- environment files after startup scripts.
- --copy-service Copy /container/service to /container/run/service.
- Help to fix docker mounted files problems.
- --dont-touch-etc-hosts
- Don't add in /etc/hosts a line with the container ip
- and $HOSTNAME environment variable value.
- --keepalive Keep alive container if all startup files and process
- exited without error.
- --keepalive-force Keep alive container in all circonstancies.
- -l {none,error,warning,info,debug,trace}, --loglevel {none,error,warning,info,debug,trace}
- Log level (default: info)
-
- Osixia! Light Baseimage: https://github.com/osixia/docker-light-baseimage
-
-
-##### Run directory setup
-*Run tool* will create if they not exists the following directories:
- - /container/run/state
- - /container/run/environment
- - /container/run/startup
- - /container/run/process
- - /container/run/service
-
-At the container first start it will search in /container/service or /container/run/service (if --copy-service option is used) all image's services.
-
-In a service directory for example /container/service/my-service:
- - If a startup.sh file is found, the file is linked to /container/run/startup/my-service
- - If a process.sh file is found, the file is linked to /container/run/process/my-service/run
-
-##### Startup files environment setup
-*Run tool* takes all file in /container/environment/* and import the variables values to the container environment.
-The container environment is then exported to /container/run/environment and in /container/run/environment.sh
-
-##### Startup files execution
-*Run tool* iterate trough /container/run/startup/* directory in alphabetical order and run scripts.
-After each time *run tool* runs a startup script, it resets its own environment variables to the state in /container/run/environment, and re-dumps the new environment variables to /container/run/environment.sh
-
-After all startup script *run tool* run /container/run/startup.sh if exists.
-
-##### Process environment setup
-*Run tool* delete all .startup.yaml and .startup.json in /container/environment/* and clear the previous run environment (/container/run/environment is removed)
-Then it takes all remaining file in /container/environment/* and import the variables values to the container environment.
-The container environment is then exported to /container/run/environment and in /container/run/environment.sh
-
-##### Process execution
-
-###### Single process image
-
-*Run tool* execute the unique /container/run/process/service-name/run file.
-
-If a main command is set for example:
-
- docker run -it osixia/openldap:1.4.0 bash
-
-*Run tool* will execute the single process and the main command. If the main command exits the container exits. This is useful to debug or image development purpose.
-
-###### Multiple process image
-
-In a multiple process image *run tool* execute runit witch supervise /container/run/process directory and start all services automatically. Runit will also relaunched them if they failed.
-
-If a main command is set for example:
-
- docker run -it osixia/phpldapadmin:0.9.0 bash
-
-*run tool* will execute runit and the main command. If the main command exits the container exits. This is still useful to debug or image development purpose.
-
-###### No process image
-If a main command is set *run tool* launch it otherwise bash is launched.
-Example:
-
- docker run -it osixia/light-baseimage:1.3.3
-
-
-##### Extra environment variables
-
-*run tool* add 3 variables to the container environment:
-- **CONTAINER_STATE_DIR**: /container/run/state
-- **CONTAINER_SERVICE_DIR**: the container service directory. By default: /container/service but if the container is started with --copy-service option: /container/run/service
-- **CONTAINER_LOG_LEVEL**: log level set by --loglevel option defaults to: 3 (info)
-
-#### log-helper
-This tool is a simple utility based on the CONTAINER_LOG_LEVEL variable to print leveled log messages.
-
-For example if the log level is info:
-
- log-helper info hello
-
-will echo:
-> hello
-
- log-helper debug i'm bob
-
-will echo nothing.
-
-log-helper support piped input:
-
- echo "Heyyyyy" | log-helper info
-
-> Heyyyyy
-
-Log message functions usage: `log-helper error|warning|info|debug|trace message`
-
-You can also test the log level with the level function:
-
- log-helper level eq info && echo "log level is infos"
-
-for example this will echo "log level is trace" if log level is trace.
+Table of Contents
+- [osixia/baseimage:2.0.0 🐳✨🌴](#osixiabaseimage200-)
+ - [⚡ Quick Start](#-quick-start)
+ - [🗂 Entrypoint Options](#-entrypoint-options)
+ - [🍹 First Image In 2 Minutes](#-first-image-in-2-minutes)
+ - [📄 Documentation](#-documentation)
+ - [♥ Contributing](#-contributing)
+ - [🔓 License](#-license)
+ - [💥 Changelog](#-changelog)
-Level `function usage: log-helper level eq|ne|gt|ge|lt|le none|error|warning|info|debug|trace`
-Help: [http://www.tldp.org/LDP/abs/html/comparison-ops.html](http://www.tldp.org/LDP/abs/html/comparison-ops.html)
+## ⚡ Quick Start
-#### complex-bash-env
-With light-baseimage you can set bash environment variable from .yaml and .json files.
-But bash environment variables can't store complex objects such as table that can be defined in yaml or json files, that's why they are converted to "complex bash environment variables" and complex-bash-env tool help getting those variables values easily.
+Run the following command to generate a sample Dockerfile and start building an image based on osixia/baseimage:
-For example the following yaml file:
+```
+# Debian
+docker run --rm osixia/baseimage generate dockerfile --print
+```
- FRUITS:
- - orange
- - apple
+```
+# Alpine
+docker run --rm osixia/baseimage:alpine generate dockerfile --print
+```
+
+```
+# Ubuntu
+docker run --rm osixia/baseimage:ubuntu generate dockerfile --print
+```
+
+Next step: check out a fully functional [image example](#-first-image-in-2-minutes).
+
+## 🗂 Entrypoint Options
+
+```
+docker run --rm osixia/baseimage --help
+```
+
+```
+ / _ \ ___(_)_ _(_) __ _ / / __ ) __ _ ___ ___(_)_ __ ___ __ _ __ _ ___
+| | | / __| \ \/ / |/ _` | / /| _ \ / _` / __|/ _ \ | '_ ` _ \ / _` |/ _` |/ _
+| |_| \__ \ |> <| | (_| |/ / | |_) | (_| \__ \ __/ | | | | | | (_| | (_| | __/
+ \___/|___/_/_/\_\_|\__,_/_/ |____/ \__,_|___/\___|_|_| |_| |_|\__,_|\__, |\___|
+ |___/
+Container image built with osixia/baseimage (develop) 🐳✨🌴
+https://github.com/osixia/container-baseimage
+
+Usage:
+ container-baseimage entrypoint [flags]
+ container-baseimage entrypoint [command]
+
+Aliases:
+ entrypoint, ep
+
+Available Commands:
+ generate Generate sample templates
+ container Container image information
+ thanks List container-baseimage contributors
+
+Flags:
+ -e, --skip-env-files skip getting environment variables values from environment file(s)
+
+ -s, --skip-startup skip running pre-startup-cmd and service(s) startup.sh script(s)
+ -p, --skip-process skip running pre-process-cmd and service(s) process.sh script(s)
+ -f, --skip-finish skip running pre-finish-cmd and service(s) finish.sh script(s)
+ -c, --run-only-lifecycle-step string run only one lifecycle step pre-command and script(s) file(s), choices: startup, process, finish
+
+ -1, --pre-startup-cmd stringArray run command passed as argument before service(s) startup.sh script(s)
+ -3, --pre-process-cmd stringArray run command passed as argument before service(s) process.sh script(s)
+ -5, --pre-finish-cmd stringArray run command passed as argument before service(s) finish.sh script(s)
+ -7, --pre-exit-cmd stringArray run command passed as argument before container exits
+
+ -x, --exec stringArray execute only listed service(s) (default run service(s) linked to the entrypoint)
+
+ -b, --bash run Bash along with other service(s) or command
+
+ -k, --kill-all-on-exit kill all processes on the system upon exiting (send sigterm to all processes) (default true)
+ -t, --kill-all-on-exit-timeout duration kill all processes timeout (send sigkill to all processes after sigterm timeout has been reached) (default 15s)
+ -r, --restart automatically restart failed services process.sh scripts (single process: default false, multiprocess: default true)
+ -a, --keep-alive keep alive container after all processes have exited
+
+ -w, --unsecure-fast-write disable fsync and friends with eatmydata LD_PRELOAD library
+
+ -d, --debug set log level to debug and install debug packages
+ -i, --install-packages strings install packages
+
+ -v, --version print container image version
+
+ -l, --log-level string set log level, choices: none, error, warning, info, debug, trace (default "info")
+ -o, --log-format string set log format, choices: console, json (default "console")
+ -h, --help help for entrypoint
+
+Use "container-baseimage entrypoint [command] --help" for more information about a command.
+```
+
+## 🍹 First Image In 2 Minutes
+Generate image templates in the **osixia-baseimage-example** directory
+
+```
+mkdir osixia-baseimage-example
+```
+
+```
+docker run --rm --user $UID --volume $(pwd)/osixia-baseimage-example:/run/container/generator \
+osixia/baseimage generate bootstrap
+```
-will produce this bash environment variables:
+Note: add `--multiprocess` to get a multiprocess image sample.
- FRUITS=#COMPLEX_BASH_ENV:TABLE: FRUITS_ROW_1 FRUITS_ROW_2
- FRUITS_ROW_1=orange
- FRUITS_ROW_2=apple
+List generated directories and files in **osixia-baseimage-example** directory
+```
+tree -a osixia-baseimage-example
+```
-(this is done by run tool)
+```
+osixia-baseimage-example
+├── Dockerfile
+├── environment
+│ ├── .env
+│ └── README.md
+└── services
+ └── service-1
+ ├── finish.sh
+ ├── install.sh
+ ├── .priority
+ ├── process.sh
+ ├── README.md
+ └── startup.sh
+```
-complex-bash-env make it easy to iterate trough this variable:
+Build the image **example/my-image:develop** using files in the **osixia-baseimage-example** directory
+```
+docker build --tag example/my-image:develop ./osixia-baseimage-example
+```
+
+Run **example/my-image:develop** image
+```
+docker run example/my-image:develop
+```
+
+```
+2024-04-15T16:23:36Z INFO Container image: osixia/example-baseimage:latest
+2024-04-15T16:23:36Z INFO Loading environment variables from /container/environment/.env ...
+2024-04-15T16:23:36Z INFO Running /container/services/service-1/startup.sh ...
+service-1: Doing some container first start setup ...
+service-1: Doing some others container start setup ...
+service-1: EXAMPLE_ENV_VAR=Hello :) ...
+2024-04-15T16:23:36Z INFO Running /container/services/service-1/process.sh ...
+service-1: Just going to sleep for 7 seconds ...
+2024-04-15T16:23:43Z INFO Running /container/services/service-1/finish.sh ...
+service-1: process ended ...
+2024-04-15T16:23:43Z INFO Exiting ...
+```
- for fruit in $(complex-bash-env iterate FRUITS)
- do
- echo ${!fruit}
- done
+That's it you have a single process image based on osixia/baseimage.
-A more complete example can be found [osixia/phpLDAPadmin](https://github.com/osixia/docker-phpLDAPadmin) image.
+Next steps:
+- [Get familiar with generated files]().
+- [Customize Dockerfile and service scripts]().
+- [Set the container image name to "example/my-image:develop" instead of "osixia/example-baseimage:latest"]().
+- [Review image entrypoint options to fastly run and debug containers]().
-Note this yaml definition:
+## 📄 Documentation
- FRUITS:
- - orange
- - apple
+⚠ 2.0.0 release is out. Check the [v1 to v2 migration guide](https://opensource.osixia.net/projects/container-images/baseimage/migration-guide-v1-v2/).
-Can also be set by command line converted in python or json:
+See full documentation and complete features list on [osixia/baseimage documentation](https://opensource.osixia.net/projects/container-images/baseimage/).
- docker run -it --env FRUITS="#PYTHON2BASH:['orange','apple']" osixia/light-baseimage:1.3.3 printenv
- docker run -it --env FRUITS="#JSON2BASH:[\"orange\",\"apple\"]" osixia/light-baseimage:1.3.3 printenv
+## ♥ Contributing
-### Tests
+If you find this project useful here's how you can help:
-We use **Bats** (Bash Automated Testing System) to test this image:
+- Send a pull request with new features and bug fixes.
+- Help new users with [issues](https://github.com/osixia/container-baseimage/issues) they may encounter.
+- Support the development of this image and star [this repo][github] and the image [docker hub repository][docker hub].
-> [https://github.com/bats-core/bats-core](https://github.com/bats-core/bats-core)
+This project uses [dagger](https://github.com/dagger/dagger) as CI/CD tool to build, test and deploy images. See source code and useful command lines in [build directory](build/).
-Install Bats, and in this project directory run:
+## 🔓 License
- make test
+This project is licensed under the terms of the MIT license. See [LICENSE.md](LICENSE.md) file for more information.
-## Changelog
+## 💥 Changelog
Please refer to: [CHANGELOG.md](CHANGELOG.md)
diff --git a/SECURITY.md b/SECURITY.md
index f88d7037..13416a69 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -2,14 +2,14 @@
## Supported Versions
-| Version | Supported |
-| ------- | ------------------ |
-| >= 2.0.0 | :heavy_check_mark: |
-| < 2.0.0 | :x: |
+| Version | Supported |
+| -------- | ------------------ |
+| >= 2.0.0 | :heavy_check_mark: |
+| < 2.0.0 | :x: |
## Reporting a Vulnerability
-If you discover a security vulnerability within this docker image,
+If you discover a security vulnerability within this container image,
please send an email to security@osixia.net.
For minor vulnerabilities feel free to add an issue here on github.
diff --git a/alpine/alpine.go b/alpine/alpine.go
new file mode 100644
index 00000000..c32505c9
--- /dev/null
+++ b/alpine/alpine.go
@@ -0,0 +1,28 @@
+package alpine
+
+import (
+ "embed"
+
+ "github.com/osixia/container-baseimage/core"
+)
+
+// list all services so .priority files are included (. files are ignored in subdirs otherwise)
+
+//go:embed assets/* assets/services/cron/* assets/services/logrotate/* assets/services/syslog-ng/*
+var assets embed.FS
+
+var SupportedDistribution = &core.SupportedDistribution{
+ Name: "Alpine",
+ Vendors: []string{"alpine"},
+
+ Config: &core.DistributionConfig{
+ DebugPackages: []string{"curl", "less", "procps", "psmisc", "strace", "vim"},
+ Assets: []*embed.FS{&assets},
+
+ InstallScript: "install.sh",
+
+ BinPackagesIndexUpdate: "packages-index-update",
+ BinPackagesInstallClean: "packages-install-clean",
+ BinPackagesIndexClean: "packages-index-clean",
+ },
+}
diff --git a/alpine/assets/bin/install-debug-packages b/alpine/assets/bin/install-debug-packages
new file mode 100644
index 00000000..aaa42540
--- /dev/null
+++ b/alpine/assets/bin/install-debug-packages
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+# Usage:
+# install-debug-packages [extra package 1] [extra package 2] ...
+
+container-logger level eq trace && set -x
+
+packages-index-update
+packages-install-clean "$@" ${CONTAINER_DEBUG_PACKAGES}
diff --git a/alpine/assets/bin/packages-index-clean b/alpine/assets/bin/packages-index-clean
new file mode 100755
index 00000000..5e163c9d
--- /dev/null
+++ b/alpine/assets/bin/packages-index-clean
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+# Usage:
+# packages-index-clean
+
+container-logger info "Clean packages index"
+rm -rf /var/cache/apk/* | container-logger debug
diff --git a/alpine/assets/bin/packages-index-update b/alpine/assets/bin/packages-index-update
new file mode 100755
index 00000000..87948b38
--- /dev/null
+++ b/alpine/assets/bin/packages-index-update
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+# Usage:
+# packages-index-update
+
+container-logger info "Update packages index"
+apk update -q 2>&1 | container-logger info
diff --git a/alpine/assets/bin/packages-install b/alpine/assets/bin/packages-install
new file mode 100755
index 00000000..3bbe01c8
--- /dev/null
+++ b/alpine/assets/bin/packages-install
@@ -0,0 +1,12 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+# Usage:
+# packages-install [package 1] [package 2] ...
+
+ARGS="$*"
+
+container-logger info "Install packages: ${ARGS}"
+eval apk -q add "${ARGS}" 2>&1 | container-logger info
diff --git a/alpine/assets/bin/packages-install-clean b/alpine/assets/bin/packages-install-clean
new file mode 100755
index 00000000..1d8008b8
--- /dev/null
+++ b/alpine/assets/bin/packages-install-clean
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+# Usage:
+# packages-install-clean [package]...
+
+container-logger level eq trace && set -x
+
+packages-install "$@"
+packages-index-clean
diff --git a/alpine/assets/bin/packages-remove b/alpine/assets/bin/packages-remove
new file mode 100755
index 00000000..48b187e2
--- /dev/null
+++ b/alpine/assets/bin/packages-remove
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+# Usage:
+# packages-remove [package 1] [package 2] ...
+
+container-logger info "Remove packages: $*"
+apk del --purge "$@" 2>&1 | container-logger debug
diff --git a/alpine/assets/install.sh b/alpine/assets/install.sh
new file mode 100755
index 00000000..f4335f5e
--- /dev/null
+++ b/alpine/assets/install.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -e
+
+# Install required packages.
+apk add --update bash bash-completion libeatmydata
+
+# Add container-baseimage bash completion.
+container-baseimage completion bash > /usr/share/bash-completion/completions/container-baseimage
+
+# Clean.
+rm -rf /tmp/* /var/tmp/* /var/cache/apk/*
diff --git a/alpine/assets/services/cron/.optional b/alpine/assets/services/cron/.optional
new file mode 100644
index 00000000..e69de29b
diff --git a/alpine/assets/services/cron/.priority b/alpine/assets/services/cron/.priority
new file mode 100644
index 00000000..a6905f8b
--- /dev/null
+++ b/alpine/assets/services/cron/.priority
@@ -0,0 +1 @@
+999
diff --git a/alpine/assets/services/cron/install.sh b/alpine/assets/services/cron/install.sh
new file mode 100755
index 00000000..435fdba7
--- /dev/null
+++ b/alpine/assets/services/cron/install.sh
@@ -0,0 +1,3 @@
+#!/bin/bash -e
+
+chmod 600 /etc/crontabs
diff --git a/alpine/assets/services/cron/process.sh b/alpine/assets/services/cron/process.sh
new file mode 100755
index 00000000..3f4d0733
--- /dev/null
+++ b/alpine/assets/services/cron/process.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+exec /usr/sbin/crond -f
diff --git a/alpine/assets/services/cron/startup.sh b/alpine/assets/services/cron/startup.sh
new file mode 100755
index 00000000..c440e697
--- /dev/null
+++ b/alpine/assets/services/cron/startup.sh
@@ -0,0 +1,13 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+# prevent NUMBER OF HARD LINKS > 1 error
+# https://github.com/phusion/baseimage-docker/issues/198
+touch /etc/crontabs /etc/periodic/15min /etc/periodic/hourly /etc/periodic/daily /etc/periodic/weekly /etc/periodic/monthly
+
+find /etc/crontabs/ -exec touch {} \;
+find /etc/periodic/15min/ -exec touch {} \;
+find /etc/periodic/hourly/ -exec touch {} \;
+find /etc/periodic/daily/ -exec touch {} \;
+find /etc/periodic/weekly/ -exec touch {} \;
+find /etc/periodic/monthly/ -exec touch {} \;
diff --git a/alpine/assets/services/logrotate/.optional b/alpine/assets/services/logrotate/.optional
new file mode 100644
index 00000000..e69de29b
diff --git a/alpine/assets/services/logrotate/.priority b/alpine/assets/services/logrotate/.priority
new file mode 100644
index 00000000..a6905f8b
--- /dev/null
+++ b/alpine/assets/services/logrotate/.priority
@@ -0,0 +1 @@
+999
diff --git a/alpine/assets/services/logrotate/.tags/logs-stack b/alpine/assets/services/logrotate/.tags/logs-stack
new file mode 100644
index 00000000..e69de29b
diff --git a/image/service-available/:logrotate/assets/config/logrotate.conf b/alpine/assets/services/logrotate/config/logrotate.conf
similarity index 100%
rename from image/service-available/:logrotate/assets/config/logrotate.conf
rename to alpine/assets/services/logrotate/config/logrotate.conf
diff --git a/image/service-available/:logrotate/assets/config/logrotate_syslogng b/alpine/assets/services/logrotate/config/logrotate_syslogng
similarity index 99%
rename from image/service-available/:logrotate/assets/config/logrotate_syslogng
rename to alpine/assets/services/logrotate/config/logrotate_syslogng
index 93d6b027..2990c327 100644
--- a/image/service-available/:logrotate/assets/config/logrotate_syslogng
+++ b/alpine/assets/services/logrotate/config/logrotate_syslogng
@@ -36,4 +36,4 @@
kill -HUP `cat /var/run/syslog-ng.pid`
fi
endscript
-}
\ No newline at end of file
+}
diff --git a/alpine/assets/services/logrotate/download.sh b/alpine/assets/services/logrotate/download.sh
new file mode 100755
index 00000000..448fe816
--- /dev/null
+++ b/alpine/assets/services/logrotate/download.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+
+# download logrotate
+packages-install logrotate
diff --git a/image/service-available/:logrotate/install.sh b/alpine/assets/services/logrotate/install.sh
similarity index 80%
rename from image/service-available/:logrotate/install.sh
rename to alpine/assets/services/logrotate/install.sh
index 5323d037..9cce3158 100755
--- a/image/service-available/:logrotate/install.sh
+++ b/alpine/assets/services/logrotate/install.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
rm -f /etc/logrotate.conf
rm -f /etc/logrotate.d/syslog-ng
diff --git a/alpine/assets/services/logrotate/startup.sh b/alpine/assets/services/logrotate/startup.sh
new file mode 100755
index 00000000..6e2d1368
--- /dev/null
+++ b/alpine/assets/services/logrotate/startup.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+ln -sf /container/services/logrotate/assets/config/logrotate.conf /etc/logrotate.conf
+ln -sf /container/services/logrotate/assets/config/logrotate_syslogng /etc/logrotate.d/syslog-ng
+
+chmod 444 -R /container/services/logrotate/assets/config/*
diff --git a/alpine/assets/services/syslog-ng/.optional b/alpine/assets/services/syslog-ng/.optional
new file mode 100644
index 00000000..e69de29b
diff --git a/alpine/assets/services/syslog-ng/.priority b/alpine/assets/services/syslog-ng/.priority
new file mode 100644
index 00000000..a6905f8b
--- /dev/null
+++ b/alpine/assets/services/syslog-ng/.priority
@@ -0,0 +1 @@
+999
diff --git a/alpine/assets/services/syslog-ng/.tags/logs-stack b/alpine/assets/services/syslog-ng/.tags/logs-stack
new file mode 100644
index 00000000..e69de29b
diff --git a/image/service-available/:syslog-ng-core/assets/config/syslog-ng.conf b/alpine/assets/services/syslog-ng/config/syslog-ng.conf
similarity index 100%
rename from image/service-available/:syslog-ng-core/assets/config/syslog-ng.conf
rename to alpine/assets/services/syslog-ng/config/syslog-ng.conf
diff --git a/image/service-available/:syslog-ng-core/assets/config/syslog_ng_default b/alpine/assets/services/syslog-ng/config/syslog_ng_default
similarity index 100%
rename from image/service-available/:syslog-ng-core/assets/config/syslog_ng_default
rename to alpine/assets/services/syslog-ng/config/syslog_ng_default
diff --git a/alpine/assets/services/syslog-ng/download.sh b/alpine/assets/services/syslog-ng/download.sh
new file mode 100755
index 00000000..de14e1c6
--- /dev/null
+++ b/alpine/assets/services/syslog-ng/download.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+
+# download syslog-ng
+packages-install syslog-ng
diff --git a/image/service-available/:syslog-ng-core/install.sh b/alpine/assets/services/syslog-ng/install.sh
similarity index 72%
rename from image/service-available/:syslog-ng-core/install.sh
rename to alpine/assets/services/syslog-ng/install.sh
index cc8a4a22..4c639ddb 100755
--- a/image/service-available/:syslog-ng-core/install.sh
+++ b/alpine/assets/services/syslog-ng/install.sh
@@ -1,7 +1,7 @@
-#!/bin/sh -e
+#!/bin/bash -e
mkdir -p /var/lib/syslog-ng
-rm -f /etc/default/syslog-ng
+rm -f /etc/syslog-ng/syslog-ng
touch /var/log/syslog
chmod u=rw,g=r,o= /var/log/syslog
diff --git a/alpine/assets/services/syslog-ng/process.sh b/alpine/assets/services/syslog-ng/process.sh
new file mode 100755
index 00000000..b9f8a8d3
--- /dev/null
+++ b/alpine/assets/services/syslog-ng/process.sh
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+PIDFILE="/run/container/syslog-ng.pid"
+SYSLOGNG_OPTS=""
+
+[ -r /etc/syslog-ng/syslog-ng ] && . /etc/syslog-ng/syslog-ng
+
+exec /usr/sbin/syslog-ng --pidfile "$PIDFILE" -F $SYSLOGNG_OPTS
diff --git a/image/service-available/:syslog-ng-core/startup.sh b/alpine/assets/services/syslog-ng/startup.sh
similarity index 58%
rename from image/service-available/:syslog-ng-core/startup.sh
rename to alpine/assets/services/syslog-ng/startup.sh
index e0e1c5a5..0e0c411c 100755
--- a/image/service-available/:syslog-ng-core/startup.sh
+++ b/alpine/assets/services/syslog-ng/startup.sh
@@ -1,8 +1,8 @@
-#!/bin/sh -e
-log-helper level eq trace && set -x
+#!/bin/bash -e
+container-logger level eq trace && set -x
-ln -sf "${CONTAINER_SERVICE_DIR}/:syslog-ng-core/assets/config/syslog_ng_default" /etc/default/syslog-ng
-ln -sf "${CONTAINER_SERVICE_DIR}/:syslog-ng-core/assets/config/syslog-ng.conf" /etc/syslog-ng/syslog-ng.conf
+ln -sf /container/services/syslog-ng/assets/config/syslog_ng_default /etc/syslog-ng/syslog-ng
+ln -sf /container/services/syslog-ng/assets/config/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
# If /dev/log is either a named pipe or it was placed there accidentally,
# e.g. because of the issue documented at https://github.com/phusion/baseimage-docker/pull/25,
@@ -12,11 +12,14 @@ if [ ! -S /var/lib/syslog-ng/syslog-ng.ctl ]; then rm -f /var/lib/syslog-ng/sysl
# determine output mode on /dev/stdout because of the issue documented at https://github.com/phusion/baseimage-docker/issues/468
if [ -p /dev/stdout ]; then
- sed -i 's/##SYSLOG_OUTPUT_MODE_DEV_STDOUT##/pipe/' /etc/syslog-ng/syslog-ng.conf
+ SYSLOG_OUTPUT_MODE_DEV_STDOUT=pipe
else
- sed -i 's/##SYSLOG_OUTPUT_MODE_DEV_STDOUT##/file/' /etc/syslog-ng/syslog-ng.conf
+ SYSLOG_OUTPUT_MODE_DEV_STDOUT=file
fi
+export SYSLOG_OUTPUT_MODE_DEV_STDOUT
+envsubst-templates /container/services/syslog-ng/config /etc/syslog-ng
+
# If /var/log is writable by another user logrotate will fail
/bin/chown root:root /var/log
/bin/chmod 0755 /var/log
diff --git a/build/README.md b/build/README.md
new file mode 100644
index 00000000..ab7dc14f
--- /dev/null
+++ b/build/README.md
@@ -0,0 +1,19 @@
+# Container-baseimage CI/CD tool
+
+This project uses [dagger](https://github.com/dagger/dagger) as CI/CD tool to build, test and deploy images.
+
+Please refer to the [dagger documentation](https://docs.dagger.io/) to install dagger.
+
+# Example command lines
+## Get help
+```
+go run main.go --help
+go run main.go build --help
+go run main.go test --help
+```
+
+## Build and run tests
+```
+go mod vendor
+dagger run go run main.go test ../Dockerfile develop --with-nonroot
+```
diff --git a/build/cmd/build.go b/build/cmd/build.go
new file mode 100644
index 00000000..ccee3b1f
--- /dev/null
+++ b/build/cmd/build.go
@@ -0,0 +1,134 @@
+package cmd
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/pflag"
+
+ "github.com/osixia/container-baseimage/build/config"
+ "github.com/osixia/container-baseimage/build/job"
+)
+
+type buildFlags struct {
+ dockerfile string
+
+ version string
+ latest bool
+
+ contributors string
+
+ distributions []string
+ arches []string
+
+ withNonroot bool
+}
+
+var buildCmdFlags = &buildFlags{}
+
+var buildCmd = newStepCmd("build", "Run build job", "b", job.Build, buildCmdFlags)
+
+func init() {
+ // flags
+ buildCmd.Flags().SortFlags = false
+ addBuildFlags(buildCmd.Flags(), buildCmdFlags)
+}
+
+func addBuildFlags(fs *pflag.FlagSet, cf *buildFlags) {
+ distributions := distributions()
+ arches := arches()
+
+ fs.StringSliceVarP(&cf.distributions, "distributions", "i", distributions, fmt.Sprintf("distributions to build, choices: %v", strings.Join(distributions, ", ")))
+ fs.StringSliceVarP(&cf.arches, "arches", "a", arches, fmt.Sprintf("arches to build, choices: %v", strings.Join(arches, ", ")))
+ fs.BoolVarP(&cf.withNonroot, "with-nonroot", "n", false, "build also nonroot containers")
+ fs.BoolVarP(&cf.latest, "latest", "l", false, "tag as latest image")
+ fs.StringVarP(&cf.contributors, "contributors", "c", "", "generated image contributors\n")
+}
+
+func distributions() []string {
+
+ distributions := map[string]bool{}
+ for _, img := range config.Images {
+ distributions[img.Distribution] = true
+ }
+
+ distributionsStrings := make([]string, 0, len(distributions))
+ for k := range distributions {
+ distributionsStrings = append(distributionsStrings, k)
+ }
+
+ return distributionsStrings
+}
+
+func arches() []string {
+
+ arches := map[string]bool{}
+ for _, p := range config.Platforms {
+ arches[p.GoArch] = true
+ }
+
+ archesStrings := make([]string, 0, len(arches))
+ for k := range arches {
+ archesStrings = append(archesStrings, k)
+ }
+
+ return archesStrings
+}
+
+func filterImages(distributions []string) []*config.Image {
+
+ images := make([]*config.Image, 0)
+
+ for _, d := range distributions {
+ for _, ci := range config.Images {
+ if ci.Distribution == d {
+ images = append(images, ci)
+ }
+ }
+ }
+
+ return images
+}
+
+func filterPlatforms(pfs []string) []*config.Platform {
+
+ platforms := make([]*config.Platform, 0, len(pfs))
+
+ for _, pf := range pfs {
+ for _, cpf := range config.Platforms {
+ if cpf.GoArch == pf {
+ platforms = append(platforms, cpf)
+ }
+ }
+ }
+
+ return platforms
+}
+
+func (bf *buildFlags) SetDockerfile(d string) {
+ bf.dockerfile = d
+}
+
+func (bf *buildFlags) SetVersion(v string) {
+ bf.version = v
+}
+
+func (bf *buildFlags) toJobOptions() interface{} {
+
+ return &job.BuildOptions{
+ BuildImageOptions: job.BuildImageOptions{
+ Version: bf.version,
+ Latest: bf.latest,
+
+ Contributors: bf.contributors,
+
+ Platforms: filterPlatforms(bf.arches),
+ },
+
+ Dockerfile: bf.dockerfile,
+
+ Images: filterImages(bf.distributions),
+
+ WithNonroot: bf.withNonroot,
+ }
+}
diff --git a/build/cmd/cmd.go b/build/cmd/cmd.go
new file mode 100644
index 00000000..3f419944
--- /dev/null
+++ b/build/cmd/cmd.go
@@ -0,0 +1,91 @@
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/build/config"
+ "github.com/osixia/container-baseimage/build/job"
+)
+
+type jobFlags interface {
+ SetDockerfile(d string)
+ SetVersion(v string)
+
+ toJobOptions() interface{}
+}
+
+const (
+ pipelineGroupID = "pipeline"
+ jobsGroupID = "jobs"
+)
+
+var cmd = &cobra.Command{
+ Use: "ci",
+}
+
+func init() {
+ cobra.EnableCommandSorting = false
+
+ // subcommands groups
+ cmd.AddGroup(&cobra.Group{
+ ID: pipelineGroupID,
+ Title: "Pipeline:",
+ })
+
+ cmd.AddGroup(&cobra.Group{
+ ID: jobsGroupID,
+ Title: "Jobs:",
+ })
+
+ // subcommands
+ cmd.AddCommand(githubCmd)
+
+ cmd.AddCommand(buildCmd)
+ cmd.AddCommand(testCmd)
+ cmd.AddCommand(deployCmd)
+}
+
+func Run(ctx context.Context) error {
+ return cmd.ExecuteContext(ctx)
+}
+
+func newStepCmd(use string, short string, alias string, j job.Job, cf jobFlags) *cobra.Command {
+
+ return &cobra.Command{
+
+ Use: fmt.Sprintf("%v dockerfile [version]", use),
+ Short: short,
+
+ GroupID: jobsGroupID,
+
+ Aliases: []string{
+ alias,
+ },
+
+ Args: cobra.RangeArgs(1, 2),
+
+ Run: func(cmd *cobra.Command, args []string) {
+
+ cf.SetDockerfile(args[0])
+
+ cf.SetVersion(config.DefaultVersion)
+
+ if len(args) > 1 {
+ cf.SetVersion(args[1])
+ }
+
+ if _, err := job.Run(cmd.Context(), j, cf.toJobOptions()); err != nil {
+ fatal(err)
+ }
+ },
+ }
+}
+
+func fatal(err error) {
+ fmt.Printf("error: %v\n", err.Error())
+ os.Exit(1)
+}
diff --git a/build/cmd/deploy.go b/build/cmd/deploy.go
new file mode 100644
index 00000000..a3bc5e5f
--- /dev/null
+++ b/build/cmd/deploy.go
@@ -0,0 +1,32 @@
+package cmd
+
+import (
+ "github.com/osixia/container-baseimage/build/job"
+)
+
+type deployFlags struct {
+ testFlags
+
+ dryRun bool
+}
+
+var deployCmdFlags = &deployFlags{}
+
+var deployCmd = newStepCmd("deploy", "Run build, test and deploy jobs", "d", job.Deploy, deployCmdFlags)
+
+func init() {
+ // flags
+ deployCmd.Flags().SortFlags = false
+
+ deployCmd.Flags().BoolVarP(&deployCmdFlags.dryRun, "dry-run", "d", false, "do not deploy images to registry\n")
+ addBuildFlags(deployCmd.Flags(), &deployCmdFlags.buildFlags)
+}
+
+func (df *deployFlags) toJobOptions() interface{} {
+
+ return &job.DeployOptions{
+ TestOptions: *df.testFlags.toJobOptions().(*job.TestOptions),
+
+ DryRun: df.dryRun,
+ }
+}
diff --git a/build/cmd/github.go b/build/cmd/github.go
new file mode 100644
index 00000000..27b7f91f
--- /dev/null
+++ b/build/cmd/github.go
@@ -0,0 +1,279 @@
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/google/go-github/v41/github"
+ "github.com/hashicorp/go-version"
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/build/config"
+ "github.com/osixia/container-baseimage/build/job"
+)
+
+type githubFlags struct {
+ dryRun bool
+}
+
+type githubData struct {
+ contributors []string
+ tags []string
+}
+
+var githubCmdFlags = &githubFlags{}
+
+var githubCmd = &cobra.Command{
+ Use: "github dockerfile github_ref",
+ Short: "Run jobs based on GitHub Actions CI/CD environment",
+
+ GroupID: pipelineGroupID,
+
+ Args: cobra.ExactArgs(2),
+
+ Run: func(cmd *cobra.Command, args []string) {
+
+ dockerfile := args[0]
+ githubRef := args[1]
+
+ fmt.Printf("Dockerfile: %v, githubRef: %v\n", dockerfile, githubRef)
+
+ testFlags := testFlags{
+ buildFlags: buildFlags{
+ dockerfile: dockerfile,
+
+ distributions: distributions(),
+ arches: arches(),
+
+ withNonroot: true,
+ },
+ }
+
+ jb := job.Test
+ var jFlags jobFlags = &testFlags
+
+ // GITHUB_REF values:
+ // refs/heads/, refs/pull//merge, refs/tags/
+
+ // Build and test: branches main, develop, feature/*, bugfix/*, release/*, hotfix/*, support/*
+ // Build, test and deploy: tags
+
+ ref := regexp.MustCompile(`^refs/(heads|tags|pull)/(.*)$`).FindStringSubmatch(githubRef)
+ if ref == nil || (len(ref) != 3) {
+ fatal(fmt.Errorf("unable to get github reference type and name %v", ref))
+ }
+
+ refType := ref[1]
+ refName := ref[2]
+
+ testFlags.version = refName
+
+ if refType == "pull" {
+
+ testFlags.version = strings.TrimSuffix(refName, "/merge")
+
+ } else if refType == "tags" { // deploy
+
+ if !regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+-?[^.]*$`).MatchString(refName) {
+ fatal(fmt.Errorf("%v tag must be formated like x.y.z or x.y.z-a with x, y and z numbers and a any char except '.'", refName))
+ }
+
+ gi, err := githubDataRequest(config.BaseimageGithubRepo)
+ if err != nil {
+ fatal(err)
+ }
+
+ testFlags.latest, err = isLatestTag(testFlags.version, gi.tags)
+ if err != nil {
+ fatal(err)
+ }
+
+ deployFlags := deployFlags{
+ testFlags: testFlags,
+
+ dryRun: githubCmdFlags.dryRun,
+ }
+ deployFlags.contributors = strings.Join(gi.contributors, ", ")
+
+ jb = job.Deploy
+ jFlags = &deployFlags
+
+ }
+
+ if _, err := job.Run(cmd.Context(), jb, jFlags.toJobOptions()); err != nil {
+ fatal(err)
+ }
+
+ //TODO upload archives to tag release
+ if !githubCmdFlags.dryRun && jb == job.Deploy {
+ panic("unimplemented")
+ }
+
+ },
+}
+
+func init() {
+ // flags
+ githubCmd.Flags().SortFlags = false
+ githubCmd.Flags().BoolVarP(&githubCmdFlags.dryRun, "dry-run", "d", false, "do not deploy images to registry\n")
+}
+
+// isLatestTag reports whether tag is greater than or equal to every version
+// found in existingTags, i.e. whether the image built for tag should also be
+// tagged "latest".
+//
+// Only stable x.y.z tags are considered: pre-release tags (x.y.z-a) always
+// return false. Entries of existingTags may carry arbitrary prefixes or
+// suffixes around the x.y.z part.
+func isLatestTag(tag string, existingTags []string) (bool, error) {
+
+	// test latest only on x.y.z tags (not x.y.z-a)
+	if !regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`).MatchString(tag) {
+		return false, nil
+	}
+
+	tVersion, err := version.NewVersion(tag)
+	if err != nil {
+		return false, err
+	}
+
+	// no previous tag: the new tag is necessarily the latest
+	// (the original indexed etVersions[len-1] and panicked on an empty list)
+	if len(existingTags) == 0 {
+		return true, nil
+	}
+
+	// remove prefixes from github tags
+	// compile the extraction regexp once instead of once per tag
+	re := regexp.MustCompile(`^.*([0-9]+\.[0-9]+\.[0-9]+)-?.*$`)
+	etVersions := make([]*version.Version, 0, len(existingTags))
+	for _, gtag := range existingTags {
+		m := re.FindStringSubmatch(gtag)
+		if m == nil || len(m) < 2 {
+			return false, fmt.Errorf("error: parsing tag: %v", gtag)
+		}
+
+		v, err := version.NewVersion(m[1])
+		if err != nil {
+			return false, err
+		}
+		etVersions = append(etVersions, v)
+	}
+
+	// sort github tags, greatest last
+	sort.Sort(version.Collection(etVersions))
+
+	// compare new tag version against the greatest existing tag version
+	return tVersion.GreaterThanOrEqual(etVersions[len(etVersions)-1]), nil
+}
+
+// githubDataRequest fetches the complete contributor and tag lists of the
+// given GitHub repository, following pagination on both endpoints.
+// The returned tags are sorted lexicographically.
+func githubDataRequest(repo *config.GithubRepo) (*githubData, error) {
+
+	client := github.NewClient(nil)
+
+	// use the root command context for every request; fall back to the
+	// background context when called outside of a cobra Run (the original
+	// mixed cmd.Context() and context.Background() between the two loops)
+	ctx := cmd.Context()
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// get all contributors
+	copt := &github.ListContributorsOptions{}
+
+	var contributors []string
+	for {
+		cbs, r, err := client.Repositories.ListContributors(ctx, repo.Organization, repo.Project, copt)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, c := range cbs {
+			// GetLogin is nil-safe, unlike dereferencing c.Login directly
+			contributors = append(contributors, c.GetLogin())
+		}
+
+		if r.NextPage == 0 {
+			break
+		}
+
+		copt.Page = r.NextPage
+	}
+
+	// get all tags
+	topt := &github.ListOptions{}
+
+	var tags []string
+	for {
+
+		ts, r, err := client.Repositories.ListTags(ctx, repo.Organization, repo.Project, topt)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, t := range ts {
+			// GetName is nil-safe, unlike dereferencing t.Name directly
+			tags = append(tags, t.GetName())
+		}
+
+		if r.NextPage == 0 {
+			break
+		}
+
+		topt.Page = r.NextPage
+	}
+
+	// sort results
+	sort.Strings(tags)
+
+	return &githubData{
+		contributors: contributors,
+		tags:         tags,
+	}, nil
+}
+
+/* func githubReleaseRequest(ctx context.Context, repo *config.GithubRepo, df *deployFlags, brs []*job.BuildResult) error {
+
+ token := os.Getenv("GITHUB_TOKEN")
+ if token == "" {
+ return errors.New("Environment variable GITHUB_TOKEN must be set")
+ }
+
+ // setting up the GitHub client with authentication
+ ts := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: token},
+ )
+ tc := oauth2.NewClient(ctx, ts)
+ client := github.NewClient(tc)
+
+ release := &github.RepositoryRelease{
+ TagName: github.String(df.version),
+ Name: github.String(df.version),
+ Body: github.String(releaseBody),
+ //Prerelease
+ }
+
+ var previousRelease *github.RepositoryRelease
+
+ // check if the release already exists
+ releases, _, err := client.Repositories.ListReleases(ctx, repo.Organization, repo.Project, nil)
+ if err != nil {
+ return err
+ }
+ for _, r := range releases {
+ if r.GetTagName() == df.version {
+ previousRelease = r
+ }
+ }
+
+ if previousRelease != nil {
+ release, _, err := client.Repositories.EditRelease(ctx, repo.Organization, repo.Project, previousRelease.GetID(), release)
+ if err != nil {
+ return err
+ }
+ } else {
+ release, _, err := client.Repositories.CreateRelease(ctx, repo.Organization, repo.Project, release)
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, br := range brs {
+ for _, f := range br.Files {
+			// add a file to the release
+ filePath := "path/to/your/file.zip"
+ file, err := os.Open(filePath)
+ if err != nil {
+ log.Fatalf("Error opening file: %v", err)
+ }
+ defer file.Close()
+
+ opts := &github.UploadOptions{Name: "file.zip"}
+ asset, _, err := client.Repositories.UploadReleaseAsset(ctx, owner, repo, release.GetID(), opts, file)
+ if err != nil {
+ log.Fatalf("Error uploading release asset: %v", err)
+ }
+
+ log.Printf("Asset uploaded: %v", asset.GetBrowserDownloadURL())
+ }
+ }
+} */
diff --git a/build/cmd/test.go b/build/cmd/test.go
new file mode 100644
index 00000000..4d74268f
--- /dev/null
+++ b/build/cmd/test.go
@@ -0,0 +1,26 @@
+package cmd
+
+import (
+ "github.com/osixia/container-baseimage/build/job"
+)
+
+type testFlags struct {
+ buildFlags
+}
+
+var testCmdFlags = &testFlags{}
+
+var testCmd = newStepCmd("test", "Run build and test jobs", "t", job.Test, testCmdFlags)
+
+func init() {
+ // flags
+ testCmd.Flags().SortFlags = false
+ addBuildFlags(testCmd.Flags(), &testCmdFlags.buildFlags)
+}
+
+func (tf *testFlags) toJobOptions() interface{} {
+
+ return &job.TestOptions{
+ BuildOptions: *tf.buildFlags.toJobOptions().(*job.BuildOptions),
+ }
+}
diff --git a/build/config/config.go b/build/config/config.go
new file mode 100644
index 00000000..0ac41459
--- /dev/null
+++ b/build/config/config.go
@@ -0,0 +1,95 @@
+package config
+
+import "dagger.io/dagger"
+
+// Image describes one buildable container image flavour.
+type Image struct {
+	// RootImage is the FROM image of the docker build
+	RootImage string
+	// Distribution is the distribution family ("debian", "ubuntu", "alpine")
+	Distribution string
+
+	// BuildImageName is the name the image is published under
+	BuildImageName string
+	// TagPrefixes are prepended to version tags, most specific first
+	TagPrefixes []string
+}
+
+// Platform is a docker build target platform and its Go architecture name.
+type Platform struct {
+	Name   dagger.Platform
+	GoArch string
+}
+
+// GithubRepo identifies a GitHub repository.
+type GithubRepo struct {
+	Organization string
+	Project      string
+}
+
+// Nonroot describes an unprivileged user/group to create in nonroot images.
+type Nonroot struct {
+	UserName  string
+	UserID    int
+	GroupName string
+	GroupID   int
+}
+
+// images
+
+// DefaultVersion is the version used when no version argument is given.
+var DefaultVersion = "develop"
+
+// DefaultImage is the flavour that also gets the unprefixed/latest tags.
+var DefaultImage = DebianBookwormImage
+
+// DebianBookwormImage is the Debian bookworm flavour.
+var DebianBookwormImage = &Image{
+	RootImage:    "debian:bookworm-slim",
+	Distribution: "debian",
+
+	BuildImageName: "osixia/baseimage",
+	TagPrefixes:    []string{"debian-bookworm", "debian"},
+}
+
+// Ubuntu2404Image is the Ubuntu 24.04 flavour.
+var Ubuntu2404Image = &Image{
+	RootImage:    "ubuntu:24.04",
+	Distribution: "ubuntu",
+
+	BuildImageName: "osixia/baseimage",
+	TagPrefixes:    []string{"ubuntu-24.04", "ubuntu"},
+}
+
+// Alpine321Image is the Alpine 3.21 flavour.
+var Alpine321Image = &Image{
+	RootImage:    "alpine:3.21.2",
+	Distribution: "alpine",
+
+	BuildImageName: "osixia/baseimage",
+	TagPrefixes:    []string{"alpine-3.21", "alpine-3", "alpine"},
+}
+
+// Images lists every buildable image flavour.
+var Images = []*Image{
+	DebianBookwormImage,
+	Ubuntu2404Image,
+	Alpine321Image,
+}
+
+// platforms
+
+// Amd64Platform is the linux/amd64 build target.
+var Amd64Platform = &Platform{
+	Name:   "linux/amd64",
+	GoArch: "amd64",
+}
+
+// Arm64Platform is the linux/arm64 build target.
+var Arm64Platform = &Platform{
+	Name:   "linux/arm64",
+	GoArch: "arm64",
+}
+
+// Platforms lists every supported build target platform.
+var Platforms = []*Platform{
+	Amd64Platform,
+	Arm64Platform,
+}
+
+// nonroot
+
+// Unprivileged user/group created in nonroot image variants.
+var NonrootUser = "nonroot"
+var NonrootUID = 65532
+
+var NonrootGroup = "nonroot"
+var NonrootGID = 65532
+
+// github repo
+
+// BaseimageGithubRepo is the repository queried for contributors and tags.
+var BaseimageGithubRepo = &GithubRepo{
+	Organization: "osixia",
+	Project:      "container-baseimage",
+}
diff --git a/build/job/build.go b/build/job/build.go
new file mode 100644
index 00000000..00b34951
--- /dev/null
+++ b/build/job/build.go
@@ -0,0 +1,310 @@
+package job
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "dagger.io/dagger"
+
+ "github.com/osixia/container-baseimage/build/config"
+)
+
+// BuildOptions are the options of the build job.
+type BuildOptions struct {
+	BuildImageOptions
+
+	// Dockerfile is the path of the Dockerfile to build, relative to the
+	// current working directory
+	Dockerfile string
+
+	// Images lists the image flavours to build
+	Images []*config.Image
+
+	// WithNonroot also builds a "-nonroot" variant of every image
+	WithNonroot bool
+}
+
+// BuildImageOptions are the per-image build options.
+type BuildImageOptions struct {
+	// Version is the version being built (branch, pull request or tag name)
+	Version string
+	// Latest is true when Version is the most recent release tag
+	Latest bool
+
+	// Contributors is a comma-separated contributor list passed as build arg
+	Contributors string
+
+	// Platforms lists the target platforms to build for
+	Platforms []*config.Platform
+}
+
+// BuildResult groups the build outputs of one image flavour.
+type BuildResult struct {
+	Image *config.Image
+	// Containers holds one built container per platform
+	Containers []*dagger.Container
+	// Tags are the tags the image should be published under
+	Tags []string
+
+	// Files are the exported archive names (default image only)
+	Files []string
+}
+
+// build runs a docker build of options.Dockerfile for every requested
+// image flavour (and optional nonroot variant) on every platform, then
+// exports the compressed container-baseimage binaries of the default image
+// into ./bin next to the Dockerfile.
+func build(ctx context.Context, client *dagger.Client, options *BuildOptions) ([]*BuildResult, error) {
+
+	wd, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
+
+	dp, err := filepath.Abs(filepath.Join(wd, options.Dockerfile))
+	if err != nil {
+		return nil, err
+	}
+
+	// check dockerfile exists
+	if _, err := os.Stat(dp); err != nil {
+		return nil, err
+	}
+
+	dockerfileDir := filepath.Dir(dp)
+	dockerfileName := filepath.Base(dp)
+
+	// keep this exclusion list in sync with .dockerignore
+	hostDirectoryOpts := dagger.HostDirectoryOpts{
+		Exclude: []string{".git/", ".github/", "bin/", "build/", "docs/", "*.md", ".dockerignore", ".gitignore", "Dockerfile"},
+	}
+
+	contextDir := client.Host().Directory(dockerfileDir, hostDirectoryOpts)
+	dockerfile := client.Host().File(dp)
+	workspace := contextDir.WithFile(dockerfileName, dockerfile)
+
+	entries, err := workspace.Entries(ctx)
+	if err != nil {
+		return nil, err
+	}
+	fmt.Printf("working dir content: %v\n", entries)
+
+	var builds []*BuildResult
+	for _, image := range options.Images {
+
+		b, err := buildImage(ctx, client, workspace, dockerfileName, &options.BuildImageOptions, image, false)
+		if err != nil {
+			return nil, err
+		}
+
+		builds = append(builds, b)
+
+		// optionally build a second, nonroot variant of the image
+		if options.WithNonroot {
+			b, err := buildImage(ctx, client, workspace, dockerfileName, &options.BuildImageOptions, image, true)
+			if err != nil {
+				return nil, err
+			}
+
+			builds = append(builds, b)
+		}
+	}
+
+	// container to compress bin
+	tar := client.Container().From("ubuntu:latest")
+
+	// create empty directory to put build outputs
+	outputs := client.Directory()
+
+	// export bin from default image in outputs directory
+	for _, b := range builds {
+
+		// export bin only from default image
+		if b.Image != config.DefaultImage {
+			continue
+		}
+
+		for _, c := range b.Containers {
+
+			p, err := c.Platform(ctx)
+			if err != nil {
+				return nil, err
+			}
+			ps := strings.ReplaceAll(string(p), "/", "_")
+
+			// create dagger directory with compressed bin
+			binName := fmt.Sprintf("container-baseimage_%v", ps)
+			tarName := fmt.Sprintf("%v.tar.gz", binName)
+
+			// the original discarded this Sync error, hiding a missing
+			// binary until the tar step failed later
+			tar, err = tar.WithFile(binName, c.File("/usr/sbin/container-baseimage")).Sync(ctx)
+			if err != nil {
+				return nil, err
+			}
+			tar = tar.WithExec([]string{"tar", "-czf", tarName, binName})
+
+			outputs = outputs.WithFile(tarName, tar.File(tarName))
+
+			b.Files = append(b.Files, tarName)
+		}
+	}
+
+	// export outputs directory on filesystem
+	_, err = outputs.Export(ctx, filepath.Join(dockerfileDir, "/bin"))
+	if err != nil {
+		return nil, err
+	}
+
+	return builds, nil
+}
+
+// buildImage builds one image flavour for every requested platform and
+// returns the resulting containers together with the tags they should be
+// published under.
+//
+// When nonroot is true a dedicated unprivileged user/group is created in
+// the image, the container user is switched to it and every tag carries a
+// "-nonroot" suffix (via buildTags).
+func buildImage(ctx context.Context, client *dagger.Client, workspace *dagger.Directory, dockerfile string, options *BuildImageOptions, image *config.Image, nonroot bool) (*BuildResult, error) {
+
+	b := &BuildResult{
+		Image:      image,
+		Containers: make([]*dagger.Container, 0, len(options.Platforms)),
+
+		Tags: buildTags(image, options.Version, options.Latest, nonroot),
+	}
+
+	for _, platform := range options.Platforms {
+
+		cbo := dagger.ContainerBuildOpts{
+			BuildArgs:  buildArgs(image, options.Version, options.Contributors, platform, b.LongestTag()),
+			Dockerfile: dockerfile,
+		}
+
+		c, err := client.Container(dagger.ContainerOpts{Platform: platform.Name}).Build(workspace, cbo).Sync(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		if nonroot {
+			if image.Distribution == "alpine" {
+				c = c.WithExec([]string{"--", "adduser", "-D", config.NonrootUser, "-u", strconv.Itoa(config.NonrootUID), "-g", config.NonrootGroup, "-s", "/sbin/nologin"})
+			} else if slices.Contains([]string{"debian", "ubuntu"}, image.Distribution) {
+				c = c.WithExec([]string{"--", "groupadd", config.NonrootGroup, "-g", strconv.Itoa(config.NonrootGID)}).
+					WithExec([]string{"--", "useradd", "-m", config.NonrootUser, "-u", strconv.Itoa(config.NonrootUID), "-g", strconv.Itoa(config.NonrootGID), "-s", "/sbin/nologin"})
+			} else {
+				panic(fmt.Sprintf("error: root image %v not supported", image.Distribution))
+			}
+
+			// dagger containers are immutable: WithUser returns a new
+			// container, so the result must be reassigned (the original
+			// dropped it, leaving the nonroot variant running as root)
+			c = c.WithUser(config.NonrootUser)
+		}
+
+		// add oci labels
+		c = c.WithLabel("org.opencontainers.image.title", image.BuildImageName).
+			WithLabel("org.opencontainers.image.version", b.LongestTag()).
+			WithLabel("org.opencontainers.image.created", time.Now().String()).
+			WithLabel("org.opencontainers.image.source", fmt.Sprintf("https://github.com/%v/%v", config.BaseimageGithubRepo.Organization, config.BaseimageGithubRepo.Project)).
+			WithLabel("org.opencontainers.image.licenses", "MIT")
+
+		c, err = c.Sync(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		b.Containers = append(b.Containers, c)
+	}
+
+	return b, nil
+}
+
+// buildArgs assembles the docker build arguments for one image/platform
+// pair. BUILD_VERSION and BUILD_CONTRIBUTORS are only set when a value is
+// available.
+func buildArgs(image *config.Image, version string, contributors string, platform *config.Platform, tag string) []dagger.BuildArg {
+
+	var args []dagger.BuildArg
+
+	// common build args
+	if version != "" {
+		args = append(args, dagger.BuildArg{Name: "BUILD_VERSION", Value: version})
+	}
+	if contributors != "" {
+		args = append(args, dagger.BuildArg{Name: "BUILD_CONTRIBUTORS", Value: contributors})
+	}
+
+	// image build args
+	args = append(args,
+		dagger.BuildArg{Name: "ROOT_IMAGE", Value: image.RootImage},
+		dagger.BuildArg{Name: "BUILD_IMAGE_NAME", Value: image.BuildImageName},
+		dagger.BuildArg{Name: "BUILD_IMAGE_TAG", Value: tag},
+	)
+
+	// platform build args
+	return append(args, dagger.BuildArg{Name: "GOARCH", Value: platform.GoArch})
+}
+
+// buildTags computes the tags of one image build.
+//
+// The default image additionally gets the unprefixed version tags and, on
+// the latest release, the "latest" tag. Every image gets its prefixed tags;
+// latest releases also get the shortened x.y / x / prefix-only tags.
+// When nonroot is set every tag is suffixed with "-nonroot".
+//
+// Note: v[0]/v[1] are only indexed when latest is true, in which case
+// callers have validated version as x.y.z (see DeployOptions.Validate).
+func buildTags(image *config.Image, version string, latest bool, nonroot bool) []string {
+	var tags []string
+
+	v := strings.Split(version, ".")
+
+	// default image tags
+	if image == config.DefaultImage {
+
+		// version x.y.z
+		tags = append(tags, version)
+
+		// latest release
+		if latest {
+
+			// latest
+			tags = append(tags, "latest")
+
+			// version x.y
+			tags = append(tags, fmt.Sprintf("%v.%v", v[0], v[1]))
+
+			// version x
+			tags = append(tags, v[0])
+		}
+	}
+
+	// regular image tags
+	for _, tagPrefix := range image.TagPrefixes {
+
+		// prefix + version x.y.z
+		tags = append(tags, fmt.Sprintf("%v-%v", tagPrefix, version))
+
+		// latest release
+		if latest {
+
+			// prefix + version x.y
+			tags = append(tags, fmt.Sprintf("%v-%v.%v", tagPrefix, v[0], v[1]))
+
+			// prefix + version x
+			tags = append(tags, fmt.Sprintf("%v-%v", tagPrefix, v[0]))
+
+			// prefix only (the original wrapped this in a no-op Sprintf)
+			tags = append(tags, tagPrefix)
+		}
+	}
+
+	// add nonroot suffix
+	if nonroot {
+		for i := range tags {
+			tags[i] += "-nonroot"
+		}
+	}
+
+	return tags
+}
+
+// LongestTag returns the longest tag of the build result, or "" when it
+// carries no tags. Ties keep the first longest tag encountered.
+func (br *BuildResult) LongestTag() string {
+
+	longest := ""
+	for _, tag := range br.Tags {
+		if len(tag) > len(longest) {
+			longest = tag
+		}
+	}
+
+	return longest
+}
diff --git a/build/job/deploy.go b/build/job/deploy.go
new file mode 100644
index 00000000..37112b8d
--- /dev/null
+++ b/build/job/deploy.go
@@ -0,0 +1,65 @@
+package job
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "dagger.io/dagger"
+)
+
+type DeployOptions struct {
+ TestOptions
+
+ DryRun bool
+}
+
+// Validate checks option consistency before running the deploy job: when
+// Latest is set the version must be a stable x.y.z tag, since only stable
+// releases may be tagged "latest".
+func (do *DeployOptions) Validate() error {
+
+	if do.Latest && !regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`).MatchString(do.BuildOptions.Version) {
+		// no "error:" prefix: go convention, and fatal() already adds one
+		return fmt.Errorf("with latest set, version must be a tag formatted like x.y.z with x, y and z numbers")
+	}
+
+	return nil
+}
+
+// deploy runs the test job (which itself runs build) and publishes every
+// resulting multi-platform image under all of its tags. With DryRun set the
+// images are still listed but nothing is pushed.
+func deploy(ctx context.Context, client *dagger.Client, options *DeployOptions) ([]*BuildResult, error) {
+
+	if err := options.Validate(); err != nil {
+		return nil, err
+	}
+
+	builds, err := test(ctx, client, &options.TestOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, b := range builds {
+
+		// one manifest per tag, grouping all platform variants
+		publishOptions := dagger.ContainerPublishOpts{
+			PlatformVariants: b.Containers,
+			// Some registries may require explicit use of docker mediatypes
+			// rather than the default OCI mediatypes
+			// MediaTypes: dagger.Dockermediatypes,
+		}
+
+		for _, tag := range b.Tags {
+
+			img := fmt.Sprintf("%v:%v", b.Image.BuildImageName, tag)
+
+			fmt.Printf("pushing image %v ...\n", img)
+
+			// dry run: announce the push but skip it
+			if options.DryRun {
+				continue
+			}
+
+			digest, err := client.Container().Publish(ctx, img, publishOptions)
+			if err != nil {
+				return nil, err
+			}
+			fmt.Printf("image %v pushed with digest %v\n", img, digest)
+		}
+	}
+
+	return builds, nil
+}
diff --git a/build/job/job.go b/build/job/job.go
new file mode 100644
index 00000000..576c1708
--- /dev/null
+++ b/build/job/job.go
@@ -0,0 +1,38 @@
+package job
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "dagger.io/dagger"
+)
+
+// Job identifies one of the CI pipeline jobs.
+type Job int
+
+// Pipeline jobs: Test runs Build first, Deploy runs Test first.
+const (
+	Build Job = iota
+	Test
+	Deploy
+)
+
+// Run connects to the dagger engine and executes the requested job with its
+// job-specific options (*BuildOptions, *TestOptions or *DeployOptions).
+func Run(ctx context.Context, s Job, options interface{}) ([]*BuildResult, error) {
+
+	// dagger client, engine logs go to stderr
+	client, err := dagger.Connect(ctx, dagger.WithLogOutput(os.Stderr))
+	if err != nil {
+		return nil, err
+	}
+	defer client.Close()
+
+	switch s {
+	case Build:
+		return build(ctx, client, options.(*BuildOptions))
+	case Test:
+		return test(ctx, client, options.(*TestOptions))
+	case Deploy:
+		return deploy(ctx, client, options.(*DeployOptions))
+	default:
+		return nil, fmt.Errorf("%v: job unknown", s)
+	}
+}
diff --git a/build/job/test.go b/build/job/test.go
new file mode 100644
index 00000000..2b0801c7
--- /dev/null
+++ b/build/job/test.go
@@ -0,0 +1,55 @@
+package job
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "dagger.io/dagger"
+)
+
+// version
+// no process (check env files)
+// single process (check env files)
+// multiprocess (check env files)
+// run only one service
+// no startup / process / finish
+// skip env files
+// pre startup / process / finish file
+// restart processes
+
+// kill all ?
+
+// debug
+// install packages
+
+type TestOptions struct {
+ BuildOptions
+}
+
+// test runs the build job then checks every built container: executing the
+// image with "--version" must print "<image-name>:<longest-tag>".
+func test(ctx context.Context, client *dagger.Client, options *TestOptions) ([]*BuildResult, error) {
+
+	builds, err := build(ctx, client, &options.BuildOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, b := range builds {
+
+		// version string the container is expected to report
+		expectedImageVersion := fmt.Sprintf("%v:%v", b.Image.BuildImageName, b.LongestTag())
+
+		for _, i := range b.Containers {
+
+			imgVersion, err := i.WithExec([]string{"--version"}).Stdout(ctx)
+			if err != nil {
+				return nil, err
+			}
+
+			// Stdout keeps the trailing newline; drop it before comparing
+			if strings.TrimSuffix(imgVersion, "\n") != expectedImageVersion {
+				return nil, fmt.Errorf("error: image version is %v expected image version is %v", imgVersion, expectedImageVersion)
+			}
+		}
+	}
+
+	return builds, nil
+}
diff --git a/build/main.go b/build/main.go
new file mode 100644
index 00000000..9f9a9bf0
--- /dev/null
+++ b/build/main.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ "context"
+ "os"
+
+ "github.com/osixia/container-baseimage/build/cmd"
+)
+
+// main runs the root ci command; error reporting is handled by the command
+// itself, main only reflects failure in the process exit code.
+func main() {
+
+	ctx := context.Background()
+	if err := cmd.Run(ctx); err != nil {
+		os.Exit(1)
+	}
+}
diff --git a/cmd/cmd.go b/cmd/cmd.go
new file mode 100644
index 00000000..afede350
--- /dev/null
+++ b/cmd/cmd.go
@@ -0,0 +1,79 @@
+package cmd
+
+import (
+ "context"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/entrypoint"
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/cmd/processes"
+ "github.com/osixia/container-baseimage/cmd/services"
+)
+
+const (
+ installGroupID = "install"
+ entrypointGroupID = "entrypoint"
+ containerGroupID = "container"
+ envsubstGroupID = "envsubst"
+ loggerGroupID = "logger"
+)
+
+var cmd = &cobra.Command{
+ Use: "container-baseimage",
+
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ return logger.HandleFlags(cmd)
+ },
+}
+
+func init() {
+ cobra.EnableCommandSorting = false
+
+ // subcommands groups
+ cmd.AddGroup(&cobra.Group{
+ ID: installGroupID,
+ Title: "Install Command:",
+ })
+
+ cmd.AddGroup(&cobra.Group{
+ ID: entrypointGroupID,
+ Title: "Entrypoint Command:",
+ })
+
+ cmd.AddGroup(&cobra.Group{
+ ID: containerGroupID,
+ Title: "Container Commands:",
+ })
+
+ cmd.AddGroup(&cobra.Group{
+ ID: envsubstGroupID,
+ Title: "Envsubst Commands:",
+ })
+
+ cmd.AddGroup(&cobra.Group{
+ ID: loggerGroupID,
+ Title: "Logger Command:",
+ })
+
+ // subcommands
+ entrypoint.EntrypointCmd.GroupID = entrypointGroupID
+ cmd.AddCommand(entrypoint.EntrypointCmd)
+
+ services.ServicesCmd.GroupID = containerGroupID
+ processes.ProcessesCmd.GroupID = containerGroupID
+ cmd.AddCommand(services.ServicesCmd)
+ cmd.AddCommand(processes.ProcessesCmd)
+
+ cmd.AddCommand(installCmd)
+
+ cmd.AddCommand(envsubstCmd)
+ cmd.AddCommand(envsubstTemplatesCmd)
+
+ logger.LoggerCmd.GroupID = loggerGroupID
+ cmd.AddCommand(logger.LoggerCmd)
+}
+
+func Run(ctx context.Context) error {
+ return cmd.ExecuteContext(ctx)
+}
diff --git a/cmd/entrypoint/container.go b/cmd/entrypoint/container.go
new file mode 100644
index 00000000..c019efb4
--- /dev/null
+++ b/cmd/entrypoint/container.go
@@ -0,0 +1,52 @@
+package entrypoint
+
+import (
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var containerCmd = &cobra.Command{
+ Use: "container",
+ Short: "Container image information",
+
+ Aliases: []string{
+ "c",
+ },
+}
+
+// debugPackagesFunc lists the distribution debug packages, one per line.
+// (renamed from debugPackages for consistency with its *Func siblings)
+var debugPackagesFunc = func() string {
+	return strings.Join(core.Instance().Distribution().Config().DebugPackages, "\n")
+}
+
+// environmentFilesFunc lists the container environment files, one per line.
+var environmentFilesFunc = func() string {
+	efs, err := core.Instance().Filesystem().ListDotEnv()
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+	return strings.Join(efs, "\n")
+}
+
+// servicesFunc concatenates the status of every container service.
+var servicesFunc = func() string {
+	svcs, err := core.Instance().Services().List()
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
+	r := ""
+	for _, s := range svcs {
+		r += s.Status()
+	}
+
+	return r
+}
+
+func init() {
+	// subcommands
+	containerCmd.AddCommand(newPrintCmd("debug-packages", "Debug packages", "dbg-pkgs", debugPackagesFunc))
+	containerCmd.AddCommand(newPrintCmd("environment-files", "Environment file(s)", "env", environmentFilesFunc))
+	containerCmd.AddCommand(newPrintCmd("services", "Services", "svcs", servicesFunc))
+}
diff --git a/cmd/entrypoint/entrypoint.go b/cmd/entrypoint/entrypoint.go
new file mode 100755
index 00000000..ce08183c
--- /dev/null
+++ b/cmd/entrypoint/entrypoint.go
@@ -0,0 +1,207 @@
+package entrypoint
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/entrypoint/generate"
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+const (
+ Banner = " / _ \\ ___(_)_ _(_) __ _ / / __ ) __ _ ___ ___(_)_ __ ___ __ _ __ _ ___ \n| | | / __| \\ \\/ / |/ _` | / /| _ \\ / _` / __|/ _ \\ | '_ ` _ \\ / _` |/ _` |/ _ \n| |_| \\__ \\ |> <| | (_| |/ / | |_) | (_| \\__ \\ __/ | | | | | | (_| | (_| | __/\n \\___/|___/_/_/\\_\\_|\\__,_/_/ |____/ \\__,_|___/\\___|_|_| |_| |_|\\__,_|\\__, |\\___|\n |___/ "
+)
+
+var ErrInvalidLifecycleStep = errors.New("invalid lifecycle step")
+
+var entrypointCmdFlags = &entrypointFlags{}
+
+// NilBool is a tri-state boolean flag value (used with pflag's Flags().VarP):
+// Value stays nil while the flag is unset, and points to the parsed boolean
+// once Set has been called.
+type NilBool struct {
+	Value *bool
+}
+
+// Set parses s as a boolean and stores it. An empty string counts as "true"
+// so the flag can be passed without an explicit value.
+func (b *NilBool) Set(s string) error {
+
+	if s == "" {
+		s = "true"
+	}
+
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	b.Value = &v
+
+	return nil
+}
+
+// String renders the stored value, or "" when the flag was never set.
+func (b *NilBool) String() string {
+	if b.Value == nil {
+		return ""
+	}
+	return fmt.Sprintf("%t", *b.Value)
+}
+
+// Type is the value type name displayed in the flag help text.
+func (b *NilBool) Type() string {
+	return "bool"
+}
+
+type entrypointFlags struct {
+ core.EntrypointOptions
+
+ exec []string
+ runOnlyLifecycleStep string
+ restart NilBool
+ debug bool
+ version bool
+}
+
+func (o *entrypointFlags) toEntrypointOptions() (core.EntrypointOptions, error) {
+
+ if o.exec != nil {
+ services, err := core.Instance().Services().List(core.WithServicesNames(o.exec), core.HandleServicesTagPrefixInNames(config.TagsNamePrefix))
+ if err != nil {
+ return o.EntrypointOptions, err
+ }
+ o.Services = services
+ }
+
+ if o.runOnlyLifecycleStep != "" {
+ switch o.runOnlyLifecycleStep {
+ case string(core.LifecycleStepStartup):
+ o.SkipProcess = true
+ o.SkipFinish = true
+ case string(core.LifecycleStepProcess):
+ o.SkipStartup = true
+ o.SkipFinish = true
+ case string(core.LifecycleStepFinish):
+ o.SkipStartup = true
+ o.SkipProcess = true
+ default:
+ return o.EntrypointOptions, fmt.Errorf("%v: %w (choices: %v)", o.runOnlyLifecycleStep, ErrInvalidLifecycleStep, strings.Join(core.LifecycleStepsList(), ", "))
+ }
+ }
+
+ o.RestartProcesses = o.restart.Value
+
+ if o.debug {
+ // append debug packages to packages to install
+ o.InstallPackages = append(o.InstallPackages, core.Instance().Distribution().Config().DebugPackages...)
+ }
+
+ return o.EntrypointOptions, nil
+}
+
+var EntrypointCmd = &cobra.Command{
+ Use: "entrypoint",
+ Short: "Container entrypoint",
+
+ Long: fmt.Sprintf("\n%v\nContainer image built with osixia/baseimage (%v) 🐳✨🌴\nhttps://github.com/osixia/container-baseimage", Banner, config.BuildVersion),
+
+ Aliases: []string{
+ "ep",
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ containerImage := core.Instance().Config().Image
+
+ if entrypointCmdFlags.version {
+ fmt.Println(containerImage)
+ os.Exit(0)
+ }
+ log.Infof("Container image: %v", containerImage)
+
+ if entrypointCmdFlags.debug && log.Level() < log.LevelDebug {
+ if err := log.SetLevel(log.Levels[log.LevelDebug]); err != nil {
+ log.Error(err.Error())
+ }
+ }
+
+ epo, err := entrypointCmdFlags.toEntrypointOptions()
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ epo.Args = args
+
+ exitCode, err := core.Instance().Entrypoint().Run(cmd.Context(), epo)
+ if err != nil {
+ log.Error(err.Error())
+ }
+
+ os.Exit(exitCode)
+ },
+}
+
+func init() {
+ // subcommands
+ EntrypointCmd.AddCommand(generate.GenerateCmd)
+ EntrypointCmd.AddCommand(containerCmd)
+ EntrypointCmd.AddCommand(thanksCmd)
+
+ // flags
+ EntrypointCmd.Flags().SortFlags = false
+
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.SkipEnvFiles, "skip-env-files", "e", false, "skip getting environment variables values from environment file(s)\n")
+
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.SkipStartup, "skip-startup", "s", false, "skip running pre-startup-cmd and service(s) startup.sh script(s)")
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.SkipProcess, "skip-process", "p", false, "skip running pre-process-cmd and service(s) process.sh script(s)")
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.SkipFinish, "skip-finish", "f", false, "skip running pre-finish-cmd and service(s) finish.sh script(s)")
+ EntrypointCmd.Flags().StringVarP(&entrypointCmdFlags.runOnlyLifecycleStep, "run-only-lifecycle-step", "c", "", fmt.Sprintf("run only one lifecycle step pre-command and script(s) file(s), choices: %v\n", strings.Join(core.LifecycleStepsList(), ", ")))
+ EntrypointCmd.MarkFlagsMutuallyExclusive("run-only-lifecycle-step", "skip-startup")
+ EntrypointCmd.MarkFlagsMutuallyExclusive("run-only-lifecycle-step", "skip-process")
+ EntrypointCmd.MarkFlagsMutuallyExclusive("run-only-lifecycle-step", "skip-finish")
+
+ EntrypointCmd.Flags().StringArrayVarP(&entrypointCmdFlags.PreStartupCmds, "pre-startup-cmd", "1", nil, "run command passed as argument before service(s) startup.sh script(s)")
+ EntrypointCmd.Flags().StringArrayVarP(&entrypointCmdFlags.PreProcessCmds, "pre-process-cmd", "3", nil, "run command passed as argument before service(s) process.sh script(s)")
+ EntrypointCmd.Flags().StringArrayVarP(&entrypointCmdFlags.PreFinishCmds, "pre-finish-cmd", "5", nil, "run command passed as argument before service(s) finish.sh script(s)")
+ EntrypointCmd.Flags().StringArrayVarP(&entrypointCmdFlags.PreExitCmds, "pre-exit-cmd", "7", nil, "run command passed as argument before container exits\n")
+
+ EntrypointCmd.Flags().StringArrayVarP(&entrypointCmdFlags.exec, "exec", "x", nil, "execute only listed service(s) (default run service(s) linked to the entrypoint)\n")
+
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.RunBash, "bash", "b", false, "run Bash along with other service(s) or command\n")
+
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.TerminateAllOnExit, "kill-all-on-exit", "k", true, "kill all processes on the system upon exiting (send sigterm to all processes)")
+ EntrypointCmd.Flags().DurationVarP(&entrypointCmdFlags.TerminateAllOnExitTimeout, "kill-all-on-exit-timeout", "t", 15*time.Second, "kill all processes timeout (send sigkill to all processes after sigterm timeout has been reached)")
+ EntrypointCmd.Flags().VarP(&entrypointCmdFlags.restart, "restart", "r", "automatically restart failed services process.sh scripts (single process: default false, multiprocess: default true)")
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.KeepAlive, "keep-alive", "a", false, "keep alive container after all processes have exited\n")
+
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.UnsecureFastWrite, "unsecure-fast-write", "w", false, "disable fsync and friends with eatmydata LD_PRELOAD library\n")
+
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.debug, "debug", "d", false, "set log level to debug and install debug packages")
+ EntrypointCmd.Flags().StringSliceVarP(&entrypointCmdFlags.InstallPackages, "install-packages", "i", nil, "install packages\n")
+
+ EntrypointCmd.Flags().BoolVarP(&entrypointCmdFlags.version, "version", "v", false, "print container image version\n")
+
+ logger.AddFlags(EntrypointCmd.Flags())
+}
+
+func newPrintCmd(use string, short string, alias string, f func() string) *cobra.Command {
+ return &cobra.Command{
+ Use: use,
+ Short: short,
+
+ Aliases: []string{
+ alias,
+ },
+
+ Args: cobra.NoArgs,
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ fmt.Println(f())
+ },
+ }
+}
diff --git a/cmd/entrypoint/generate/bootstrap.go b/cmd/entrypoint/generate/bootstrap.go
new file mode 100644
index 00000000..1f8ae1a6
--- /dev/null
+++ b/cmd/entrypoint/generate/bootstrap.go
@@ -0,0 +1,55 @@
+package generate
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+type generateBootstrapFlags struct {
+ core.GenerateBootstrapOptions
+ generateFlags
+}
+
+var bootstrapCmdFlags = &generateBootstrapFlags{}
+
+var bootstrapCmd = &cobra.Command{
+ Use: "bootstrap [service name]...",
+ Short: "Generate bootstrap",
+
+ Aliases: []string{
+ "b",
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ bootstrapCmdFlags.Names = args
+
+ files, err := core.Instance().Generator().GenerateBootstrap(&bootstrapCmdFlags.GenerateBootstrapOptions)
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ if bootstrapCmdFlags.print {
+ print(files)
+ }
+ },
+}
+
+func init() {
+ // flags
+ bootstrapCmd.Flags().SortFlags = false
+ addBootstrapFlags(bootstrapCmd.Flags(), &bootstrapCmdFlags.GenerateBootstrapOptions)
+ addDockerfileFlags(bootstrapCmd.Flags(), &bootstrapCmdFlags.GenerateDockerfileOptions)
+ addServicesFlags(bootstrapCmd.Flags(), &bootstrapCmdFlags.GenerateServicesOptions)
+ addGenerateFlags(bootstrapCmd.Flags(), &bootstrapCmdFlags.generateFlags)
+ logger.AddFlags(bootstrapCmd.Flags())
+}
+
+func addBootstrapFlags(fs *pflag.FlagSet, gopt *core.GenerateBootstrapOptions) {
+ fs.BoolVarP(&gopt.Multiprocess, "multiprocess", "m", false, "generate multiprocess example")
+}
diff --git a/cmd/entrypoint/generate/dockerfile.go b/cmd/entrypoint/generate/dockerfile.go
new file mode 100644
index 00000000..c4416204
--- /dev/null
+++ b/cmd/entrypoint/generate/dockerfile.go
@@ -0,0 +1,53 @@
+package generate
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+type generateDockerfileFlags struct {
+ core.GenerateDockerfileOptions
+ generateFlags
+}
+
+var dockerfileCmdFlags = &generateDockerfileFlags{}
+
+var dockerfileCmd = &cobra.Command{
+ Use: "dockerfile",
+ Short: "Generate Dockerfile",
+
+ Aliases: []string{
+ "d",
+ },
+
+ Args: cobra.NoArgs,
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ files, err := core.Instance().Generator().GenerateDockerfile(&dockerfileCmdFlags.GenerateDockerfileOptions)
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ if dockerfileCmdFlags.print {
+ print(files)
+ }
+ },
+}
+
+func init() {
+ // flags
+ dockerfileCmd.Flags().SortFlags = false
+ addDockerfileFlags(dockerfileCmd.Flags(), &dockerfileCmdFlags.GenerateDockerfileOptions)
+ addGenerateFlags(dockerfileCmd.Flags(), &dockerfileCmdFlags.generateFlags)
+ logger.AddFlags(dockerfileCmd.Flags())
+}
+
+func addDockerfileFlags(fs *pflag.FlagSet, gopt *core.GenerateDockerfileOptions) {
+ fs.StringVarP(&gopt.Image, "image", "i", "osixia/baseimage-example:latest", "image name")
+}
diff --git a/cmd/entrypoint/generate/environment.go b/cmd/entrypoint/generate/environment.go
new file mode 100644
index 00000000..7bd28015
--- /dev/null
+++ b/cmd/entrypoint/generate/environment.go
@@ -0,0 +1,42 @@
+package generate
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var environmentCmdFlags = &generateFlags{}
+
+var environmentCmd = &cobra.Command{
+ Use: "environment",
+ Short: "Generate environment",
+
+ Aliases: []string{
+ "e",
+ },
+
+ Args: cobra.NoArgs,
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ files, err := core.Instance().Generator().GenerateEnvironment()
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ if environmentCmdFlags.print {
+ print(files)
+ }
+ },
+}
+
+func init() {
+ // flags
+ environmentCmd.Flags().SortFlags = false
+ addGenerateFlags(environmentCmd.Flags(), environmentCmdFlags)
+ logger.AddFlags(environmentCmd.Flags())
+}
diff --git a/cmd/entrypoint/generate/generate.go b/cmd/entrypoint/generate/generate.go
new file mode 100644
index 00000000..08f58a37
--- /dev/null
+++ b/cmd/entrypoint/generate/generate.go
@@ -0,0 +1,57 @@
+package generate
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+type generateFlags struct {
+ print bool
+}
+
+const (
+ Separator = "--------------------------------------------------------------------------------"
+)
+
+var GenerateCmd = &cobra.Command{
+ Use: "generate",
+ Short: "Generate sample templates",
+
+ Aliases: []string{
+ "gen",
+ },
+}
+
+func init() {
+ // subcommands
+ GenerateCmd.AddCommand(bootstrapCmd)
+ GenerateCmd.AddCommand(dockerfileCmd)
+ GenerateCmd.AddCommand(environmentCmd)
+ GenerateCmd.AddCommand(servicesCmd)
+}
+
+func addGenerateFlags(fs *pflag.FlagSet, gopt *generateFlags) {
+ fs.BoolVar(&gopt.print, "print", false, "print generated files content\n")
+}
+
+func print(files []string) {
+
+ dirPrefix := core.Instance().Filesystem().Paths().RunGenerator
+
+ for _, f := range files {
+ c, err := os.ReadFile(f)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+
+ fmt.Printf("\n%v\n%v\n%v%v\n", strings.TrimPrefix(f, dirPrefix), Separator, string(c), Separator)
+ }
+
+}
diff --git a/cmd/entrypoint/generate/services.go b/cmd/entrypoint/generate/services.go
new file mode 100644
index 00000000..986cd79b
--- /dev/null
+++ b/cmd/entrypoint/generate/services.go
@@ -0,0 +1,56 @@
+package generate
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+type generateServicesFlags struct {
+ core.GenerateServicesOptions
+ generateFlags
+}
+
+var servicesCmdFlags = &generateServicesFlags{}
+
+var servicesCmd = &cobra.Command{
+ Use: "services [name]...",
+ Short: "Generate services",
+
+ Aliases: []string{
+ "s",
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ servicesCmdFlags.Names = args
+
+ files, err := core.Instance().Generator().GenerateServices(&servicesCmdFlags.GenerateServicesOptions)
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ if servicesCmdFlags.print {
+ print(files)
+ }
+ },
+}
+
+func init() {
+ // flags
+ servicesCmd.Flags().SortFlags = false
+ addServicesFlags(servicesCmd.Flags(), &servicesCmdFlags.GenerateServicesOptions)
+ addGenerateFlags(servicesCmd.Flags(), &servicesCmdFlags.generateFlags)
+ logger.AddFlags(servicesCmd.Flags())
+}
+
+func addServicesFlags(fs *pflag.FlagSet, gopt *core.GenerateServicesOptions) {
+ fs.IntVarP(&gopt.Priority, "priority", "p", config.ServicesConfig.DefaultPriority, "services priority")
+ fs.StringSliceVarP(&gopt.Tags, "tags", "t", nil, "services tags")
+ fs.BoolVar(&gopt.Optional, "optional", false, "optional service")
+}
diff --git a/cmd/entrypoint/thanks.go b/cmd/entrypoint/thanks.go
new file mode 100644
index 00000000..48cdcca1
--- /dev/null
+++ b/cmd/entrypoint/thanks.go
@@ -0,0 +1,13 @@
+package entrypoint
+
+import (
+ "fmt"
+
+ "github.com/osixia/container-baseimage/config"
+)
+
+var thanksFunc = func() string {
+ return fmt.Sprintf("%v\n\nThanks to all contributors ♥", config.BuildContributors)
+}
+
+var thanksCmd = newPrintCmd("thanks", "List container-baseimage contributors", "t", thanksFunc)
diff --git a/cmd/envsubst.go b/cmd/envsubst.go
new file mode 100644
index 00000000..c744dad7
--- /dev/null
+++ b/cmd/envsubst.go
@@ -0,0 +1,39 @@
+package cmd
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/helpers"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var envsubstCmd = &cobra.Command{
+ Use: "envsubst input [output=input]",
+ Short: "Envsubst",
+
+ GroupID: envsubstGroupID,
+
+ Args: cobra.RangeArgs(1, 2),
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ input := args[0]
+ output := input
+
+ if len(args) > 1 {
+ output = args[1]
+ }
+
+ if err := helpers.Envsubst(input, output); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ envsubstCmd.Flags().SortFlags = false
+ logger.AddFlags(envsubstCmd.Flags())
+}
diff --git a/cmd/envsubst_templates.go b/cmd/envsubst_templates.go
new file mode 100644
index 00000000..32bda2b2
--- /dev/null
+++ b/cmd/envsubst_templates.go
@@ -0,0 +1,48 @@
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/helpers"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var envsubstTemplatesFilesSuffix = config.EnvsubstTemplatesFilesSuffix
+
+var envsubstTemplatesCmd = &cobra.Command{
+ Use: fmt.Sprintf("envsubst-templates templates_dir [output_dir=templates_dir] [templates_files_suffix=%v]", envsubstTemplatesFilesSuffix),
+ Short: "Envsubst templates",
+
+ GroupID: envsubstGroupID,
+
+ Args: cobra.RangeArgs(1, 3),
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ templatesDir := args[0]
+ outputDir := templatesDir
+
+ if len(args) > 1 {
+ outputDir = args[1]
+
+ if len(args) > 2 {
+ envsubstTemplatesFilesSuffix = args[2]
+ }
+ }
+
+ if _, err := helpers.EnvsubstTemplates(templatesDir, outputDir, envsubstTemplatesFilesSuffix); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ envsubstTemplatesCmd.Flags().SortFlags = false
+ logger.AddFlags(envsubstTemplatesCmd.Flags())
+}
diff --git a/cmd/install.go b/cmd/install.go
new file mode 100644
index 00000000..b4fdd4e4
--- /dev/null
+++ b/cmd/install.go
@@ -0,0 +1,32 @@
+package cmd
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var installCmd = &cobra.Command{
+ Use: "install",
+ Short: "Install container-baseimage",
+
+ GroupID: installGroupID,
+
+ Args: cobra.NoArgs,
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ if err := core.Instance().Install(cmd.Context()); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ installCmd.Flags().SortFlags = false
+ logger.AddFlags(installCmd.Flags())
+}
diff --git a/cmd/logger/level.go b/cmd/logger/level.go
new file mode 100644
index 00000000..c3e2a59b
--- /dev/null
+++ b/cmd/logger/level.go
@@ -0,0 +1,54 @@
+package logger
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/log"
+)
+
+var levelCmd = &cobra.Command{
+ Use: "level",
+
+ Aliases: []string{
+ "lvl",
+ },
+}
+
+func init() {
+ levelCmd.AddCommand(newLevelCompareCmd("eq", "Equals", log.Equals))
+ levelCmd.AddCommand(newLevelCompareCmd("ne", "Not equals", log.NotEquals))
+ levelCmd.AddCommand(newLevelCompareCmd("gt", "Greater than", log.GreaterThan))
+ levelCmd.AddCommand(newLevelCompareCmd("ge", "Greater or equals", log.GreaterOrEquals))
+ levelCmd.AddCommand(newLevelCompareCmd("lt", "Less than", log.LessThan))
+ levelCmd.AddCommand(newLevelCompareCmd("le", "Less or equals", log.LessOrEquals))
+}
+
+func newLevelCompareCmd(use string, short string, f log.CompareFunc) *cobra.Command {
+
+ return &cobra.Command{
+ Use: fmt.Sprintf("%v [%v]", use, strings.Join(log.LevelsList(), ",")),
+ Short: short,
+
+ Args: cobra.ExactArgs(1),
+
+ RunE: func(cmd *cobra.Command, args []string) error {
+
+ level, err := log.ParseLevel(args[0])
+ if err != nil {
+ return err
+ }
+
+ if f(log.Level(), level) {
+ os.Exit(0)
+ return nil
+ }
+
+ os.Exit(1)
+ return nil
+ },
+ }
+}
diff --git a/cmd/logger/logger.go b/cmd/logger/logger.go
new file mode 100644
index 00000000..c98b5866
--- /dev/null
+++ b/cmd/logger/logger.go
@@ -0,0 +1,90 @@
+package logger
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+
+ "github.com/osixia/container-baseimage/log"
+)
+
+const (
+ LevelFlag = "log-level"
+ FormatFlag = "log-format"
+
+ PrintGroupID = "print"
+)
+
+var caser = cases.Title(language.English)
+
+var LoggerCmd = &cobra.Command{
+ Use: "logger",
+ Short: "Logger subcommands",
+
+ Aliases: []string{
+ "log",
+ },
+}
+
+func init() {
+ // subcommands groups
+ LoggerCmd.AddGroup(&cobra.Group{
+ ID: PrintGroupID,
+ Title: "Print Commands:",
+ })
+
+ // subcommands
+ LoggerCmd.AddCommand(newPrintCmd(log.Levels[log.LevelError], "e", log.Error))
+ LoggerCmd.AddCommand(newPrintCmd(log.Levels[log.LevelWarning], "w", log.Warning))
+ LoggerCmd.AddCommand(newPrintCmd(log.Levels[log.LevelInfo], "i", log.Info))
+ LoggerCmd.AddCommand(newPrintCmd(log.Levels[log.LevelDebug], "d", log.Debug))
+ LoggerCmd.AddCommand(newPrintCmd(log.Levels[log.LevelTrace], "t", log.Trace))
+
+ LoggerCmd.AddCommand(levelCmd)
+}
+
+func AddFlags(fs *pflag.FlagSet) {
+ fs.StringP(LevelFlag, "l", log.Levels[log.Level()], fmt.Sprintf("set log level, choices: %v", strings.Join(log.LevelsList(), ", ")))
+ fs.StringP(FormatFlag, "o", string(log.Format()), fmt.Sprintf("set log format, choices: %v", strings.Join(log.FormatsList(), ", ")))
+}
+
+func HandleFlags(cmd *cobra.Command) error {
+
+ level, err := cmd.Flags().GetString(LevelFlag)
+ if err == nil {
+ if err := log.SetLevel(level); err != nil {
+ return err
+ }
+ }
+
+ format, err := cmd.Flags().GetString(FormatFlag)
+ if err == nil {
+ if err := log.SetFormat(format); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func newPrintCmd(level string, alias string, f log.PrintFunc) *cobra.Command {
+
+ return &cobra.Command{
+ Use: fmt.Sprintf("%v message", level),
+ Short: caser.String(level),
+
+ GroupID: PrintGroupID,
+
+ Aliases: []string{
+ alias,
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.FromCmd(f, args)
+ },
+ }
+}
diff --git a/cmd/processes/processes.go b/cmd/processes/processes.go
new file mode 100644
index 00000000..329ee84d
--- /dev/null
+++ b/cmd/processes/processes.go
@@ -0,0 +1,19 @@
+package processes
+
+import "github.com/spf13/cobra"
+
+var ProcessesCmd = &cobra.Command{
+ Use: "processes",
+ Short: "Processes subcommands",
+
+ Aliases: []string{
+ "prcs",
+ },
+}
+
+func init() {
+	// subcommands
+	ProcessesCmd.AddCommand(startCmd)
+	ProcessesCmd.AddCommand(stopCmd)
+	ProcessesCmd.AddCommand(statusCmd)
+}
diff --git a/cmd/processes/start.go b/cmd/processes/start.go
new file mode 100644
index 00000000..4ed1dea6
--- /dev/null
+++ b/cmd/processes/start.go
@@ -0,0 +1,44 @@
+package processes
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var startCmd = &cobra.Command{
+	// Arguments are optional: with no argument the Run body falls through to
+	// starting all processes, so Use shows the bracketed (optional) form,
+	// consistent with statusCmd and stopCmd.
+	Use:   fmt.Sprintf("start [process|%vname]...", config.TagsNamePrefix),
+	Short: "Start process(es)",
+
+	Run: func(cmd *cobra.Command, args []string) {
+		log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+		ps, err := core.Instance().Processes().List(core.WithProcessesNames(args), core.HandleProcessesTagPrefixInNames(config.TagsNamePrefix))
+		if err != nil {
+			log.Fatalf("%v: %v", cmd.Use, err.Error())
+		}
+
+		// if no process defined get all processes
+		if len(ps) == 0 {
+			ps, err = core.Instance().Processes().List()
+			if err != nil {
+				log.Fatalf("%v: %v", cmd.Use, err.Error())
+			}
+		}
+
+		if err := core.Instance().Processes().Start(ps); err != nil {
+			log.Fatalf("%v: %v", cmd.Use, err.Error())
+		}
+	},
+}
+
+func init() {
+ // flags
+ startCmd.Flags().SortFlags = false
+ logger.AddFlags(startCmd.Flags())
+}
diff --git a/cmd/processes/status.go b/cmd/processes/status.go
new file mode 100644
index 00000000..12951b0f
--- /dev/null
+++ b/cmd/processes/status.go
@@ -0,0 +1,44 @@
+package processes
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var statusCmd = &cobra.Command{
+ Use: fmt.Sprintf("status [process|%vname]...", config.TagsNamePrefix),
+ Short: "Print process(es) status",
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ ps, err := core.Instance().Processes().List(core.WithProcessesNames(args), core.HandleProcessesTagPrefixInNames(config.TagsNamePrefix))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ // if no process defined get all processes
+ if len(ps) == 0 {
+ ps, err = core.Instance().Processes().List()
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ }
+
+ for _, p := range ps {
+ fmt.Println(p.Status())
+ }
+ },
+}
+
+func init() {
+ // flags
+ statusCmd.Flags().SortFlags = false
+ logger.AddFlags(statusCmd.Flags())
+}
diff --git a/cmd/processes/stop.go b/cmd/processes/stop.go
new file mode 100644
index 00000000..94814b16
--- /dev/null
+++ b/cmd/processes/stop.go
@@ -0,0 +1,44 @@
+package processes
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var stopCmd = &cobra.Command{
+	// This command operates on processes (see the Run body below); the usage
+	// string previously said "service", which was inconsistent with startCmd
+	// and statusCmd in this package.
+	Use:   fmt.Sprintf("stop [process|%vname]...", config.TagsNamePrefix),
+	Short: "Stop process(es)",
+
+	Run: func(cmd *cobra.Command, args []string) {
+		log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+		ps, err := core.Instance().Processes().List(core.WithProcessesNames(args), core.HandleProcessesTagPrefixInNames(config.TagsNamePrefix))
+		if err != nil {
+			log.Fatalf("%v: %v", cmd.Use, err.Error())
+		}
+
+		// if no process defined get all processes
+		if len(ps) == 0 {
+			ps, err = core.Instance().Processes().List()
+			if err != nil {
+				log.Fatalf("%v: %v", cmd.Use, err.Error())
+			}
+		}
+
+		if err := core.Instance().Processes().Stop(ps); err != nil {
+			log.Fatalf("%v: %v", cmd.Use, err.Error())
+		}
+	},
+}
+
+func init() {
+ // flags
+ stopCmd.Flags().SortFlags = false
+ logger.AddFlags(stopCmd.Flags())
+}
diff --git a/cmd/services/download.go b/cmd/services/download.go
new file mode 100644
index 00000000..fa7a03ff
--- /dev/null
+++ b/cmd/services/download.go
@@ -0,0 +1,50 @@
+package services
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var downloadCmd = &cobra.Command{
+ Use: fmt.Sprintf("download [service|%vname]...", config.TagsNamePrefix),
+ Short: "Download optional service",
+
+ Long: "With no argument: download all not optional services",
+
+ Aliases: []string{
+ "d",
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ ss, err := core.Instance().Services().List(core.WithServicesNames(args), core.HandleServicesTagPrefixInNames(config.TagsNamePrefix))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ // if no service defined download all not optional and not already downloaded services
+ if len(ss) == 0 {
+ ss, err = core.Instance().Services().List(core.WithServicesOptional(false), core.WithServicesDownloaded(false), core.SortServicesByPriority(true))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ }
+
+ if err := core.Instance().Services().Download(cmd.Context(), ss); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ downloadCmd.Flags().SortFlags = false
+ logger.AddFlags(downloadCmd.Flags())
+}
diff --git a/cmd/services/install.go b/cmd/services/install.go
new file mode 100644
index 00000000..4778bd6f
--- /dev/null
+++ b/cmd/services/install.go
@@ -0,0 +1,50 @@
+package services
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var installCmd = &cobra.Command{
+ Use: fmt.Sprintf("install [service|%vname]...", config.TagsNamePrefix),
+ Short: "Install service(s)",
+
+ Long: "With no argument: install all not optional services and only downloaded optional services",
+
+ Aliases: []string{
+ "i",
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ ss, err := core.Instance().Services().List(core.WithServicesNames(args), core.HandleServicesTagPrefixInNames(config.TagsNamePrefix))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ // if no service defined install all not optional services not already installed
+ if len(ss) == 0 {
+ ss, err = core.Instance().Services().List(core.WithServicesOptional(false), core.WithServicesInstalled(false), core.SortServicesByPriority(true))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ }
+
+ if err := core.Instance().Services().Install(cmd.Context(), ss); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ installCmd.Flags().SortFlags = false
+ logger.AddFlags(installCmd.Flags())
+}
diff --git a/cmd/services/link.go b/cmd/services/link.go
new file mode 100644
index 00000000..6ba0fdff
--- /dev/null
+++ b/cmd/services/link.go
@@ -0,0 +1,50 @@
+package services
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var linkCmd = &cobra.Command{
+ Use: fmt.Sprintf("link [service|%vname]...", config.TagsNamePrefix),
+ Short: "Link service(s) to entrypoint",
+
+ Long: "With no argument: link all installed and not optional services",
+
+ Aliases: []string{
+ "l",
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ ss, err := core.Instance().Services().List(core.WithServicesNames(args), core.HandleServicesTagPrefixInNames(config.TagsNamePrefix))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ // if no service defined link all installed, not optional and not already linked services
+ if len(ss) == 0 {
+ ss, err = core.Instance().Services().List(core.WithServicesOptional(false), core.WithServicesLinked(false), core.SortServicesByPriority(true))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ }
+
+ if err := core.Instance().Services().Link(ss); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ linkCmd.Flags().SortFlags = false
+ logger.AddFlags(linkCmd.Flags())
+}
diff --git a/cmd/services/require.go b/cmd/services/require.go
new file mode 100644
index 00000000..9985eafb
--- /dev/null
+++ b/cmd/services/require.go
@@ -0,0 +1,42 @@
+package services
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var requireCmd = &cobra.Command{
+ Use: fmt.Sprintf("require service|%vname [service|%vname]...", config.TagsNamePrefix, config.TagsNamePrefix),
+ Short: "Require optional service",
+
+ Aliases: []string{
+ "r",
+ },
+
+ Args: cobra.MinimumNArgs(1),
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ ss, err := core.Instance().Services().List(core.WithServicesNames(args), core.HandleServicesTagPrefixInNames(config.TagsNamePrefix))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ if err := core.Instance().Services().Require(ss); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ requireCmd.Flags().SortFlags = false
+ logger.AddFlags(requireCmd.Flags())
+}
diff --git a/cmd/services/services.go b/cmd/services/services.go
new file mode 100644
index 00000000..94996e72
--- /dev/null
+++ b/cmd/services/services.go
@@ -0,0 +1,21 @@
+package services
+
+import "github.com/spf13/cobra"
+
+var ServicesCmd = &cobra.Command{
+ Use: "services",
+ Short: "Services subcommands",
+
+ Aliases: []string{
+ "svcs",
+ },
+}
+
+func init() {
+	// subcommands
+	ServicesCmd.AddCommand(requireCmd)
+	ServicesCmd.AddCommand(downloadCmd)
+	ServicesCmd.AddCommand(installCmd)
+	ServicesCmd.AddCommand(linkCmd)
+	ServicesCmd.AddCommand(unlinkCmd)
+}
diff --git a/cmd/services/unlink.go b/cmd/services/unlink.go
new file mode 100644
index 00000000..bb9e12fe
--- /dev/null
+++ b/cmd/services/unlink.go
@@ -0,0 +1,50 @@
+package services
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/osixia/container-baseimage/cmd/logger"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+var unlinkCmd = &cobra.Command{
+ Use: fmt.Sprintf("unlink [service|%vname]...", config.TagsNamePrefix),
+ Short: "Unlink entrypoint's service(s)",
+
+ Long: "With no argument: unlink all linked services",
+
+ Aliases: []string{
+ "u",
+ },
+
+ Run: func(cmd *cobra.Command, args []string) {
+ log.Tracef("Run: %v called with args: %v", cmd.Use, args)
+
+ ss, err := core.Instance().Services().List(core.WithServicesNames(args), core.HandleServicesTagPrefixInNames(config.TagsNamePrefix))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+
+ // if no service defined unlink all linked services
+ if len(ss) == 0 {
+ ss, err = core.Instance().Services().List(core.WithServicesLinked(true), core.SortServicesByPriority(true))
+ if err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ }
+
+ if err := core.Instance().Services().Unlink(ss); err != nil {
+ log.Fatalf("%v: %v", cmd.Use, err.Error())
+ }
+ },
+}
+
+func init() {
+ // flags
+ unlinkCmd.Flags().SortFlags = false
+ logger.AddFlags(unlinkCmd.Flags())
+}
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 00000000..95bb5f0a
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,91 @@
+package config
+
+import (
+ "fmt"
+
+ "github.com/osixia/container-baseimage/alpine"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/debian"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// build global variables
+var (
+ BuildVersion = "develop"
+ BuildContributors = "🐒✨🌴"
+
+ BuildImageName = "osixia/baseimage"
+ BuildImageTag = "develop"
+)
+
+// global variables
+var (
+ EnvsubstTemplatesFilesSuffix = ".template"
+ TagsNamePrefix = "tag:"
+)
+
+// logger environment configuration
+var LogEnvironmentConfig = &log.EnvironmentConfig{
+ LevelKey: "CONTAINER_LOG_LEVEL",
+ FormatKey: "CONTAINER_LOG_FORMAT",
+}
+
+// core environment configuration
+var CoreEnvironmentConfig = &core.EnvironmentConfig{
+ ImageKey: "CONTAINER_IMAGE",
+ DebugPackagesKey: "CONTAINER_DEBUG_PACKAGES",
+}
+
+// supported distributions
+var SupportedDistributions = []*core.SupportedDistribution{
+ alpine.SupportedDistribution,
+ debian.SupportedDistribution,
+}
+
+// filesystem configuration
+var FilesystemConfig = &core.FilesystemConfig{
+ RootPath: "/container",
+ RunRootPath: "/run/container",
+
+ EnvironmentFilesPrefix: ".env",
+}
+
+// services configuration
+var ServicesConfig = &core.ServicesConfig{
+ TagsDir: ".tags",
+
+ DefaultPriority: 500,
+ PriorityFilename: ".priority",
+
+ OptionalFilename: ".optional",
+
+ DownloadFilename: "download.sh",
+ DownloadedFilename: ".downloaded",
+
+ InstallFilename: "install.sh",
+ InstalledFilename: ".installed",
+
+ StartupFilename: "startup.sh",
+ ProcessFilename: "process.sh",
+ FinishFilename: "finish.sh",
+
+ LinkedFilename: ".entrypoint",
+}
+
+// processes configuration
+var ProcessesConfig = &core.ProcessesConfig{
+ PIDFileSuffix: ".pid",
+ WantedDownFileSuffix: ".down",
+ TagsDir: ".tags",
+}
+
+// core configuration
+var CoreConfig = &core.CoreConfig{
+ Image: fmt.Sprintf("%v:%v", BuildImageName, BuildImageTag),
+ SupportedDistributions: SupportedDistributions,
+
+ EnvironmentConfig: CoreEnvironmentConfig,
+ FilesystemConfig: FilesystemConfig,
+ ServicesConfig: ServicesConfig,
+ ProcessesConfig: ProcessesConfig,
+}
diff --git a/core/assets/bin/container-envsubst b/core/assets/bin/container-envsubst
new file mode 100755
index 00000000..b750c032
--- /dev/null
+++ b/core/assets/bin/container-envsubst
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+# Usage:
+# container-envsubst input [output=input]
+
+container-logger level eq trace && set -x
+
+exec container-baseimage envsubst "$@"
diff --git a/core/assets/bin/container-envsubst-templates b/core/assets/bin/container-envsubst-templates
new file mode 100755
index 00000000..35ab0572
--- /dev/null
+++ b/core/assets/bin/container-envsubst-templates
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+# Usage:
+# container-envsubst-templates templates_dir [output_dir=templates_dir] [templates_files_suffix=.template]
+
+container-logger level eq trace && set -x
+
+exec container-baseimage envsubst-templates "$@"
diff --git a/core/assets/bin/container-logger b/core/assets/bin/container-logger
new file mode 100755
index 00000000..9812d735
--- /dev/null
+++ b/core/assets/bin/container-logger
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+
+# Usage:
+# container-logger error|warning|info|debug|trace [message]
+# container-logger level eq|ne|gt|ge|lt|le none|error|warning|info|debug|trace
+
+exec container-baseimage logger "$@"
diff --git a/core/assets/generator/templates/Dockerfile.template b/core/assets/generator/templates/Dockerfile.template
new file mode 100644
index 00000000..7a661ba4
--- /dev/null
+++ b/core/assets/generator/templates/Dockerfile.template
@@ -0,0 +1,20 @@
+FROM ${FROM_IMAGE}
+
+# Set image name
+ARG IMAGE="${IMAGE}"
+ENV ${CONTAINER_IMAGE_ENV_KEY}=$${IMAGE}
+
+# Download service(s) required packages or resources
+# RUN ${PACKAGES_INDEX_UPDATE_BIN} \
+# && ${PACKAGES_INSTALL_CLEAN_BIN} \
+# [...]
+# && curl -o resources.tar.gz https://[...].tar.gz
+# && tar -xzf resources.tar.gz
+
+COPY ${DOCKERFILE_SERVICES_DIR} ${CONTAINER_SERVICES_DIR}
+
+# Install and link service(s) to the entrypoint
+RUN container-baseimage services install \
+ && container-baseimage services link
+
+COPY ${DOCKERFILE_ENVIRONMENT_FILES_DIR} ${CONTAINER_ENVIRONMENT_FILES_DIR}
diff --git a/core/assets/generator/templates/environment/.env.template b/core/assets/generator/templates/environment/.env.template
new file mode 100644
index 00000000..842ba0a1
--- /dev/null
+++ b/core/assets/generator/templates/environment/.env.template
@@ -0,0 +1 @@
+EXAMPLE_ENV_VAR="Hello :)"
diff --git a/core/assets/generator/templates/environment/README.md.template b/core/assets/generator/templates/environment/README.md.template
new file mode 100644
index 00000000..ec9d9d4f
--- /dev/null
+++ b/core/assets/generator/templates/environment/README.md.template
@@ -0,0 +1,8 @@
+# .env files
+
+.env files in this directory and any sub-directories are loaded before executing services lifecycle script(s) (startup.sh, process.sh and finish.sh) or entrypoint lifecycle pre-commands.
+The variables they contain are defined as environment variables in the container.
+
+Files are loaded in alphabetical order, and variables are overwritten if they were already defined in a previous file.
+
+**Container environment variables set at run time will overwrite values defined in .env files.**
diff --git a/core/assets/generator/templates/services/service-name/.optional.template b/core/assets/generator/templates/services/service-name/.optional.template
new file mode 100644
index 00000000..e69de29b
diff --git a/core/assets/generator/templates/services/service-name/.priority.template b/core/assets/generator/templates/services/service-name/.priority.template
new file mode 100644
index 00000000..684c0d32
--- /dev/null
+++ b/core/assets/generator/templates/services/service-name/.priority.template
@@ -0,0 +1 @@
+${SERVICE_PRIORITY}
diff --git a/core/assets/generator/templates/services/service-name/.tags/tag-name b/core/assets/generator/templates/services/service-name/.tags/tag-name
new file mode 100644
index 00000000..e69de29b
diff --git a/core/assets/generator/templates/services/service-name/README.md.template b/core/assets/generator/templates/services/service-name/README.md.template
new file mode 100644
index 00000000..4de3ab1f
--- /dev/null
+++ b/core/assets/generator/templates/services/service-name/README.md.template
@@ -0,0 +1,36 @@
+# Service Files
+The files outlined below are not mandatory.
+
+## install.sh
+This script should exclusively contain instructions for the initial setup of the service.
+
+For improved image construction, all package installations or file downloads should occur within the Dockerfile.
+
+By separating time-intensive download operations from the setup, the docker build cache is utilized effectively.
+Changes to the `install.sh` file will not necessitate re-downloading dependencies,
+as the Dockerfile builder will only execute the service installation script.
+
+Note: The `install.sh` script executes during the docker build, thus runtime environment variables cannot be used for setup customization.
+Such customizations are handled in the `startup.sh` file.
+
+## startup.sh
+This script prepares `process.sh` for execution and tailors the service setup to runtime environment variables.
+
+## process.sh
+This script specifies the command to be executed.
+
+In images designed for multiple processes, all `process.sh` scripts are launched simultaneously.
+The order defined in the service `.priority` file is irrelevant.
+
+## finish.sh
+This script is executed once `process.sh` has concluded.
+
+## .priority
+The .priority file establishes the sequence in which services `startup.sh` or `finish.sh` scripts are invoked.
+The higher the number, the greater the priority. The default is 500.
+
+## .optional
+This file indicates that the service is optional and can be incorporated later via the `container-baseimage services require ${SERVICE_NAME}` command.
+
+## download.sh
+This script is called during container build to download optional service resources.
diff --git a/core/assets/generator/templates/services/service-name/download.sh.template b/core/assets/generator/templates/services/service-name/download.sh.template
new file mode 100755
index 00000000..8715b239
--- /dev/null
+++ b/core/assets/generator/templates/services/service-name/download.sh.template
@@ -0,0 +1,3 @@
+#!/bin/bash -e
+
+echo "${SERVICE_NAME}: Downloading package from distribution repository or any url ..."
diff --git a/core/assets/generator/templates/services/service-name/finish.sh.template b/core/assets/generator/templates/services/service-name/finish.sh.template
new file mode 100755
index 00000000..06c46708
--- /dev/null
+++ b/core/assets/generator/templates/services/service-name/finish.sh.template
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "${SERVICE_NAME}: process ended ..."
diff --git a/core/assets/generator/templates/services/service-name/install.sh.template b/core/assets/generator/templates/services/service-name/install.sh.template
new file mode 100755
index 00000000..cfbbe2d5
--- /dev/null
+++ b/core/assets/generator/templates/services/service-name/install.sh.template
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+# this script is run during the image build
+
+echo "${SERVICE_NAME}: Installing some tools ..."
diff --git a/core/assets/generator/templates/services/service-name/process.sh.template b/core/assets/generator/templates/services/service-name/process.sh.template
new file mode 100755
index 00000000..fdbf5473
--- /dev/null
+++ b/core/assets/generator/templates/services/service-name/process.sh.template
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+SLEEP=$(shuf -i 3-15 -n 1)
+
+echo "${SERVICE_NAME}: Just going to sleep for $${SLEEP} seconds ..."
+exec sleep "$${SLEEP}"
diff --git a/core/assets/generator/templates/services/service-name/startup.sh.template b/core/assets/generator/templates/services/service-name/startup.sh.template
new file mode 100755
index 00000000..578a81a5
--- /dev/null
+++ b/core/assets/generator/templates/services/service-name/startup.sh.template
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "${SERVICE_NAME}: Doing some container start setup ..."
+echo "${SERVICE_NAME}: EXAMPLE_ENV_VAR=$${EXAMPLE_ENV_VAR} ..."
diff --git a/core/core.go b/core/core.go
new file mode 100644
index 00000000..46091966
--- /dev/null
+++ b/core/core.go
@@ -0,0 +1,297 @@
+package core
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Core global variables
+// =============================
+var (
+ ci Core // Core instance
+ ciLock = &sync.Mutex{} // Core instance singleton creation lock
+)
+
+// Core functions
+// =============================
+
+func Init(cc *CoreConfig) error {
+
+ ciLock.Lock()
+ defer ciLock.Unlock()
+
+ // get core environment variables
+ env, err := newEnvironment(cc.EnvironmentConfig)
+ if err != nil {
+ return err
+ }
+
+ // customize core configuration with environment variables
+ if env.Image() != "" {
+ image := env.Image()
+
+ log.Tracef("Setting container image from environment variables to %v ...", image)
+ cc.Image = image
+ }
+
+ if env.DebugPackages() != "" {
+ log.Tracef("Adding %v debug packages from environment variables ...", env.DebugPackages())
+
+ sd := &SupportedDistribution{
+ Name: "Environment based distributions common configuration",
+ Vendors: nil, // all vendors
+
+ Config: &DistributionConfig{
+ DebugPackages: strings.Split(env.DebugPackages(), " "),
+ },
+ }
+
+ cc.SupportedDistributions = append(cc.SupportedDistributions, sd)
+ }
+
+ // validate config
+ if _, err := cc.Validate(); err != nil {
+ return err
+ }
+
+ ci = &core{
+ env: env,
+ config: cc,
+ }
+
+ return nil
+}
+
+func Instance() Core {
+ if ci == nil {
+ log.Fatal("Core not initialized")
+ }
+
+ return ci
+}
+
+// Core config
+// =============================
+
+type CoreConfig struct {
+
+ // Container image name
+ Image string
+
+ SupportedDistributions []*SupportedDistribution
+
+ EnvironmentConfig *EnvironmentConfig
+ FilesystemConfig *FilesystemConfig
+ ServicesConfig *ServicesConfig
+ ProcessesConfig *ProcessesConfig
+}
+
+func (cc *CoreConfig) Validate() (bool, error) {
+
+ if cc.Image == "" {
+ return false, fmt.Errorf("Image: %w", errors.ErrRequired)
+ }
+
+ if len(cc.SupportedDistributions) == 0 {
+ return false, fmt.Errorf("SupportedDistributions: %w", errors.ErrRequired)
+ }
+
+ if cc.EnvironmentConfig == nil {
+ return false, fmt.Errorf("EnvironmentConfig: %w", errors.ErrRequired)
+ }
+ if cc.FilesystemConfig == nil {
+ return false, fmt.Errorf("FilesystemConfig: %w", errors.ErrRequired)
+ }
+ if cc.ServicesConfig == nil {
+ return false, fmt.Errorf("ServicesConfig: %w", errors.ErrRequired)
+ }
+ if cc.ProcessesConfig == nil {
+ return false, fmt.Errorf("ProcessesConfig: %w", errors.ErrRequired)
+ }
+
+ return true, nil
+}
+
+// Core
+// =============================
+
+type Core interface {
+ Install(ctx context.Context) error
+
+ Environment() Environment
+ Distribution() Distribution
+ Filesystem() Filesystem
+ Services() Services
+ Processes() Processes
+ Entrypoint() Entrypoint
+ Generator() Generator
+
+ Config() *CoreConfig
+}
+
+type core struct {
+ env Environment
+ dist Distribution
+ fs Filesystem
+ svcs Services
+ prcs Processes
+ gen Generator
+ ep Entrypoint
+
+ config *CoreConfig
+}
+
+func (c *core) Install(ctx context.Context) error {
+
+ log.Trace("core.Install called")
+
+ if err := c.Filesystem().Create(); err != nil {
+ return err
+ }
+
+ di := newDistributionInstaller(c.Filesystem(), c.Distribution())
+ if err := di.Install(ctx); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *core) Environment() Environment {
+ return c.env
+}
+
+func (c *core) Distribution() Distribution {
+
+ if c.dist == nil {
+
+ ciLock.Lock()
+ defer ciLock.Unlock()
+
+ var err error
+
+ c.dist, err = newDistribution(c.config.SupportedDistributions)
+ if err != nil {
+ log.Fatalf("Core: %v", err.Error())
+ }
+ }
+
+ return c.dist
+}
+
+func (c *core) Filesystem() Filesystem {
+
+ if c.fs == nil {
+
+ ciLock.Lock()
+ defer ciLock.Unlock()
+
+ var err error
+
+ c.fs, err = newFilesystem(c.config.FilesystemConfig)
+ if err != nil {
+ log.Fatalf("Core: %v", err.Error())
+ }
+ }
+
+ return c.fs
+}
+
+func (c *core) Services() Services {
+
+ if c.svcs == nil {
+
+ fs := c.Filesystem()
+
+ ciLock.Lock()
+ defer ciLock.Unlock()
+
+ var err error
+
+ c.svcs, err = newServices(fs, c.config.ServicesConfig)
+ if err != nil {
+ log.Fatalf("Core: %v", err.Error())
+ }
+ }
+
+ return c.svcs
+}
+
+func (c *core) Processes() Processes {
+
+ if c.prcs == nil {
+
+ fs := c.Filesystem()
+
+ ciLock.Lock()
+ defer ciLock.Unlock()
+
+ var err error
+
+ c.prcs, err = newProcesses(fs, c.config.ProcessesConfig)
+ if err != nil {
+ log.Fatalf("Core: %v", err.Error())
+ }
+ }
+
+ return c.prcs
+}
+
+func (c *core) Entrypoint() Entrypoint {
+
+ if c.ep == nil {
+
+ fs := c.Filesystem()
+ dist := c.Distribution()
+ svcs := c.Services()
+ prcs := c.Processes()
+
+ ciLock.Lock()
+ defer ciLock.Unlock()
+
+ var err error
+
+ c.ep, err = newEntrypoint(fs, dist, svcs, prcs)
+ if err != nil {
+ log.Fatalf("Core: %v", err.Error())
+ }
+ }
+
+ return c.ep
+}
+
+func (c *core) Generator() Generator {
+
+ if c.gen == nil {
+
+ env := c.Environment()
+ fs := c.Filesystem()
+ svcs := c.Services()
+ dist := c.Distribution()
+
+ genc := &GeneratorConfig{
+ fromImage: c.config.Image,
+ }
+
+ ciLock.Lock()
+ defer ciLock.Unlock()
+
+ var err error
+
+ c.gen, err = newGenerator(env, fs, svcs, dist, genc)
+ if err != nil {
+ log.Fatalf("Core: %v", err.Error())
+ }
+ }
+
+ return c.gen
+}
+
+func (c *core) Config() *CoreConfig {
+ return c.config
+}
diff --git a/core/distribution.go b/core/distribution.go
new file mode 100644
index 00000000..6a734423
--- /dev/null
+++ b/core/distribution.go
@@ -0,0 +1,339 @@
+package core
+
+import (
+ "bufio"
+ "context"
+ "embed"
+ "fmt"
+ iofs "io/fs"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/helpers"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// List the generator templates "environment" and "services/service-name" directories explicitly so their .env and .priority files are embedded (go:embed patterns otherwise ignore dot-prefixed files in subdirectories).
+
+//go:embed assets/* assets/generator/templates/environment/* assets/generator/templates/services/service-name/*
+var assets embed.FS
+
+var coreDistributionConfig = &DistributionConfig{
+ Assets: []*embed.FS{&assets},
+
+ BinDest: "/usr/sbin",
+}
+
+// Supported distribution
+// =============================
+
+type SupportedDistribution struct {
+ Name string
+ Vendors []string
+
+ Config *DistributionConfig
+}
+
+// Distribution config
+// =============================
+
+type DistributionConfig struct {
+ DebugPackages []string
+
+ Assets []*embed.FS
+
+ InstallScript string
+
+ BinDest string
+
+ BinPackagesIndexUpdate string
+ BinPackagesInstallClean string
+ BinPackagesIndexClean string
+}
+
+func (dc *DistributionConfig) Merge(mdc *DistributionConfig) {
+
+ if mdc.DebugPackages != nil {
+ dc.DebugPackages = append(dc.DebugPackages, mdc.DebugPackages...)
+ }
+
+ if mdc.Assets != nil {
+ dc.Assets = append(dc.Assets, mdc.Assets...)
+ }
+
+ if mdc.InstallScript != "" {
+ dc.InstallScript = mdc.InstallScript
+ }
+
+ if mdc.BinDest != "" {
+ dc.BinDest = mdc.BinDest
+ }
+
+ if mdc.BinPackagesIndexUpdate != "" {
+ dc.BinPackagesIndexUpdate = mdc.BinPackagesIndexUpdate
+ }
+ if mdc.BinPackagesInstallClean != "" {
+ dc.BinPackagesInstallClean = mdc.BinPackagesInstallClean
+ }
+ if mdc.BinPackagesIndexClean != "" {
+ dc.BinPackagesIndexClean = mdc.BinPackagesIndexClean
+ }
+
+}
+
+func (dc *DistributionConfig) Validate() (bool, error) {
+
+ if dc.InstallScript == "" {
+ return false, fmt.Errorf("InstallScript: %w", errors.ErrRequired)
+ }
+
+ if dc.BinDest == "" {
+ return false, fmt.Errorf("BinDest: %w", errors.ErrRequired)
+ }
+
+ if dc.BinPackagesIndexUpdate == "" {
+ return false, fmt.Errorf("BinPackagesIndexUpdate: %w", errors.ErrRequired)
+ }
+ if dc.BinPackagesInstallClean == "" {
+ return false, fmt.Errorf("BinPackagesInstallClean: %w", errors.ErrRequired)
+ }
+ if dc.BinPackagesIndexClean == "" {
+ return false, fmt.Errorf("BinPackagesIndexClean: %w", errors.ErrRequired)
+ }
+
+ return true, nil
+}
+
+// Distribution
+// =============================
+
+type Distribution interface {
+ Name() string
+ Vendor() string
+ Version() string
+ VersionCodename() string
+
+ InstallPackages(ctx context.Context, packages []string) error
+
+ PackagesIndexUpdate(ctx context.Context) error
+ PackagesIndexClean(ctx context.Context) error
+ PackagesInstallClean(ctx context.Context, packages []string) error
+
+ Config() *DistributionConfig
+}
+
+type distribution struct {
+ name string
+ vendor string
+ version string
+ versionCodename string // can be empty
+
+ config *DistributionConfig
+}
+
+func newDistribution(sds []*SupportedDistribution) (Distribution, error) {
+
+ f, err := os.Open("/etc/os-release")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ dist := &distribution{}
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if m := regexp.MustCompile(`^PRETTY_NAME=(.*)$`).FindStringSubmatch(s.Text()); m != nil {
+ dist.name = strings.Trim(m[1], `"`)
+ } else if m := regexp.MustCompile(`^ID=(.*)$`).FindStringSubmatch(s.Text()); m != nil {
+ dist.vendor = strings.Trim(m[1], `"`)
+ } else if m := regexp.MustCompile(`^VERSION_ID=(.*)$`).FindStringSubmatch(s.Text()); m != nil {
+ dist.version = strings.Trim(m[1], `"`)
+ } else if m := regexp.MustCompile(`^VERSION_CODENAME=(.*)$`).FindStringSubmatch(s.Text()); m != nil {
+ dist.versionCodename = strings.Trim(m[1], `"`)
+ }
+ }
+
+ if dist.name == "" || dist.vendor == "" || dist.version == "" {
+ return nil, fmt.Errorf("%+v: distribution %w", dist, errors.ErrUnknown)
+ }
+
+ dist.config = coreDistributionConfig
+
+ vendor := strings.ToLower(dist.vendor)
+ for _, sd := range sds {
+ log.Tracef("Supported distribution: %v", sd.Name)
+
+ if sd.Config == nil {
+ continue
+ }
+
+ // nil vendors -> distribution configuration apply to all vendors
+ if sd.Vendors == nil {
+ dist.config.Merge(sd.Config)
+ continue
+ }
+
+ // iterate distribution config vendors
+ for _, distVendor := range sd.Vendors {
+ if distVendor == vendor {
+ log.Tracef("Use \"%v\" config (%v vendor match this config) ...", sd.Name, vendor)
+ dist.config.Merge(sd.Config)
+ break
+ }
+ }
+ }
+
+ if _, err := dist.config.Validate(); err != nil {
+ return nil, err
+ }
+
+ return dist, nil
+}
+
+func (dist *distribution) Name() string {
+ return dist.name
+}
+
+func (dist *distribution) Vendor() string {
+ return dist.vendor
+}
+
+func (dist *distribution) Version() string {
+ return dist.version
+}
+
+func (dist *distribution) VersionCodename() string {
+ return dist.versionCodename
+}
+
+func (dist *distribution) InstallPackages(ctx context.Context, packages []string) error {
+
+ log.Tracef("distribution.InstallPackages called with packages: %v", packages)
+
+ if len(packages) == 0 {
+ return nil
+ }
+
+ subCtx, cancelCtx := context.WithCancel(ctx)
+ defer cancelCtx()
+
+ if err := dist.PackagesIndexUpdate(subCtx); err != nil {
+ return err
+ }
+
+ if err := dist.PackagesInstallClean(subCtx, packages); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (dist *distribution) PackagesIndexUpdate(ctx context.Context) error {
+
+ log.Trace("distribution.PackagesIndexUpdate called")
+
+ return dist.execPackageManagerScript(ctx, dist.config.BinPackagesIndexUpdate)
+}
+func (dist *distribution) PackagesIndexClean(ctx context.Context) error {
+
+ log.Trace("distribution.PackagesIndexClean called")
+
+ return dist.execPackageManagerScript(ctx, dist.config.BinPackagesIndexClean)
+}
+
+func (dist *distribution) PackagesInstallClean(ctx context.Context, packages []string) error {
+
+ log.Tracef("distribution.PackagesInstallClean called with packages: %+v", packages)
+
+ return dist.execPackageManagerScript(ctx, dist.config.BinPackagesInstallClean, packages...)
+}
+
+func (dist *distribution) Config() *DistributionConfig {
+ return dist.config
+}
+
+func (dist *distribution) execPackageManagerScript(ctx context.Context, script string, args ...string) error {
+
+ log.Tracef("distribution.execPackageManagerScript called with script: %v %+v", script, args)
+
+ return helpers.NewExec(ctx).Command(script, args...).Run()
+}
+
+type DistributionInstaller interface {
+ Install(ctx context.Context) error
+}
+
+type distributionInstaller struct {
+ dist Distribution
+ fs Filesystem
+}
+
+func newDistributionInstaller(fs Filesystem, dist Distribution) DistributionInstaller {
+ return &distributionInstaller{
+ dist: dist,
+ fs: fs,
+ }
+}
+
+func (di *distributionInstaller) Install(ctx context.Context) error {
+
+ log.Trace("distributionInstaller.Install called")
+
+ log.Infof("Copying %v assets to container filesystem ...", di.dist.Name())
+ for _, assets := range di.dist.Config().Assets {
+ if err := di.copyAssets(assets); err != nil {
+ return err
+ }
+ }
+
+ log.Infof("Linking %v files to %v ...", di.fs.Paths().Bin, di.dist.Config().BinDest)
+ if err := helpers.SymlinkAll(di.fs.Paths().Bin, di.dist.Config().BinDest); err != nil {
+ return err
+ }
+
+ // exec container install.sh script
+ installSh := filepath.Join(di.fs.Paths().Root, di.dist.Config().InstallScript)
+
+ subCtx, cancelCtx := context.WithCancel(ctx)
+ defer cancelCtx()
+
+ if err := helpers.NewExec(subCtx).Command(installSh).Run(); err != nil {
+ return err
+ }
+
+ // remove container install.sh script
+ if err := helpers.Remove(installSh); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (di *distributionInstaller) copyAssets(efs *embed.FS) error {
+
+ log.Tracef("distributionInstaller.copyAssets called with efs: %+v", efs)
+
+ if err := helpers.CopyEmbedDir(efs, di.fs.Paths().Root, di.assetPerm); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (di *distributionInstaller) assetPerm(file string) iofs.FileMode {
+
+ log.Tracef("distributionInstaller.assetPerm called with file: %v", file)
+
+ var perm iofs.FileMode = 0644
+
+ // add execute to files in container bin directory and *.sh, *.sh.* files
+ if strings.HasPrefix(file, di.fs.Paths().Bin) || strings.HasSuffix(file, ".sh") || strings.Contains(file, ".sh.") {
+ perm = 0755
+ }
+
+ return perm
+}
diff --git a/core/entrypoint.go b/core/entrypoint.go
new file mode 100644
index 00000000..eb6e89fd
--- /dev/null
+++ b/core/entrypoint.go
@@ -0,0 +1,168 @@
+package core
+
+import (
+ "context"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Entrypoint options
+// =============================
+
+type EntrypointOptions struct {
+ SkipEnvFiles bool
+
+ LifecycleOptions
+
+ Services []Service
+
+ UnsecureFastWrite bool
+
+ InstallPackages []string
+
+ KeepAlive bool
+}
+
+// Entrypoint
+// =============================
+
+type Entrypoint interface {
+ Run(ctx context.Context, epo EntrypointOptions) (int, error)
+}
+
+type entrypoint struct {
+ fs Filesystem
+ dist Distribution
+ svcs Services
+ prcs Processes
+}
+
+func newEntrypoint(fs Filesystem, dist Distribution, svcs Services, prcs Processes) (Entrypoint, error) {
+
+ return &entrypoint{
+ fs: fs,
+ dist: dist,
+ svcs: svcs,
+ prcs: prcs,
+ }, nil
+}
+
+func (ep *entrypoint) Run(ctx context.Context, epo EntrypointOptions) (int, error) {
+
+ log.Tracef("entrypoint.Run called with epo: %+v", epo)
+
+ // set unsecure fast write
+ if epo.UnsecureFastWrite {
+ log.Info("Unsecure fast write is enabled: setting LD_PRELOAD=libeatmydata.so")
+ os.Setenv("LD_PRELOAD", "libeatmydata.so")
+ }
+
+ // create filesystem
+ if err := ep.fs.Create(); err != nil {
+ return 1, err
+ }
+
+ // install packages
+ if err := ep.dist.InstallPackages(ctx, epo.InstallPackages); err != nil {
+ return 1, err
+ }
+
+ // services to run
+ services, err := ep.getServices(epo.Services)
+ if err != nil {
+ return 1, err
+ }
+
+ // prepare services to run
+ if err := ep.prepareServices(ctx, services); err != nil {
+ return 1, err
+ }
+
+ // set environment variables from environment files
+ if epo.SkipEnvFiles {
+ log.Info("Skipping getting environment variables values from environment file(s) ...")
+ } else if err := ep.fs.LoadDotEnv(); err != nil {
+ return 1, err
+ }
+
+ // log environment variables values
+ log.Debugf("Environment variables:\n%v", strings.Join(os.Environ(), "\n"))
+
+ // run entrypoint lifecycle
+ lc := newLifecycle(ep.prcs, &epo.LifecycleOptions, services)
+
+ lc.run(ctx)
+
+ // keep alive
+ if epo.KeepAlive {
+ log.Info("All processes have exited, keep container alive ☠ ...")
+ for {
+ time.Sleep(24 * time.Hour)
+ }
+ }
+
+ return lc.ExitCode(), nil
+}
+
+func (ep *entrypoint) getServices(services []Service) ([]Service, error) {
+
+ log.Tracef("entrypoint.getServices called with services: %v", services)
+
+ // if no service is specified run service(s) linked to the entrypoint
+ if len(services) == 0 {
+
+ log.Trace("no service is specified, search all services linked to the entrypoint")
+
+ var err error
+ services, err = ep.svcs.List(WithServicesLinked(true), SortServicesByPriority(true))
+ if err != nil {
+ return nil, err
+ }
+ log.Tracef("services linked to entrypoint: %+v", services)
+ }
+
+ // sort services by priority
+ ep.svcs.SortByPriority(services)
+
+ return services, nil
+}
+
+func (ep *entrypoint) prepareServices(ctx context.Context, services []Service) error {
+
+ // list services to download and install
+ var servicesToDownload, servicesToInstall []Service
+ for _, s := range services {
+ if s.DownloadFile() != "" && !s.IsDownloaded() {
+ servicesToDownload = append(servicesToDownload, s)
+ }
+
+ if s.InstallFile() != "" && !s.IsInstalled() {
+ servicesToInstall = append(servicesToInstall, s)
+ }
+ }
+
+ // download services
+ if len(servicesToDownload) > 0 {
+ if err := ep.dist.PackagesIndexUpdate(ctx); err != nil {
+ return err
+ }
+
+ if err := ep.svcs.Download(ctx, servicesToDownload); err != nil {
+ return err
+ }
+
+ if err := ep.dist.PackagesIndexClean(ctx); err != nil {
+ return err
+ }
+ }
+
+ // install services
+ if err := ep.svcs.Install(ctx, servicesToInstall); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/core/environment.go b/core/environment.go
new file mode 100644
index 00000000..a4e6210a
--- /dev/null
+++ b/core/environment.go
@@ -0,0 +1,67 @@
+package core
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/osixia/container-baseimage/errors"
+)
+
+// Environment config
+// =============================
+
+type EnvironmentConfig struct {
+ ImageKey string
+ DebugPackagesKey string
+}
+
+func (envc *EnvironmentConfig) Validate() (bool, error) {
+
+ if envc.ImageKey == "" {
+ return false, fmt.Errorf("ImageKey: %w", errors.ErrRequired)
+ }
+
+ if envc.DebugPackagesKey == "" {
+ return false, fmt.Errorf("DebugPackagesKey: %w", errors.ErrRequired)
+ }
+
+ return true, nil
+}
+
+// Environment
+// =============================
+
+type Environment interface {
+ Image() string
+
+ DebugPackages() string
+
+ Config() *EnvironmentConfig
+}
+
+type environment struct {
+ config *EnvironmentConfig
+}
+
+func newEnvironment(envc *EnvironmentConfig) (Environment, error) {
+
+ if _, err := envc.Validate(); err != nil {
+ return nil, err
+ }
+
+ return &environment{
+ config: envc,
+ }, nil
+}
+
+func (env *environment) Image() string {
+ return os.Getenv(env.config.ImageKey)
+}
+
+func (env *environment) DebugPackages() string {
+ return os.Getenv(env.config.DebugPackagesKey)
+}
+
+func (env *environment) Config() *EnvironmentConfig {
+ return env.config
+}
diff --git a/core/filesystem.go b/core/filesystem.go
new file mode 100644
index 00000000..b9324a3e
--- /dev/null
+++ b/core/filesystem.go
@@ -0,0 +1,206 @@
+package core
+
+import (
+ "fmt"
+ iofs "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/subosito/gotenv"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Filesystem config
+// =============================
+
+type FilesystemConfig struct {
+ RootPath string
+ RunRootPath string
+
+ EnvironmentFilesPrefix string
+}
+
+func (fsc *FilesystemConfig) Validate() (bool, error) {
+
+ if fsc.RootPath == "" {
+ return false, fmt.Errorf("RootPath: %w", errors.ErrRequired)
+ }
+ if fsc.RunRootPath == "" {
+ return false, fmt.Errorf("RunRootPath: %w", errors.ErrRequired)
+ }
+
+ if fsc.EnvironmentFilesPrefix == "" {
+ return false, fmt.Errorf("EnvironmentFilesPrefix: %w", errors.ErrRequired)
+ }
+
+ return true, nil
+}
+
+// Filesystem paths
+// =============================
+
+type filesystemPaths struct {
+ Root string
+
+ Environment string
+ Services string
+ GeneratorTemplates string
+
+ Bin string
+
+ RunRoot string
+ RunProcess string
+ RunGenerator string
+}
+
+func newFilesystemPaths(fsc *FilesystemConfig) *filesystemPaths {
+
+ return &filesystemPaths{
+ Root: fsc.RootPath,
+
+ Environment: fsc.RootPath + "/environment",
+ Services: fsc.RootPath + "/services",
+ GeneratorTemplates: fsc.RootPath + "/generator/templates",
+
+ Bin: fsc.RootPath + "/bin",
+
+ RunRoot: fsc.RunRootPath,
+ RunProcess: fsc.RunRootPath + "/process",
+ RunGenerator: fsc.RunRootPath + "/generator",
+ }
+}
+
+// Filesystem
+// =============================
+
+type Filesystem interface {
+ Create() error
+
+ ListDotEnv() ([]string, error)
+ LoadDotEnv() error
+
+ Paths() *filesystemPaths
+ Config() *FilesystemConfig
+}
+
+type filesystem struct {
+ paths *filesystemPaths
+ config *FilesystemConfig
+}
+
+func newFilesystem(fsc *FilesystemConfig) (Filesystem, error) {
+
+ if _, err := fsc.Validate(); err != nil {
+ return nil, err
+ }
+
+ fsp := newFilesystemPaths(fsc)
+
+ return &filesystem{
+ paths: fsp,
+ config: fsc,
+ }, nil
+}
+
+func (fs *filesystem) Create() error {
+
+ log.Trace("filesystem.Create called")
+
+ log.Info("Creating container filesystem ...")
+
+ dirs := []string{
+ fs.paths.Root,
+ fs.paths.Environment,
+ fs.paths.GeneratorTemplates,
+ fs.paths.Services,
+ fs.paths.Bin,
+ fs.paths.RunRoot,
+ fs.paths.RunProcess,
+ fs.paths.RunGenerator,
+ }
+
+ for _, dir := range dirs {
+ log.Tracef("Creating directory %v ...", dir)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+ }
+
+ worldWritableDirs := []string{fs.paths.RunRoot, fs.paths.RunProcess, fs.paths.RunGenerator}
+ var worldWritablePerm iofs.FileMode = 0777
+ for _, dir := range worldWritableDirs {
+
+ fi, err := os.Stat(dir)
+ if err != nil {
+ return err
+ }
+
+ if fi.Mode().Perm() != worldWritablePerm {
+ log.Tracef("Setting %v permissions to %v", worldWritablePerm, dir)
+ if err := os.Chmod(dir, worldWritablePerm); err != nil {
+ log.Warning(err.Error())
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func (fs *filesystem) LoadDotEnv() error {
+
+ log.Trace("filesystem.LoadDotEnv called")
+
+ files, err := fs.ListDotEnv()
+ if err != nil {
+ return err // bug fix: was "return nil", which silently swallowed the listing error
+ }
+
+ if len(files) == 0 {
+ return nil
+ }
+
+ log.Infof("Loading environment variables from %v ...", strings.Join(files, ", "))
+ envBackup := os.Environ()
+ if err := gotenv.OverLoad(files...); err != nil {
+ return err
+ }
+
+ return gotenv.OverApply(strings.NewReader(strings.Join(envBackup, "\n")))
+}
+
+func (fs *filesystem) ListDotEnv() ([]string, error) {
+
+ log.Trace("filesystem.ListDotEnv called")
+
+ var files []string
+
+ log.Debugf("Searching for %v files in %v ...", fs.config.EnvironmentFilesPrefix, fs.paths.Environment)
+ err := filepath.Walk(fs.paths.Environment, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if !info.IsDir() && strings.HasPrefix(info.Name(), fs.config.EnvironmentFilesPrefix) {
+ files = append(files, path)
+ }
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
+
+func (fs *filesystem) Paths() *filesystemPaths {
+ return fs.paths
+}
+
+func (fs *filesystem) Config() *FilesystemConfig {
+ return fs.config
+}
diff --git a/core/generator.go b/core/generator.go
new file mode 100644
index 00000000..6495c2cc
--- /dev/null
+++ b/core/generator.go
@@ -0,0 +1,316 @@
+package core
+
+import (
+ goerrors "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/helpers"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Generator config
+// =============================
+
+// GeneratorConfig holds the generator settings.
+type GeneratorConfig struct {
+	// fromImage is the base image generated Dockerfiles build FROM.
+	fromImage string
+}
+
+// Validate checks that all required fields are set. It returns true on
+// success, or false and an error naming the first missing field.
+func (genc *GeneratorConfig) Validate() (bool, error) {
+
+	if genc.fromImage == "" {
+		return false, fmt.Errorf("fromImage: %w", errors.ErrRequired)
+	}
+
+	return true, nil
+}
+
+// Generator options
+// =============================
+
+// GenerateBootstrapOptions bundles the options for a full bootstrap
+// generation (Dockerfile + services + environment).
+type GenerateBootstrapOptions struct {
+	GenerateDockerfileOptions
+	GenerateServicesOptions
+
+	// Multiprocess forces a multi-service layout; it is also switched on
+	// automatically when more than one service name is given.
+	Multiprocess bool
+}
+
+// GenerateDockerfileOptions holds the options for Dockerfile generation.
+type GenerateDockerfileOptions struct {
+	// Image is the name of the image the generated Dockerfile produces.
+	Image string
+}
+
+// GenerateServicesOptions holds the options for service files generation.
+type GenerateServicesOptions struct {
+	// Names of the services to generate; defaults to "service-1" when empty.
+	Names []string
+	// Priority written to each generated service.
+	Priority int
+	// Optional keeps the optional/download template files in the output.
+	Optional bool
+	// Tags create one tag marker file per tag for each service.
+	Tags []string
+}
+
+// Generator
+// =============================
+
+// Generator renders bootstrap skeletons (Dockerfile, services, environment
+// files) from the filesystem's generator templates.
+type Generator interface {
+	GenerateBootstrap(gopt *GenerateBootstrapOptions) ([]string, error)
+
+	GenerateDockerfile(gopt *GenerateDockerfileOptions) ([]string, error)
+	GenerateEnvironment() ([]string, error)
+	GenerateServices(gopt *GenerateServicesOptions) ([]string, error)
+
+	Config() *GeneratorConfig
+}
+
+// generator is the default Generator implementation. Template rendering is
+// driven through process environment variables (see setEnv/backupEnv).
+type generator struct {
+	env  Environment
+	fs   Filesystem
+	svcs Services
+	dist Distribution
+
+	// envBackup holds the environment captured by backupEnv, restored by
+	// restoreEnv after template rendering.
+	envBackup []string
+
+	config *GeneratorConfig
+}
+
+// newGenerator validates genc and builds a Generator wired to the given
+// environment, filesystem, services and distribution.
+func newGenerator(env Environment, fs Filesystem, svcs Services, dist Distribution, genc *GeneratorConfig) (Generator, error) {
+
+	if _, err := genc.Validate(); err != nil {
+		return nil, err
+	}
+
+	gen := &generator{
+		env:    env,
+		fs:     fs,
+		svcs:   svcs,
+		dist:   dist,
+		config: genc,
+	}
+
+	return gen, nil
+}
+
+// GenerateBootstrap generates a complete bootstrap: Dockerfile, service files
+// and environment files. More than one service name forces multiprocess mode,
+// and multiprocess bootstraps are padded to at least two services (missing
+// names become "service-<n>"). It returns all generated file paths.
+func (gen *generator) GenerateBootstrap(gopt *GenerateBootstrapOptions) ([]string, error) {
+
+	log.Tracef("generator.GenerateBootstrap called with gopt: %v", gopt)
+
+	if len(gopt.Names) > 1 {
+		gopt.Multiprocess = true
+	}
+
+	if gopt.Multiprocess {
+		// Pad the name list so a multiprocess bootstrap has >= 2 services.
+		for len(gopt.Names) < 2 {
+			gopt.Names = append(gopt.Names, fmt.Sprintf("service-%v", len(gopt.Names)+1))
+		}
+	}
+
+	var files []string
+
+	// add dockerfile
+	df, err := gen.GenerateDockerfile(&gopt.GenerateDockerfileOptions)
+	if err != nil {
+		return nil, err
+	}
+	files = append(files, df...)
+
+	// add services
+	sf, err := gen.GenerateServices(&gopt.GenerateServicesOptions)
+	if err != nil {
+		return nil, err
+	}
+	files = append(files, sf...)
+
+	// add environment
+	ef, err := gen.GenerateEnvironment()
+	if err != nil {
+		return nil, err
+	}
+	files = append(files, ef...)
+
+	return files, nil
+}
+
+// GenerateDockerfile renders the Dockerfile template with substitution
+// variables set in the process environment, and returns the generated file
+// paths. The pre-render environment is restored before returning.
+func (gen *generator) GenerateDockerfile(gopt *GenerateDockerfileOptions) ([]string, error) {
+
+	log.Trace("generator.GenerateDockerfile called")
+
+	// Pick a free environment-files directory before clearing the env.
+	envDir, err := gen.envDir(0)
+	if err != nil {
+		return nil, err
+	}
+
+	gen.backupEnv()
+	defer gen.restoreEnv()
+
+	// Only these variables are visible to the template (setEnv clears
+	// everything else first).
+	gen.setEnv(map[string]string{
+		"FROM_IMAGE":                       gen.config.fromImage,
+		"IMAGE":                            gopt.Image,
+		"CONTAINER_IMAGE_ENV_KEY":          gen.env.Config().ImageKey,
+		"PACKAGES_INDEX_UPDATE_BIN":        gen.dist.Config().BinPackagesIndexUpdate,
+		"PACKAGES_INSTALL_CLEAN_BIN":       gen.dist.Config().BinPackagesInstallClean,
+		"DOCKERFILE_SERVICES_DIR":          gen.dockerfileServicesDir(),
+		"CONTAINER_SERVICES_DIR":           gen.fs.Paths().Services,
+		"DOCKERFILE_ENVIRONMENT_FILES_DIR": gen.dockerfileEnvDir(),
+		"CONTAINER_ENVIRONMENT_FILES_DIR":  envDir,
+	})
+
+	// add dockerfile
+	return gen.output("Dockerfile.template", "Dockerfile.template")
+}
+
+// GenerateEnvironment renders the environment file templates and returns the
+// generated file paths. The environment is cleared during rendering (setEnv
+// with nil) and restored before returning.
+func (gen *generator) GenerateEnvironment() ([]string, error) {
+
+	log.Tracef("generator.GenerateEnvironment called")
+
+	gen.backupEnv()
+	defer gen.restoreEnv()
+
+	// Render with an empty environment: no substitution variables needed.
+	gen.setEnv(nil)
+
+	// add environment
+	return gen.output("/environment", gen.dockerfileEnvDir())
+}
+
+// GenerateServices renders the service template once per service name (at
+// least one, defaulting to "service-1"), plus one empty tag marker file per
+// tag. It returns all generated file paths.
+func (gen *generator) GenerateServices(gopt *GenerateServicesOptions) ([]string, error) {
+
+	log.Tracef("generator.GenerateServices called with gopt: %v", gopt)
+
+	if len(gopt.Names) == 0 {
+		gopt.Names = append(gopt.Names, "service-1")
+	}
+
+	gen.backupEnv()
+	defer gen.restoreEnv()
+
+	var files []string
+
+	for _, service := range gopt.Names {
+
+		gen.setEnv(map[string]string{
+			"SERVICE_NAME":     service,
+			"SERVICE_PRIORITY": strconv.Itoa(gopt.Priority),
+		})
+
+		sd := filepath.Join(gen.dockerfileServicesDir(), service)
+
+		// Templates excluded from the output for non-optional services:
+		// presumably the optional marker and download script only apply to
+		// optional services — TODO confirm against the template set.
+		var rps []string
+		if !gopt.Optional {
+			rps = []string{".optional.template", "download.sh.template"}
+		}
+
+		// add service
+		sf, err := gen.output("/services/service-name", sd, rps...)
+		if err != nil {
+			return nil, err
+		}
+		files = append(files, sf...)
+
+		// add services extra files (one empty marker file per tag)
+		extraFiles := make([]string, 0, len(gopt.Tags))
+		for _, tag := range gopt.Tags {
+			extraFiles = append(extraFiles, filepath.Join(sd, gen.svcs.Config().TagsDir, tag))
+		}
+
+		ef, err := gen.create(extraFiles...)
+		if err != nil {
+			return nil, err
+		}
+		files = append(files, ef...)
+	}
+
+	return files, nil
+}
+
+// Config returns the generator configuration.
+func (gen *generator) Config() *GeneratorConfig {
+	return gen.config
+}
+
+// output copies the template at path (relative to the generator templates
+// dir) into a temporary directory under dest, removes the excluded entries
+// excls, then envsubst-renders all ".template" files into the generator run
+// directory. It returns the rendered file paths.
+func (gen *generator) output(path string, dest string, excls ...string) ([]string, error) {
+
+	log.Tracef("generator.output called with path: %v, dest: %v", path, dest)
+
+	tmpDir, err := os.MkdirTemp(gen.fs.Paths().RunRoot, "gen-")
+	if err != nil {
+		return nil, err
+	}
+	// The staging directory is always cleaned up, even on error.
+	defer os.RemoveAll(tmpDir)
+
+	if err := helpers.Copy(filepath.Join(gen.fs.Paths().GeneratorTemplates, path), filepath.Join(tmpDir, dest)); err != nil {
+		return nil, err
+	}
+
+	for _, excl := range excls {
+		if err := helpers.Remove(filepath.Join(tmpDir, dest, excl)); err != nil {
+			return nil, err
+		}
+	}
+
+	return helpers.EnvsubstTemplates(tmpDir, gen.fs.Paths().RunGenerator, ".template")
+}
+
+// create creates an empty file for each given path, relative to the generator
+// run directory, and returns the resulting absolute paths.
+func (gen *generator) create(paths ...string) ([]string, error) {
+
+	log.Tracef("generator.create called with paths: %v", paths)
+
+	created := make([]string, 0, len(paths))
+	for _, relPath := range paths {
+		target := filepath.Join(gen.fs.Paths().RunGenerator, relPath)
+
+		if _, err := helpers.Create(target); err != nil {
+			return nil, err
+		}
+
+		created = append(created, target)
+	}
+
+	return created, nil
+}
+
+// backupEnv snapshots the current process environment for restoreEnv.
+func (gen *generator) backupEnv() {
+	gen.envBackup = os.Environ()
+}
+
+// restoreEnv clears the process environment and restores the snapshot taken
+// by backupEnv.
+func (gen *generator) restoreEnv() {
+	os.Clearenv()
+
+	for _, e := range gen.envBackup {
+		// Bug fix: SplitN keeps '=' characters inside the value intact
+		// (e.g. FOO=a=b); plain Split truncated the value at the first '='
+		// and paniced on entries without any '='.
+		kv := strings.SplitN(e, "=", 2)
+		if len(kv) != 2 {
+			continue
+		}
+		os.Setenv(kv[0], kv[1])
+	}
+}
+
+// setEnv clears the process environment and sets exactly the given variables
+// (a nil map leaves the environment empty). Callers are expected to have
+// called backupEnv first and to restore afterwards.
+func (gen *generator) setEnv(kv map[string]string) {
+	os.Clearenv()
+
+	for k, v := range kv {
+		os.Setenv(k, v)
+	}
+}
+
+// envDir returns the first environment-files directory that does not exist
+// yet or is empty, recursing into "<n>-child" subdirectories while the
+// current candidate already contains files.
+func (gen *generator) envDir(child int) (string, error) {
+
+	envDir := gen.fs.Paths().Environment
+
+	if child != 0 {
+		envDir = filepath.Join(envDir, fmt.Sprintf("%v-child", child))
+	}
+
+	envFiles, err := os.ReadDir(envDir)
+	if goerrors.Is(err, os.ErrNotExist) {
+		// Directory does not exist yet: it is free to use.
+		return envDir, nil
+	}
+	if err != nil {
+		// Bug fix: other read errors were previously ignored, letting the
+		// caller treat an unreadable directory as usable.
+		return "", err
+	}
+
+	if len(envFiles) != 0 {
+		return gen.envDir(child + 1)
+	}
+
+	return envDir, nil
+}
+
+// dockerfileEnvDir returns the environment directory name as referenced from
+// inside the generated Dockerfile (base name only, no container path).
+func (gen *generator) dockerfileEnvDir() string {
+	return filepath.Base(gen.fs.Paths().Environment)
+}
+
+// dockerfileServicesDir returns the services directory name as referenced
+// from inside the generated Dockerfile (base name only, no container path).
+func (gen *generator) dockerfileServicesDir() string {
+	return filepath.Base(gen.fs.Paths().Services)
+}
diff --git a/core/lifecycle.go b/core/lifecycle.go
new file mode 100644
index 00000000..49ab17dc
--- /dev/null
+++ b/core/lifecycle.go
@@ -0,0 +1,575 @@
+package core
+
+import (
+ "context"
+ goerrors "errors"
+ "fmt"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/google/shlex"
+ "github.com/hashicorp/go-reap"
+ "golang.org/x/sync/errgroup"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/helpers"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Lifecycle steps constants and global variables
+// =============================
+
+const (
+	// LifecycleStepStartup runs services' startup files before any process.
+	LifecycleStepStartup LifecycleStep = "startup"
+	// LifecycleStepProcess runs services' long-lived process files.
+	LifecycleStepProcess LifecycleStep = "process"
+	// LifecycleStepFinish runs services' finish files after processes end.
+	LifecycleStepFinish LifecycleStep = "finish"
+)
+
+// LifecycleSteps lists all lifecycle steps in execution order.
+var LifecycleSteps = []LifecycleStep{
+	LifecycleStepStartup,
+	LifecycleStepProcess,
+	LifecycleStepFinish,
+}
+
+// LifecycleInterceptedSignals are the termination signals the lifecycle
+// intercepts itself instead of forwarding to child processes.
+var LifecycleInterceptedSignals = []os.Signal{
+	syscall.SIGINT,
+	syscall.SIGTERM,
+	syscall.SIGQUIT,
+}
+
+// Lifecycle steps
+// =============================
+
+// LifecycleStep identifies one phase of the container lifecycle.
+type LifecycleStep string
+
+// LifecycleStepsList returns the lifecycle step names as plain strings, in
+// execution order.
+func LifecycleStepsList() []string {
+	names := make([]string, len(LifecycleSteps))
+	for i, step := range LifecycleSteps {
+		names[i] = string(step)
+	}
+
+	return names
+}
+
+// Lifecycle options
+// =============================
+
+// LifecycleOptions controls which lifecycle steps run and how.
+type LifecycleOptions struct {
+	// Skip* disable the corresponding lifecycle step entirely.
+	SkipStartup bool
+	SkipProcess bool
+	SkipFinish  bool
+
+	// Pre*Cmds are shell-like commands run before the matching step.
+	PreStartupCmds []string
+	PreProcessCmds []string
+	PreFinishCmds  []string
+	PreExitCmds    []string
+
+	// Args is the container command line, passed to process services or run
+	// directly when there are no process services.
+	Args []string
+	// RunBash additionally starts an interactive bash during the process step.
+	RunBash bool
+
+	// TerminateAllOnExit kills every remaining process on exit (pid 1 only),
+	// escalating SIGTERM to SIGKILL after TerminateAllOnExitTimeout.
+	TerminateAllOnExit        bool
+	TerminateAllOnExitTimeout time.Duration
+	// RestartProcesses overrides the default restart policy (nil: restart
+	// only when more than one process service is configured).
+	RestartProcesses *bool
+}
+
+// Lifecycle
+// =============================
+
+// lifecycle orchestrates the container startup / process / finish steps for a
+// set of services.
+type lifecycle struct {
+	prcs Processes
+
+	// Services partitioned by which lifecycle file they provide.
+	startupServices []Service
+	processServices []Service
+	finishServices  []Service
+
+	// step is the lifecycle step currently running.
+	step     LifecycleStep
+	// exitCode is the container exit code accumulated across steps.
+	exitCode int
+
+	// processes tracks the running process services for signal forwarding
+	// and the wanted-down file watcher.
+	processes []*lifecycleProcess
+
+	options *LifecycleOptions
+}
+
+func newLifecycle(prcs Processes, lco *LifecycleOptions, ss []Service) *lifecycle {
+
+ var sss, pss, fss []Service
+
+ for _, s := range ss {
+ if s.StartupFile() != "" {
+ sss = append(sss, s)
+ }
+ if s.ProcessFile() != "" {
+ pss = append(pss, s)
+ }
+ if s.FinishFile() != "" {
+ fss = append(fss, s)
+ }
+ }
+
+ return &lifecycle{
+ prcs: prcs,
+
+ startupServices: sss,
+ processServices: pss,
+ finishServices: fss,
+
+ options: lco,
+ }
+}
+
+// ExitCode returns the exit code accumulated while running the lifecycle.
+func (lc *lifecycle) ExitCode() int {
+	return lc.exitCode
+}
+
+// exec builds a command runner bound to ctx with the configured
+// terminate-on-exit timeout.
+func (lc *lifecycle) exec(ctx context.Context) *helpers.Exec {
+
+	return helpers.NewExec(ctx).WithTimeout(lc.options.TerminateAllOnExitTimeout)
+}
+
+// run executes the whole lifecycle: it installs an interrupt handler and a
+// zombie-process reaper, then runs the startup, process and finish steps in
+// order (each skippable via options). Failures set lc.exitCode; the deferred
+// exit step always runs last.
+func (lc *lifecycle) run(ctx context.Context) {
+
+	log.Trace("lifecycle.Run called")
+
+	runCtx, cancelRun := context.WithCancel(ctx)
+	defer lc.exit(ctx, cancelRun) // exit
+
+	// Separate context for the process step so an interrupt during it lets
+	// the finish step still run on runCtx.
+	processCtx, cancelProcess := context.WithCancel(runCtx)
+
+	// catch first interrupt
+	interrupt := make(chan os.Signal, 1)
+	signal.Notify(interrupt, LifecycleInterceptedSignals...)
+
+	go func() {
+
+		<-interrupt
+
+		log.Info("Container execution aborted (SIGINT, SIGTERM or SIGQUIT signal received)")
+
+		// During the process step only cancel that step; otherwise abort
+		// the whole run.
+		if lc.step == LifecycleStepProcess {
+			log.Trace("cancel process context")
+			cancelProcess()
+		} else {
+			log.Trace("cancel run context")
+			cancelRun()
+		}
+	}()
+
+	// reap zombie processes
+	pids := make(reap.PidCh, 1)
+	errs := make(reap.ErrorCh, 1)
+	done := make(chan struct{})
+
+	reapLock := &sync.RWMutex{}
+
+	// Holding the write lock pauses reaping while the lifecycle itself runs;
+	// released when run returns — NOTE(review): confirm against go-reap docs.
+	reapLock.Lock()
+	defer reapLock.Unlock()
+
+	go reap.ReapChildren(pids, errs, done, reapLock)
+
+	go func() {
+		log.Trace("Starting zombie process reaper ...")
+		for {
+			select {
+			case pid, ok := <-pids:
+				if !ok {
+					continue
+				}
+				log.Debugf("Reaped pid: %v", pid)
+
+			case e, ok := <-errs:
+				if !ok {
+					continue
+				}
+				log.Warningf("Failed to reap zombie process: %v", e)
+			}
+		}
+	}()
+
+	// startup
+	if !lc.options.SkipStartup {
+		if err := lc.runStep(runCtx, LifecycleStepStartup, lc.options.PreStartupCmds, lc.startup); err != nil {
+			lc.exitCode = errors.ExitCode(err)
+
+			log.Error(err.Error())
+			return
+		}
+	}
+
+	// process
+	if !lc.options.SkipProcess {
+		if err := lc.runStep(processCtx, LifecycleStepProcess, lc.options.PreProcessCmds, lc.process); err != nil {
+			lc.exitCode = errors.ExitCode(err)
+
+			log.Error(err.Error())
+		}
+	}
+
+	// finish — runs even when the process step failed; its error only sets
+	// the exit code if none was recorded yet.
+	if !lc.options.SkipFinish {
+		if err := lc.runStep(runCtx, LifecycleStepFinish, lc.options.PreFinishCmds, lc.finish); err != nil {
+			if lc.exitCode == 0 {
+				lc.exitCode = errors.ExitCode(err)
+			}
+
+			log.Error(err.Error())
+			return
+		}
+	}
+}
+
+// runStep runs one lifecycle step: the step's pre-commands first, then the
+// step function itself. A step is skipped once the context is cancelled.
+func (lc *lifecycle) runStep(ctx context.Context, step LifecycleStep, preCmds []string, stepFunc func(context.Context) error) error {
+
+	log.Trace("lifecycle.runStep called")
+
+	if ctx.Err() == context.Canceled {
+		log.Debugf("Ignoring %v lifecycle step (container execution aborted) ...", step)
+		return nil
+	}
+
+	lc.step = step
+
+	log.Debugf("Starting %v lifecycle step ...", step)
+	if err := lc.execPreCommands(ctx, preCmds, string(lc.step)); err != nil {
+		return err
+	}
+
+	return stepFunc(ctx)
+}
+
+// startup runs each startup service's startup file sequentially, stopping at
+// the first failure.
+func (lc *lifecycle) startup(ctx context.Context) error {
+
+	log.Trace("lifecycle.startup called")
+
+	for _, svc := range lc.startupServices {
+		err := lc.exec(ctx).Command(svc.StartupFile()).Run()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// process runs the process lifecycle step: the container command (and/or
+// bash) plus all process services, concurrently in an errgroup. While any
+// process runs it also forwards signals to children and watches wanted-down
+// marker files. It returns the first error from the group.
+func (lc *lifecycle) process(ctx context.Context) error {
+
+	log.Trace("lifecycle.process called")
+
+	g, subCtx := errgroup.WithContext(ctx)
+
+	// run commands
+	lc.runProcessCommand(subCtx, g)
+
+	// run services
+	lc.runProcessServices(subCtx, g)
+
+	if len(lc.processes) > 0 {
+
+		// forward signals to processes
+		sigChan := make(chan os.Signal, 1)
+		signal.Notify(sigChan)
+
+		go lc.forwardSignals(sigChan)
+
+		// watch processes files
+		w, err := lc.prcs.NewWatcher()
+		if err != nil {
+			return err
+		}
+		defer w.Close()
+
+		go lc.watchProcesses(w)
+	}
+
+	return g.Wait()
+}
+
+// lifecycleProcess ties a managed process to its running command and the
+// cancel function of the context that command runs under.
+type lifecycleProcess struct {
+	process Process
+	exec    *helpers.Exec
+	cancel  context.CancelFunc
+}
+
+func (lc *lifecycle) runProcessServices(ctx context.Context, g *errgroup.Group) {
+
+ log.Trace("lifecycle.runProcessServices called")
+
+ // do not restart services by default
+ restart := false
+
+ // if option is set, set restart accordingly
+ if lc.options.RestartProcesses != nil {
+ restart = *lc.options.RestartProcesses
+ }
+
+ // if option is not set, and this is a multiprocess set restart to true
+ if lc.options.RestartProcesses == nil && len(lc.processServices) > 1 {
+ restart = true
+ }
+
+ for _, s := range lc.processServices {
+
+ s := s
+
+ log.Tracef("prepare running service %v", s.Name())
+
+ p, err := lc.prcs.New(s)
+ if err != nil {
+ log.Warning(err.Error())
+ }
+
+ lcp := &lifecycleProcess{
+ process: p,
+ }
+
+ lc.processes = append(lc.processes, lcp)
+
+ g.Go(func() error {
+
+ for {
+
+ if ctx.Err() == context.Canceled {
+ log.Tracef("%v: context cancelled", s.Name())
+ break
+ }
+
+ if p.IsWantedDown() {
+ log.Tracef("%v: wanted down", s.Name())
+ time.Sleep(1 * time.Second)
+ continue
+ }
+
+ subCtx, cancelCtx := context.WithCancel(ctx)
+
+ lcp.cancel = cancelCtx
+ lcp.exec = lc.exec(subCtx).WithSetGPID(true).WithPIDFile(p.PIDFile()).Command(s.ProcessFile(), lc.options.Args...)
+
+ err = lcp.exec.Run()
+
+ if err != nil {
+ err = fmt.Errorf("%v: %w", s.Name(), err)
+ }
+
+ if !restart {
+ log.Tracef("%v: ended restart disabled", s.Name())
+ break
+ }
+ }
+
+ return err
+ })
+ }
+}
+
+// watchProcesses consumes filesystem events from w until the watcher is
+// closed. When a wanted-down marker file is created for a managed process,
+// the context of that process's running command is cancelled to stop it.
+func (lc *lifecycle) watchProcesses(w *fsnotify.Watcher) {
+
+	for {
+		select {
+		// Read from Errors.
+		case _, ok := <-w.Errors:
+			if !ok { // Channel was closed (i.e. Watcher.Close() was called).
+				return
+			}
+		// Read from Events.
+		case e, ok := <-w.Events:
+			if !ok { // Channel was closed (i.e. Watcher.Close() was called).
+				return
+			}
+
+			log.Tracef("recieved watch event %+v", e)
+
+			if e.Op&fsnotify.Create == fsnotify.Create {
+
+				// Find the process whose wanted-down file was just created.
+				for _, lcp := range lc.processes {
+					if e.Name != lcp.process.WantedDownFile() {
+						continue
+					}
+
+					log.Infof("Stopping %v process ...", lcp.process.Name())
+
+					// Only cancel if the process has actually been started.
+					if lcp.exec != nil && lcp.cancel != nil {
+						log.Tracef("cancelling %v process context", lcp.process.Name())
+						lcp.cancel()
+					}
+
+					break
+				}
+			}
+		}
+	}
+}
+
+// forwardSignals relays every signal received on sigChan to all managed child
+// processes, except lifecycle-intercepted signals and SIGCHLD.
+func (lc *lifecycle) forwardSignals(sigChan <-chan os.Signal) {
+
+	// Build the ignore set once (fix: it was re-created via append on the
+	// shared package-level slice for every received signal).
+	ignoreSignals := append([]os.Signal{syscall.SIGCHLD}, LifecycleInterceptedSignals...)
+
+	for sig := range sigChan {
+
+		ignore := false
+		for _, isig := range ignoreSignals {
+			if sig == isig {
+				ignore = true
+				break
+			}
+		}
+
+		if ignore {
+			log.Debugf("Ignoring signal: %v", sig)
+			continue
+		}
+
+		log.Debugf("Sending %v signal to childs processes ...", sig)
+
+		for _, lcp := range lc.processes {
+			// Skip processes that have not been started yet.
+			if lcp.exec == nil || lcp.exec.Cmd == nil || lcp.exec.Cmd.Process == nil {
+				continue
+			}
+
+			if err := lcp.exec.Cmd.Process.Signal(sig); err != nil && !goerrors.Is(err, os.ErrProcessDone) {
+				log.Warningf("Error during %v signal transmission to pid %v: %v", sig, lcp.exec.Cmd.Process.Pid, err)
+			}
+		}
+	}
+}
+
+// runProcessCommand schedules the container command line (and/or bash) in
+// errgroup g. The command line is only run directly when there are no process
+// services (otherwise it is passed to them as arguments); with no services
+// and no command line, bash is run instead.
+func (lc *lifecycle) runProcessCommand(ctx context.Context, g *errgroup.Group) {
+
+	log.Trace("lifecycle.processCommand called")
+
+	var cmds [][]string
+
+	runBash := lc.options.RunBash
+
+	// no service to run
+	if len(lc.processServices) == 0 {
+
+		// empty command line: force to run bash
+		if len(lc.options.Args) == 0 {
+			runBash = true
+		} else {
+			// else run command line
+			cmds = append(cmds, lc.options.Args)
+		}
+	}
+
+	// add bash to commands to run
+	if runBash {
+		cmds = append(cmds, []string{"bash"})
+	}
+
+	for _, cmd := range cmds {
+		// Capture the loop variable for the goroutine below.
+		cmd := cmd
+
+		g.Go(func() error {
+			exec := lc.exec(ctx).Command(cmd[0], cmd[1:]...)
+			return exec.Run()
+		})
+	}
+}
+
+// finish runs each finish service's finish file sequentially, stopping at the
+// first failure.
+func (lc *lifecycle) finish(ctx context.Context) error {
+
+	log.Trace("lifecycle.finish called")
+
+	for _, svc := range lc.finishServices {
+		err := lc.exec(ctx).Command(svc.FinishFile()).Run()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// exit is the deferred final step of run: it cancels the run context, runs
+// the pre-exit commands (recording their exit code on failure) and, when
+// configured, terminates all remaining processes.
+func (lc *lifecycle) exit(ctx context.Context, ctxCancelFunc context.CancelFunc) {
+
+	log.Tracef("lifecycle.exit called with ctxCancelFunc: %v", ctxCancelFunc)
+
+	log.Trace("Calling ctxCancelFunc ...")
+	ctxCancelFunc()
+
+	log.Debug("Starting exit lifecycle step ...")
+	if err := lc.execPreCommands(ctx, lc.options.PreExitCmds, "exit"); err != nil {
+		lc.exitCode = errors.ExitCode(err)
+
+		log.Error(err.Error())
+	}
+
+	if lc.options.TerminateAllOnExit {
+		lc.killAll()
+	}
+
+	log.Info("Exiting ...")
+}
+
+// killAll terminates every other process on the system: it sends SIGTERM to
+// all, arms a SIGKILL fallback after the configured timeout, and polls until
+// no child process remains. It refuses to run unless the current process is
+// pid 1 (i.e. inside a container).
+func (lc *lifecycle) killAll() {
+
+	log.Trace("lifecycle.killAll called")
+
+	// safety: do not kill all processes if container-baseimage is run
+	// outside a container
+	if os.Getpid() != 1 {
+		log.Warning("Current process is not pid 1: ignoring terminating all processes ...")
+		return
+	}
+
+	// if no other process is running, return
+	if pids, _ := helpers.ListPIDs(); len(pids) == 0 {
+		return
+	}
+
+	timeout := lc.options.TerminateAllOnExitTimeout
+	log.Infof("Terminating all processes (timeout: %v) ...", timeout)
+
+	if err := helpers.KillAll(syscall.SIGTERM); err != nil {
+		log.Errorf("Error terminating all processes: %v", err.Error())
+	}
+
+	// SIGKILL escalation fires only if the polling loop below is still
+	// running when the timeout elapses.
+	timer := time.AfterFunc(timeout, func() {
+		log.Info("Terminating all processes: timeout reached, killing all processes ...")
+		if err := helpers.KillAll(syscall.SIGKILL); err != nil {
+			log.Errorf("Error killing all processes: %v", err.Error())
+		}
+	})
+	defer timer.Stop()
+
+	// Poll until every child process is gone.
+	for {
+		pids, _ := helpers.ListPIDs()
+		childs := len(pids)
+		if childs == 0 {
+			break
+		}
+		log.Debugf("%v child processes still running ...", childs)
+		log.Tracef("child processes: %v ...", pids)
+		time.Sleep(250 * time.Millisecond)
+	}
+}
+
+// execPreCommands runs the given shell-like commands sequentially for the
+// named lifecycle step, stopping at the first split or execution error.
+func (lc *lifecycle) execPreCommands(ctx context.Context, commands []string, step string) error {
+
+	log.Tracef("lifecycle.execPreCommand called with commands: %v, step: %v", commands, step)
+
+	if len(commands) == 0 {
+		return nil
+	}
+
+	log.Infof("Running pre-%v commands...", step)
+
+	for _, command := range commands {
+
+		shCmd, err := shlex.Split(command)
+		if err != nil {
+			return err
+		}
+
+		// Bug fix: an empty or whitespace-only command splits to an empty
+		// slice; indexing shCmd[0] would panic. Skip such entries.
+		if len(shCmd) == 0 {
+			continue
+		}
+
+		if err := lc.exec(ctx).Command(shCmd[0], shCmd[1:]...).Run(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/core/processes.go b/core/processes.go
new file mode 100644
index 00000000..08a08781
--- /dev/null
+++ b/core/processes.go
@@ -0,0 +1,450 @@
+package core
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+ "time"
+
+ "github.com/fsnotify/fsnotify"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/helpers"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Processes config
+// =============================
+
+// ProcessesConfig holds the naming conventions for process runtime files.
+type ProcessesConfig struct {
+	// PIDFileSuffix is appended to a process name to form its pid file name.
+	PIDFileSuffix string
+	// WantedDownFileSuffix is appended to a process name to form its
+	// wanted-down marker file name.
+	WantedDownFileSuffix string
+	// TagsDir is the per-process directory holding tag marker files.
+	TagsDir string
+}
+
+// Validate checks that all required fields are set. It returns true on
+// success, or false and an error naming the first missing field.
+func (prcsc *ProcessesConfig) Validate() (bool, error) {
+	if prcsc.PIDFileSuffix == "" {
+		return false, fmt.Errorf("PIDFileSuffix: %w", errors.ErrRequired)
+	}
+
+	if prcsc.WantedDownFileSuffix == "" {
+		return false, fmt.Errorf("WantedDownFileSuffix: %w", errors.ErrRequired)
+	}
+
+	if prcsc.TagsDir == "" {
+		return false, fmt.Errorf("TagsDir: %w", errors.ErrRequired)
+	}
+
+	return true, nil
+}
+
+// Processes list options
+// =============================
+
+// ProcessesListOptions holds the selection and filter criteria used by
+// processes.List. A nil slice means "no constraint" while a non-nil empty
+// slice means "match nothing" (see the check in List).
+type ProcessesListOptions struct {
+	// Names selects candidate processes by name.
+	Names []string
+
+	// TagPrefixInNames, when set, treats names starting with this prefix as
+	// tag selectors instead of plain names.
+	TagPrefixInNames string
+
+	// Tags selects candidate processes by tag.
+	Tags []string
+
+	// Up / WantedDown filter candidates by their runtime state when non-nil.
+	Up         *bool
+	WantedDown *bool
+}
+
+// WithProcessesNames appends names to the selection. It always makes the
+// Names slice non-nil so that an empty input still means "match nothing".
+func WithProcessesNames(names []string) ProcessesListOption {
+	return func(prcslo *ProcessesListOptions) {
+		if prcslo.Names == nil {
+			prcslo.Names = []string{}
+		}
+		prcslo.Names = append(prcslo.Names, names...)
+	}
+}
+
+// HandleProcessesTagPrefixInNames enables tag-selector handling for names
+// starting with tagPrefix.
+func HandleProcessesTagPrefixInNames(tagPrefix string) ProcessesListOption {
+	return func(prcslo *ProcessesListOptions) {
+		prcslo.TagPrefixInNames = tagPrefix
+	}
+}
+
+// WithProcessesTags appends tags to the selection. It always makes the Tags
+// slice non-nil so that an empty input still means "match nothing".
+func WithProcessesTags(tags []string) ProcessesListOption {
+	return func(prcslo *ProcessesListOptions) {
+		if prcslo.Tags == nil {
+			prcslo.Tags = []string{}
+		}
+		prcslo.Tags = append(prcslo.Tags, tags...)
+	}
+}
+
+// WithProcessesUp filters processes by whether they are up.
+func WithProcessesUp(b bool) ProcessesListOption {
+	return func(prcslo *ProcessesListOptions) {
+		prcslo.Up = &b
+	}
+}
+
+// WithProcessesWantedDown filters processes by their wanted-down state.
+func WithProcessesWantedDown(b bool) ProcessesListOption {
+	return func(prcslo *ProcessesListOptions) {
+		prcslo.WantedDown = &b
+	}
+}
+
+// ProcessesListOption mutates a ProcessesListOptions.
+type ProcessesListOption func(*ProcessesListOptions)
+
+// Processes
+// =============================
+
+// Processes manages the runtime process registry backed by marker files in
+// the run process directory.
+type Processes interface {
+	New(s Service) (Process, error)
+
+	Get(name string) Process
+	List(opts ...ProcessesListOption) ([]Process, error)
+
+	Start(ps []Process) error
+	Stop(ps []Process) error
+
+	NewWatcher() (*fsnotify.Watcher, error)
+}
+
+// processes is the default Processes implementation.
+type processes struct {
+	fs Filesystem
+
+	config *ProcessesConfig
+}
+
+// newProcesses validates prcsc and builds a Processes registry on top of the
+// given filesystem.
+func newProcesses(fs Filesystem, prcsc *ProcessesConfig) (Processes, error) {
+
+	if _, err := prcsc.Validate(); err != nil {
+		return nil, err
+	}
+
+	prcs := &processes{
+		fs:     fs,
+		config: prcsc,
+	}
+
+	return prcs, nil
+}
+
+// New registers a process for service s: it builds the process handle and
+// materialises one empty tag marker file per service tag.
+func (prcs *processes) New(s Service) (Process, error) {
+
+	log.Tracef("processes.New called with s: %v", s)
+
+	p := prcs.new(s.Name())
+
+	// create process tag files
+	for _, t := range s.Tags() {
+		tf := filepath.Join(p.tagsDir, t)
+		if _, err := helpers.Create(tf); err != nil {
+			return nil, err
+		}
+	}
+
+	return p, nil
+}
+
+// Get returns a process handle for the given name without creating any file.
+func (prcs *processes) Get(name string) Process {
+
+	log.Tracef("processes.Get called with name: %v", name)
+
+	return prcs.new(name)
+}
+
+// new builds a process handle, deriving its pid file, wanted-down file and
+// tags directory paths from the configured suffixes.
+func (prcs *processes) new(name string) *process {
+	rf := filepath.Join(prcs.fs.Paths().RunProcess, name)
+
+	pf := rf + prcs.config.PIDFileSuffix
+	df := rf + prcs.config.WantedDownFileSuffix
+
+	td := filepath.Join(prcs.fs.Paths().RunProcess, name, prcs.config.TagsDir)
+
+	return &process{
+		name:           name,
+		tagsDir:        td,
+		pidFile:        pf,
+		wantedDownFile: df,
+	}
+}
+
+// List returns the processes matching the given options. Candidates are
+// selected by name and/or tag (or by scanning the run process directory when
+// no names are given) and then filtered by up / wanted-down state and tags.
+func (prcs *processes) List(opts ...ProcessesListOption) ([]Process, error) {
+
+	log.Tracef("processes.List called with opts: %+v", opts)
+
+	prcslo := &ProcessesListOptions{}
+	for _, opt := range opts {
+		opt(prcslo)
+	}
+	log.Tracef("processes list options %+v", prcslo)
+
+	// A non-nil but empty names or tags selection matches nothing.
+	if (prcslo.Names != nil && len(prcslo.Names) == 0) || (prcslo.Tags != nil && len(prcslo.Tags) == 0) {
+		log.Trace("empty names or tags")
+		return nil, nil
+	}
+
+	// candidates
+	cp := make(map[string]Process)
+
+	// search tags in names
+	if prcslo.TagPrefixInNames != "" {
+		log.Tracef("search tags in name with prefix: \"%v\"", prcslo.TagPrefixInNames)
+		var tags []string
+		for _, name := range prcslo.Names {
+			if !strings.HasPrefix(name, prcslo.TagPrefixInNames) {
+				continue
+			}
+
+			// Bug fix: TrimLeft trims a character set, not a prefix, and
+			// could eat leading characters of the tag itself.
+			tag := strings.TrimPrefix(name, prcslo.TagPrefixInNames)
+			tags = append(tags, tag)
+		}
+		log.Tracef("tags found: %v", tags)
+
+		// search processes with those tags
+		tps, err := prcs.List(WithProcessesTags(tags))
+		if err != nil {
+			return nil, err
+		}
+
+		// add matching processes to candidates
+		for _, tp := range tps {
+			cp[tp.Name()] = tp
+		}
+
+		log.Tracef("tags services candidates: %v", cp)
+	}
+
+	// get services by names
+	if prcslo.Names != nil {
+		for _, name := range prcslo.Names {
+
+			// if already in candidates skip
+			if _, ok := cp[name]; ok {
+				continue
+			}
+
+			// get service by name
+			s := prcs.Get(name)
+
+			// add service to candidates
+			cp[s.Name()] = s
+		}
+	} else {
+		// search all processes in processes directory
+		processesDir := prcs.fs.Paths().RunProcess
+
+		files, err := os.ReadDir(processesDir)
+		if err != nil {
+			return nil, err
+		}
+		for _, file := range files {
+
+			if file.IsDir() {
+				log.Infof("Ignoring directory %v", file)
+				continue
+			}
+
+			fn := file.Name()
+
+			var name string
+
+			// Derive the process name from its marker file name.
+			// Bug fix: TrimRight trims a character set, not a suffix, and
+			// mangled names (e.g. "httpd" + ".pid" lost its trailing "d").
+			if strings.HasSuffix(fn, prcs.config.PIDFileSuffix) {
+				name = strings.TrimSuffix(fn, prcs.config.PIDFileSuffix)
+			} else if strings.HasSuffix(fn, prcs.config.WantedDownFileSuffix) {
+				name = strings.TrimSuffix(fn, prcs.config.WantedDownFileSuffix)
+			} else {
+				log.Infof("Ignoring file %v: not matching %v or %v files suffix", file, prcs.config.PIDFileSuffix, prcs.config.WantedDownFileSuffix)
+				continue
+			}
+
+			if name == "" {
+				log.Warningf("Failed to get name from filename %v", fn)
+				continue
+			}
+
+			// if already in candidates skip
+			if _, ok := cp[name]; ok {
+				continue
+			}
+
+			// get process by name
+			cp[name] = prcs.Get(name)
+		}
+	}
+
+	// filter candidates
+	var ps []Process
+
+	for _, c := range cp {
+
+		// filter up processes
+		if prcslo.Up != nil && *prcslo.Up != c.IsUp() {
+			continue
+		}
+
+		// filter wanted-down processes
+		if prcslo.WantedDown != nil && *prcslo.WantedDown != c.IsWantedDown() {
+			continue
+		}
+
+		// filter tags: keep only candidates carrying at least one wanted tag.
+		// Bug fix: the condition was inverted and dropped matching processes.
+		if prcslo.Tags != nil && !c.HasTag(prcslo.Tags...) {
+			continue
+		}
+
+		ps = append(ps, c)
+	}
+
+	return ps, nil
+}
+
+// Start clears the wanted-down mark of each process and blocks (polling every
+// second) until its pid file appears. Note: it blocks indefinitely if the
+// process never comes up.
+func (prcs *processes) Start(ps []Process) error {
+
+	log.Tracef("processes.Start called with ps: %+v", ps)
+
+	for _, p := range ps {
+
+		log.Infof("Starting %v ...", p.Name())
+
+		if err := p.SetWantedDown(false); err != nil {
+			return err
+		}
+
+		for !p.IsUp() {
+			log.Debugf("Waiting %v to be up", p.Name())
+			time.Sleep(1 * time.Second)
+		}
+	}
+
+	return nil
+}
+
+// Stop sets the wanted-down mark of each process and blocks (polling every
+// second) until its pid file disappears. Note: it blocks indefinitely if the
+// process never goes down.
+func (prcs *processes) Stop(ps []Process) error {
+
+	log.Tracef("processes.Stop called with ps: %+v", ps)
+
+	for _, p := range ps {
+
+		log.Infof("Stoping %v ...", p.Name())
+
+		if err := p.SetWantedDown(true); err != nil {
+			return err
+		}
+
+		for p.IsUp() {
+			log.Debugf("Waiting %v to be down", p.Name())
+			time.Sleep(1 * time.Second)
+		}
+	}
+
+	return nil
+}
+
+// NewWatcher returns a filesystem watcher on the run process directory, used
+// to react to wanted-down marker file creation.
+func (prcs *processes) NewWatcher() (*fsnotify.Watcher, error) {
+	return helpers.NewFSWatcher(prcs.fs.Paths().RunProcess)
+}
+
+// Processes
+// =============================
+
+// Process is a handle on one managed process, backed entirely by marker
+// files (pid file, wanted-down file, tag files).
+type Process interface {
+	Name() string
+
+	Tags() []string
+	HasTag(tags ...string) bool
+
+	IsUp() bool
+
+	SetWantedDown(b bool) error
+	IsWantedDown() bool
+
+	PIDFile() string
+	WantedDownFile() string
+
+	Status() string
+}
+
+// process is the default Process implementation.
+type process struct {
+	name string
+
+	// tags caches the tag names read from tagsDir (lazily, see Tags).
+	tags    []string
+	tagsDir string
+
+	// pidFile existing means the process is up; wantedDownFile existing
+	// means the process should be (kept) down.
+	pidFile        string
+	wantedDownFile string
+}
+
+// Name returns the process name.
+func (p *process) Name() string {
+	return p.name
+}
+
+// Tags returns the process tags, lazily read from the tags directory and
+// cached. A missing tags directory means the process has no tags.
+func (p *process) Tags() []string {
+	if p.tags != nil {
+		return p.tags
+	}
+
+	files, err := os.ReadDir(p.tagsDir)
+	if os.IsNotExist(err) {
+		// Bug fix: a process registered without tags has no tags directory;
+		// treat that as "no tags" instead of aborting the whole program.
+		return nil
+	}
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
+	for _, file := range files {
+		// Only regular files are tag markers.
+		if !file.Type().IsRegular() {
+			continue
+		}
+		p.tags = append(p.tags, file.Name())
+	}
+
+	return p.tags
+}
+
+// HasTag reports whether the process carries at least one of the given tags.
+func (p *process) HasTag(tags ...string) bool {
+	for _, wanted := range tags {
+		if slices.Contains(p.Tags(), wanted) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsUp reports whether the process pid file exists.
+func (p *process) IsUp() bool {
+	ok, _ := helpers.IsFile(p.pidFile)
+	return ok
+}
+
+func (p *process) SetWantedDown(b bool) error {
+
+ if b {
+ // already wanted down
+ if p.IsWantedDown() {
+ return nil
+ }
+
+ _, err := helpers.Create(p.wantedDownFile)
+ return err
+ }
+
+ // already not wanted down
+ if !p.IsWantedDown() {
+ return nil
+ }
+
+ return helpers.Remove(p.wantedDownFile)
+}
+
+// IsWantedDown reports whether the wanted-down marker file exists.
+func (p *process) IsWantedDown() bool {
+	ok, _ := helpers.IsFile(p.wantedDownFile)
+	return ok
+}
+
+// PIDFile returns the path of the process pid file.
+func (p *process) PIDFile() string {
+	return p.pidFile
+}
+
+// WantedDownFile returns the path of the wanted-down marker file.
+func (p *process) WantedDownFile() string {
+	return p.wantedDownFile
+}
+
+// Status returns a one-line human-readable summary of the process state.
+func (p *process) Status() string {
+	return fmt.Sprintf("Name:%v Up:%v WantedDown:%v", p.Name(), p.IsUp(), p.IsWantedDown())
+}
diff --git a/core/services.go b/core/services.go
new file mode 100644
index 00000000..360e0f7a
--- /dev/null
+++ b/core/services.go
@@ -0,0 +1,776 @@
+package core
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/helpers"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Services config
+// =============================
+
+type ServicesConfig struct {
+ TagsDir string
+
+ DefaultPriority int
+ PriorityFilename string
+
+ OptionalFilename string
+
+ DownloadFilename string
+ DownloadedFilename string
+
+ InstallFilename string
+ InstalledFilename string
+
+ StartupFilename string
+ ProcessFilename string
+ FinishFilename string
+
+ LinkedFilename string
+}
+
+func (svcsc *ServicesConfig) Validate() (bool, error) {
+	if svcsc.TagsDir == "" {
+		return false, fmt.Errorf("TagsDir: %w", errors.ErrRequired)
+	}
+
+	if svcsc.PriorityFilename == "" {
+		return false, fmt.Errorf("PriorityFilename: %w", errors.ErrRequired)
+	}
+
+	if svcsc.OptionalFilename == "" {
+		return false, fmt.Errorf("OptionalFilename: %w", errors.ErrRequired)
+	}
+
+	if svcsc.DownloadFilename == "" {
+		return false, fmt.Errorf("DownloadFilename: %w", errors.ErrRequired)
+	}
+	if svcsc.DownloadedFilename == "" {
+		return false, fmt.Errorf("DownloadedFilename: %w", errors.ErrRequired)
+	}
+
+	if svcsc.InstallFilename == "" {
+		return false, fmt.Errorf("InstallFilename: %w", errors.ErrRequired)
+	}
+	if svcsc.InstalledFilename == "" {
+		return false, fmt.Errorf("InstalledFilename: %w", errors.ErrRequired)
+	}
+
+	if svcsc.StartupFilename == "" {
+		return false, fmt.Errorf("StartupFilename: %w", errors.ErrRequired)
+	}
+	if svcsc.ProcessFilename == "" {
+		return false, fmt.Errorf("ProcessFilename: %w", errors.ErrRequired)
+	}
+	if svcsc.FinishFilename == "" {
+		return false, fmt.Errorf("FinishFilename: %w", errors.ErrRequired)
+	}
+
+	if svcsc.LinkedFilename == "" {
+		return false, fmt.Errorf("LinkedFilename: %w", errors.ErrRequired)
+	}
+
+	return true, nil
+}
+
+// Services list options
+// =============================
+
+type ServicesListOptions struct {
+ Names []string
+
+ TagPrefixInNames string
+
+ Tags []string
+
+ Optional *bool
+ Downloaded *bool
+ Installed *bool
+ Linked *bool
+
+ SortByPriority *bool
+}
+
+func WithServicesNames(names []string) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ if svcslo.Names == nil {
+ svcslo.Names = []string{}
+ }
+ svcslo.Names = append(svcslo.Names, names...)
+ }
+}
+
+func HandleServicesTagPrefixInNames(tagPrefix string) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ svcslo.TagPrefixInNames = tagPrefix
+ }
+}
+
+func WithServicesTags(tags []string) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ if svcslo.Tags == nil {
+ svcslo.Tags = []string{}
+ }
+ svcslo.Tags = append(svcslo.Tags, tags...)
+ }
+}
+
+func WithServicesOptional(b bool) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ svcslo.Optional = &b
+ }
+}
+
+func WithServicesDownloaded(b bool) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ svcslo.Downloaded = &b
+ }
+}
+
+func WithServicesInstalled(b bool) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ svcslo.Installed = &b
+ }
+}
+
+func WithServicesLinked(b bool) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ svcslo.Linked = &b
+ }
+}
+
+func SortServicesByPriority(b bool) ServicesListOption {
+ return func(svcslo *ServicesListOptions) {
+ svcslo.SortByPriority = &b
+ }
+}
+
+type ServicesListOption func(*ServicesListOptions)
+
+// Services
+// =============================
+
+type Services interface {
+ Get(name string) (Service, error)
+
+ Exists(name string) (bool, error)
+
+ List(opts ...ServicesListOption) ([]Service, error)
+ SortByPriority(ss []Service)
+ JoinNames(ss []Service, sep string) string
+
+ Require(ss []Service) error
+ Download(ctx context.Context, ss []Service) error
+ Install(ctx context.Context, ss []Service) error
+ Link(ss []Service) error
+ Unlink(ss []Service) error
+
+ Config() *ServicesConfig
+}
+
+type services struct {
+ fs Filesystem
+
+ config *ServicesConfig
+}
+
+func newServices(fs Filesystem, svcsc *ServicesConfig) (Services, error) {
+
+ if _, err := svcsc.Validate(); err != nil {
+ return nil, err
+ }
+
+ return &services{
+ fs: fs,
+ config: svcsc,
+ }, nil
+}
+
+func (svcs *services) Get(name string) (Service, error) {
+
+ if _, err := svcs.Exists(name); err != nil {
+ return nil, err
+ }
+
+ d := filepath.Join(svcs.fs.Paths().Services, name)
+
+ s := &service{
+ name: name,
+
+ tagsDir: filepath.Join(d, svcs.config.TagsDir),
+
+ defaultPriority: svcs.config.DefaultPriority,
+ priorityFile: filepath.Join(d, svcs.config.PriorityFilename),
+
+ optionalFile: filepath.Join(d, svcs.config.OptionalFilename),
+
+ downloadFile: filepath.Join(d, svcs.config.DownloadFilename),
+ downloadedFile: filepath.Join(d, svcs.config.DownloadedFilename),
+
+ installFile: filepath.Join(d, svcs.config.InstallFilename),
+ installedFile: filepath.Join(d, svcs.config.InstalledFilename),
+
+ startupFile: filepath.Join(d, svcs.config.StartupFilename),
+ processFile: filepath.Join(d, svcs.config.ProcessFilename),
+ finishFile: filepath.Join(d, svcs.config.FinishFilename),
+
+ linkedFile: filepath.Join(d, svcs.config.LinkedFilename),
+ }
+
+ return s, nil
+}
+
+func (svcs *services) Exists(name string) (bool, error) {
+
+ log.Tracef("services.Exists called with name: %v", name)
+
+ d := filepath.Join(svcs.fs.Paths().Services, name)
+
+ if ok, err := helpers.IsDir(d); err != nil && !os.IsNotExist(err) {
+ return false, err
+ } else if os.IsNotExist(err) || !ok {
+ return false, fmt.Errorf("%v: service %w", name, errors.ErrUnknown)
+ }
+
+ return true, nil
+}
+
+func (svcs *services) List(opts ...ServicesListOption) ([]Service, error) {
+
+	log.Tracef("services.List called with opts: %+v", opts)
+
+	svcslo := &ServicesListOptions{}
+	for _, opt := range opts {
+		opt(svcslo)
+	}
+	log.Tracef("services list options %+v", svcslo)
+
+	if (svcslo.Names != nil && len(svcslo.Names) == 0) || (svcslo.Tags != nil && len(svcslo.Tags) == 0) {
+		log.Trace("empty names or tags")
+		return nil, nil
+	}
+
+	// candidates
+	cs := make(map[string]Service)
+
+	// search tags in names
+	if svcslo.TagPrefixInNames != "" {
+		log.Tracef("search tags in name with prefix: \"%v\"", svcslo.TagPrefixInNames)
+		var tags []string
+		for _, name := range svcslo.Names {
+			if !strings.HasPrefix(name, svcslo.TagPrefixInNames) {
+				continue
+			}
+
+			tag := strings.TrimPrefix(name, svcslo.TagPrefixInNames) // TrimPrefix, not TrimLeft: TrimLeft strips a character *cutset* and would mangle names
+			tags = append(tags, tag)
+		}
+		log.Tracef("tags found: %v", tags)
+
+		// search services with those tags
+		tss, err := svcs.List(WithServicesTags(tags))
+		if err != nil {
+			return nil, err
+		}
+
+		// add matching services to candidates
+		for _, ts := range tss {
+			cs[ts.Name()] = ts
+		}
+
+		log.Tracef("tags services candidates: %v", cs)
+	}
+
+	// get services by names
+	if svcslo.Names != nil {
+		for _, name := range svcslo.Names {
+
+			// not really a name but a tag that has already been handled above (skip)
+			if svcslo.TagPrefixInNames != "" && strings.HasPrefix(name, svcslo.TagPrefixInNames) {
+				continue
+			}
+
+			// if already in candidates (skip)
+			if _, ok := cs[name]; ok {
+				continue
+			}
+
+			// get service by name
+			s, err := svcs.Get(name)
+			if err != nil {
+				return nil, err
+			}
+
+			// add service to candidates
+			cs[s.Name()] = s
+		}
+	} else {
+		// search all services in services directory
+		servicesDir := svcs.fs.Paths().Services
+
+		subdirs, err := os.ReadDir(servicesDir)
+		if err != nil {
+			return nil, err
+		}
+		for _, subDir := range subdirs {
+
+			if !subDir.IsDir() {
+				log.Infof("Ignoring %v: not a directory", subDir.Name())
+				continue
+			}
+
+			name := subDir.Name()
+
+			// if already in candidates (skip)
+			if _, ok := cs[name]; ok {
+				continue
+			}
+
+			// get service by name
+			s, err := svcs.Get(name)
+			if err != nil {
+				return nil, err
+			}
+
+			// add service to candidates
+			cs[s.Name()] = s
+		}
+	}
+
+	// filter candidates
+	var ss []Service
+
+	for _, c := range cs {
+
+		// filter linked services
+		if svcslo.Linked != nil && *svcslo.Linked != c.IsLinked() {
+			continue
+		}
+
+		// filter installed services
+		if svcslo.Installed != nil && *svcslo.Installed != c.IsInstalled() {
+			continue
+		}
+
+		// filter downloaded services
+		if svcslo.Downloaded != nil && *svcslo.Downloaded != c.IsDownloaded() {
+			continue
+		}
+
+		// filter optional services
+		if svcslo.Optional != nil && *svcslo.Optional != c.IsOptional() {
+			continue
+		}
+
+		// filter tags
+		if svcslo.Tags != nil && !c.HasTag(svcslo.Tags...) {
+			continue
+		}
+
+		ss = append(ss, c)
+	}
+
+	// sort services
+	if svcslo.SortByPriority != nil && *svcslo.SortByPriority {
+		svcs.SortByPriority(ss)
+	}
+
+	return ss, nil
+}
+
+func (svcs *services) SortByPriority(ss []Service) {
+
+ log.Tracef("services.SortByPriority called with services: %v", ss)
+
+ sort.Slice(ss, func(i, j int) bool {
+ // if the priorities are not equals: sort by priority
+ if ss[i].Priority() != ss[j].Priority() {
+ return ss[i].Priority() < ss[j].Priority()
+ }
+ // else sort alphabetically
+ return ss[i].Name() < ss[j].Name()
+ })
+}
+
+func (svcs *services) JoinNames(ss []Service, sep string) string {
+ names := make([]string, 0, len(ss))
+
+ for _, s := range ss {
+ names = append(names, s.Name())
+ }
+
+ return strings.Join(names, sep)
+}
+
+func (svcs *services) Require(ss []Service) error {
+
+	log.Tracef("services.Require called with services: %v", svcs.JoinNames(ss, ", ")) // JoinNames for readable output, consistent with Install/Link/Unlink
+
+	for _, s := range ss {
+		if err := s.Require(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (svcs *services) Download(ctx context.Context, ss []Service) error {
+
+	log.Tracef("services.Download called with services: %v", svcs.JoinNames(ss, ", ")) // JoinNames for readable output, consistent with Install/Link/Unlink
+
+	subCtx, cancelCtx := context.WithCancel(ctx)
+	defer cancelCtx()
+
+	for _, s := range ss {
+		if err := s.Download(subCtx); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (svcs *services) Install(ctx context.Context, ss []Service) error {
+
+ log.Tracef("services.Install called with services: %v", svcs.JoinNames(ss, ", "))
+
+ subCtx, cancelCtx := context.WithCancel(ctx)
+ defer cancelCtx()
+
+ for _, s := range ss {
+ if err := s.Install(subCtx); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (svcs *services) Link(ss []Service) error {
+
+ log.Tracef("services.Link called with services: %v", svcs.JoinNames(ss, ", "))
+
+ for _, s := range ss {
+ if err := s.Link(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (svcs *services) Unlink(ss []Service) error {
+
+ log.Tracef("services.Unlink called with services: %v", svcs.JoinNames(ss, ", "))
+
+ for _, s := range ss {
+ if err := s.Unlink(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (svcs *services) Config() *ServicesConfig {
+ return svcs.config
+}
+
+// Service
+// =============================
+
+type Service interface {
+ Name() string
+
+ Tags() []string
+ HasTag(tags ...string) bool
+
+ Priority() int
+
+ Require() error
+ IsOptional() bool
+
+ Download(ctx context.Context) error
+ DownloadFile() string
+ IsDownloaded() bool
+
+ Install(ctx context.Context) error
+ InstallFile() string
+ IsInstalled() bool
+
+ StartupFile() string
+ ProcessFile() string
+ FinishFile() string
+
+ Link() error
+ Unlink() error
+ IsLinked() bool
+
+ Status() string
+}
+
+type service struct {
+ name string
+
+ tags []string
+ tagsDir string
+
+ defaultPriority int
+
+ priority *int
+ priorityFile string
+
+ optionalFile string
+
+ downloadFile string
+ downloadedFile string
+
+ installFile string
+ installedFile string
+
+ startupFile string
+ processFile string
+ finishFile string
+
+ linkedFile string
+}
+
+func (s *service) Name() string {
+ return s.name
+}
+
+func (s *service) Tags() []string {
+ if s.tags != nil {
+ return s.tags
+ }
+
+ files, err := os.ReadDir(s.tagsDir)
+ if os.IsNotExist(err) {
+ s.tags = []string{}
+ return s.tags
+ } else if err != nil {
+ log.Fatal(err.Error())
+ }
+
+ for _, file := range files {
+ if !file.Type().IsRegular() {
+ continue
+ }
+ s.tags = append(s.tags, file.Name())
+ }
+
+ return s.tags
+}
+
+func (s *service) HasTag(tags ...string) bool {
+ for _, t := range tags {
+ if slices.Contains(s.Tags(), t) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s *service) Priority() int {
+
+ log.Trace("service.Priority called")
+
+ if s.priority != nil {
+ return *s.priority
+ }
+
+ file, err := os.Open(s.priorityFile)
+ if err != nil {
+ log.Debugf("%v file not found. Using default priority %v for service %v", s.priorityFile, s.defaultPriority, s.name)
+ return s.defaultPriority
+ }
+
+ defer file.Close()
+
+ reader := bufio.NewReader(file)
+
+ line, _, err := reader.ReadLine()
+ if err != nil {
+ log.Debug(err.Error())
+ return s.defaultPriority
+ }
+
+ priority, err := strconv.Atoi(strings.TrimSpace(string(line)))
+ if err != nil {
+ log.Debug(err.Error())
+ return s.defaultPriority
+ }
+
+ s.priority = &priority
+
+ return *s.priority
+}
+
+func (s *service) Require() error {
+
+ log.Trace("service.Require called")
+
+ log.Infof("Requiring %v service ...", s.Name())
+
+ if !s.IsOptional() {
+ log.Warningf("Service %v is not marked as optional", s.Name())
+ return nil
+ }
+
+ if err := helpers.Remove(s.optionalFile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *service) IsOptional() bool {
+ ok, _ := helpers.IsFile(s.optionalFile)
+ return ok
+}
+
+func (s *service) Download(ctx context.Context) error {
+
+	log.Trace("service.Download called")
+
+	log.Infof("Downloading %v service ...", s.Name())
+
+	if s.IsDownloaded() {
+		log.Warningf("Service %v already downloaded", s.Name()) // NOTE(review): warns but still re-runs the download script below — confirm intended
+	}
+
+	if s.DownloadFile() == "" {
+		return nil
+	}
+
+	if err := helpers.NewExec(ctx).Command(s.DownloadFile()).Run(); err != nil {
+		return err
+	}
+
+	if _, err := helpers.Create(s.downloadedFile); err != nil {
+		return err
+	} // marker file: IsDownloaded() keys off its presence
+
+	return nil
+}
+
+func (s *service) DownloadFile() string {
+ return s.filePath(s.downloadFile)
+}
+
+func (s *service) IsDownloaded() bool {
+ ok, _ := helpers.IsFile(s.downloadedFile)
+ return ok
+}
+
+func (s *service) Install(ctx context.Context) error {
+
+ log.Trace("service.Install called")
+
+ log.Infof("Installing %v service...", s.Name())
+
+ if s.IsInstalled() {
+ log.Warningf("Service %v already installed", s.Name())
+ }
+
+ if s.InstallFile() == "" {
+ return nil
+ }
+
+ if err := helpers.NewExec(ctx).Command(s.InstallFile()).Run(); err != nil {
+ return err
+ }
+
+ if _, err := helpers.Create(s.installedFile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *service) InstallFile() string {
+ return s.filePath(s.installFile)
+}
+
+func (s *service) IsInstalled() bool {
+ ok, _ := helpers.IsFile(s.installedFile)
+ return ok
+}
+
+func (s *service) StartupFile() string {
+ return s.filePath(s.startupFile)
+}
+
+func (s *service) ProcessFile() string {
+ return s.filePath(s.processFile)
+}
+
+func (s *service) FinishFile() string {
+ return s.filePath(s.finishFile)
+}
+
+func (s *service) Link() error {
+
+ log.Trace("services.Link called")
+
+ log.Infof("Linking %v service to entrypoint ...", s.Name())
+
+ if s.IsLinked() {
+ log.Warningf("Service %v already linked", s.Name())
+ return nil
+ }
+
+ if _, err := helpers.Create(s.linkedFile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *service) Unlink() error {
+
+ log.Trace("services.Unlink called")
+
+ log.Infof("Unlinking %v service to entrypoint ...", s.Name())
+
+ if !s.IsLinked() {
+ log.Warningf("Service %v not linked", s.Name())
+ return nil
+ }
+
+ if err := helpers.Remove(s.linkedFile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *service) IsLinked() bool {
+ ok, _ := helpers.IsFile(s.linkedFile)
+ return ok
+}
+
+func (s *service) Status() string {
+ return fmt.Sprintf("Name:%v Optional:%v Downloaded:%v Installed:%v Linked:%v Tags:%v\n", s.Name(), s.IsOptional(), s.IsDownloaded(), s.IsInstalled(), s.IsLinked(), s.Tags())
+}
+
+func (s *service) filePath(file string) string {
+ if ok, _ := helpers.IsFile(file); !ok {
+ return ""
+ }
+
+ return file
+}
diff --git a/debian/assets/bin/install-debug-packages b/debian/assets/bin/install-debug-packages
new file mode 100644
index 00000000..aaa42540
--- /dev/null
+++ b/debian/assets/bin/install-debug-packages
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+# Usage:
+# install-debug-packages [extra package 1] [extra package 2] ...
+
+container-logger level eq trace && set -x
+
+packages-index-update # plural name: matches the sibling script (singular "package-index-update" does not exist)
+packages-install-clean "$@" ${CONTAINER_DEBUG_PACKAGES}
diff --git a/debian/assets/bin/packages-index-clean b/debian/assets/bin/packages-index-clean
new file mode 100755
index 00000000..ef642e77
--- /dev/null
+++ b/debian/assets/bin/packages-index-clean
@@ -0,0 +1,13 @@
+#!/bin/bash -e
+
+# Usage:
+# packages-index-clean
+
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+container-logger info "Clean packages index"
+apt -qy autoremove 2>&1 | container-logger debug
+apt clean 2>&1 | container-logger debug
+rm -rf /var/lib/apt/lists/*
diff --git a/debian/assets/bin/packages-index-update b/debian/assets/bin/packages-index-update
new file mode 100755
index 00000000..e3fd7e70
--- /dev/null
+++ b/debian/assets/bin/packages-index-update
@@ -0,0 +1,11 @@
+#!/bin/bash -e
+
+# Usage:
+# packages-index-update
+
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+container-logger info "Update packages index"
+apt -qy update 2>&1 | container-logger info
diff --git a/debian/assets/bin/packages-install b/debian/assets/bin/packages-install
new file mode 100755
index 00000000..8ca3ef53
--- /dev/null
+++ b/debian/assets/bin/packages-install
@@ -0,0 +1,22 @@
+#!/bin/bash -e
+
+# Usage:
+# packages-install [package 1] [package 2] ...
+
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+export LC_ALL=C
+export DEBIAN_FRONTEND=noninteractive
+
+ARGS="$*"  # NOTE(review): flattening "$@" to a string loses quoting; package names with spaces are unsupported
+NO_RECOMMENDS="--no-install-recommends"
+RECOMMENDS="--install-recommends"
+if [[ ${ARGS} =~ ${RECOMMENDS} ]]; then  # passing --install-recommends anywhere in the args disables --no-install-recommends
+  NO_RECOMMENDS=""
+  ARGS=${ARGS//${RECOMMENDS}/}  # strip the flag before handing the list to apt
+fi
+
+container-logger info "Install packages: ${ARGS}"
+eval apt install -qy "${NO_RECOMMENDS}" "${ARGS}" 2>&1 | container-logger info  # NOTE(review): eval re-splits ARGS; never feed untrusted input
diff --git a/debian/assets/bin/packages-install-clean b/debian/assets/bin/packages-install-clean
new file mode 100755
index 00000000..1d8008b8
--- /dev/null
+++ b/debian/assets/bin/packages-install-clean
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+# Usage:
+# packages-install-clean [package]...
+
+container-logger level eq trace && set -x
+
+packages-install "$@"
+packages-index-clean
diff --git a/debian/assets/bin/packages-remove b/debian/assets/bin/packages-remove
new file mode 100755
index 00000000..f5a64b44
--- /dev/null
+++ b/debian/assets/bin/packages-remove
@@ -0,0 +1,12 @@
+#!/bin/bash -e
+
+# Usage:
+# packages-remove [package 1] [package 2] ...
+
+container-logger level eq trace && set -x
+
+set -o pipefail
+
+container-logger info "Remove packages: $*"
+apt remove -qy --purge "$@" 2>&1 | container-logger debug
+apt -qy autoremove 2>&1 | container-logger debug
diff --git a/debian/assets/install.sh b/debian/assets/install.sh
new file mode 100755
index 00000000..27d8c8a7
--- /dev/null
+++ b/debian/assets/install.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+# Install required packages.
+packages-index-update
+packages-install-clean bash-completion locales eatmydata
+
+# Set locale.
+echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
+locale-gen en_US.UTF-8 2>&1 | container-logger info
+update-locale LANG=en_US.UTF-8 LC_CTYPE=en_US.UTF-8
+
+# Add container-baseimage bash completion.
+container-baseimage completion bash > /usr/share/bash-completion/completions/container-baseimage
+echo ". /etc/profile.d/bash_completion.sh" >> /root/.bashrc
+
+# Clean.
+rm -rf /tmp/* /var/tmp/*
diff --git a/debian/assets/services/cron/.optional b/debian/assets/services/cron/.optional
new file mode 100644
index 00000000..e69de29b
diff --git a/debian/assets/services/cron/.priority b/debian/assets/services/cron/.priority
new file mode 100644
index 00000000..a6905f8b
--- /dev/null
+++ b/debian/assets/services/cron/.priority
@@ -0,0 +1 @@
+999
diff --git a/debian/assets/services/cron/download.sh b/debian/assets/services/cron/download.sh
new file mode 100755
index 00000000..979f0f67
--- /dev/null
+++ b/debian/assets/services/cron/download.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+
+# download cron
+packages-install cron
diff --git a/image/service-available/:cron/install.sh b/debian/assets/services/cron/install.sh
similarity index 59%
rename from image/service-available/:cron/install.sh
rename to debian/assets/services/cron/install.sh
index ece21913..cf77b52c 100755
--- a/image/service-available/:cron/install.sh
+++ b/debian/assets/services/cron/install.sh
@@ -1,4 +1,4 @@
-#!/bin/sh -e
+#!/bin/bash -e
chmod 600 /etc/crontab
@@ -7,8 +7,4 @@ sed -i 's/^\s*session\s\+required\s\+pam_loginuid.so/# &/' /etc/pam.d/cron
## Remove useless cron entries.
# Checks for lost+found and scans for mtab.
-rm -f /etc/cron.daily/standard
-rm -f /etc/cron.daily/upstart
-rm -f /etc/cron.daily/dpkg
-rm -f /etc/cron.daily/password
-rm -f /etc/cron.weekly/fstrim
+rm -f /etc/cron.daily/standard /etc/cron.daily/upstart /etc/cron.daily/dpkg /etc/cron.daily/password /etc/cron.weekly/fstrim
diff --git a/debian/assets/services/cron/process.sh b/debian/assets/services/cron/process.sh
new file mode 100755
index 00000000..f182a61b
--- /dev/null
+++ b/debian/assets/services/cron/process.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+exec /usr/sbin/cron -f
diff --git a/debian/assets/services/cron/startup.sh b/debian/assets/services/cron/startup.sh
new file mode 100755
index 00000000..9484cded
--- /dev/null
+++ b/debian/assets/services/cron/startup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+# prevent NUMBER OF HARD LINKS > 1 error
+# https://github.com/phusion/baseimage-docker/issues/198
+for dir in /etc/crontab /etc/cron.d /etc/cron.daily /etc/cron.hourly /etc/cron.monthly /etc/cron.weekly; do
+ find ${dir} -exec touch {} +
+done
diff --git a/debian/assets/services/logrotate/.optional b/debian/assets/services/logrotate/.optional
new file mode 100644
index 00000000..e69de29b
diff --git a/debian/assets/services/logrotate/.priority b/debian/assets/services/logrotate/.priority
new file mode 100644
index 00000000..a6905f8b
--- /dev/null
+++ b/debian/assets/services/logrotate/.priority
@@ -0,0 +1 @@
+999
diff --git a/debian/assets/services/logrotate/.tags/logs-stack b/debian/assets/services/logrotate/.tags/logs-stack
new file mode 100644
index 00000000..e69de29b
diff --git a/debian/assets/services/logrotate/config/logrotate.conf b/debian/assets/services/logrotate/config/logrotate.conf
new file mode 100644
index 00000000..c99018c5
--- /dev/null
+++ b/debian/assets/services/logrotate/config/logrotate.conf
@@ -0,0 +1,23 @@
+# see "man logrotate" for details
+
+# global options do not affect preceding include directives
+
+# rotate log files weekly
+weekly
+
+# keep 4 weeks worth of backlogs
+rotate 4
+
+# create new (empty) log files after rotating old ones
+create
+
+# use date as a suffix of the rotated file
+#dateext
+
+# uncomment this if you want your log files compressed
+#compress
+
+# packages drop log rotation information into this directory
+include /etc/logrotate.d
+
+# system-specific logs may also be configured here.
diff --git a/debian/assets/services/logrotate/config/syslog-ng b/debian/assets/services/logrotate/config/syslog-ng
new file mode 100644
index 00000000..0f52bd4b
--- /dev/null
+++ b/debian/assets/services/logrotate/config/syslog-ng
@@ -0,0 +1,42 @@
+/var/log/syslog
+{
+ rotate 7
+ daily
+ missingok
+ notifempty
+ delaycompress
+ compress
+ postrotate
+ if [ -f /run/container/syslog-ng.pid ]; then
+ kill -HUP `cat /run/container/syslog-ng.pid`
+ fi
+ endscript
+}
+
+/var/log/mail.info
+/var/log/mail.warn
+/var/log/mail.err
+/var/log/mail.log
+/var/log/daemon.log
+/var/log/kern.log
+/var/log/auth.log
+/var/log/user.log
+/var/log/lpr.log
+/var/log/cron.log
+/var/log/debug
+/var/log/messages
+/var/log/error
+{
+ rotate 4
+ weekly
+ missingok
+ notifempty
+ compress
+ delaycompress
+ sharedscripts
+ postrotate
+ if [ -f /run/container/syslog-ng.pid ]; then
+ kill -HUP `cat /run/container/syslog-ng.pid`
+ fi
+ endscript
+}
diff --git a/debian/assets/services/logrotate/download.sh b/debian/assets/services/logrotate/download.sh
new file mode 100755
index 00000000..448fe816
--- /dev/null
+++ b/debian/assets/services/logrotate/download.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+
+# download logrotate
+packages-install logrotate
diff --git a/debian/assets/services/logrotate/install.sh b/debian/assets/services/logrotate/install.sh
new file mode 100755
index 00000000..4c72328d
--- /dev/null
+++ b/debian/assets/services/logrotate/install.sh
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+rm -f /etc/logrotate.conf /etc/logrotate.d/syslog-ng
+
+ln -sf /container/services/logrotate/config/logrotate.conf /etc/logrotate.conf
+ln -sf /container/services/logrotate/config/syslog-ng /etc/logrotate.d/syslog-ng
+
+chmod 444 -R /container/services/logrotate/config/*
diff --git a/debian/assets/services/syslog-ng/.optional b/debian/assets/services/syslog-ng/.optional
new file mode 100644
index 00000000..e69de29b
diff --git a/debian/assets/services/syslog-ng/.priority b/debian/assets/services/syslog-ng/.priority
new file mode 100644
index 00000000..a6905f8b
--- /dev/null
+++ b/debian/assets/services/syslog-ng/.priority
@@ -0,0 +1 @@
+999
diff --git a/debian/assets/services/syslog-ng/.tags/logs-stack b/debian/assets/services/syslog-ng/.tags/logs-stack
new file mode 100644
index 00000000..e69de29b
diff --git a/debian/assets/services/syslog-ng/config/syslog-ng b/debian/assets/services/syslog-ng/config/syslog-ng
new file mode 100644
index 00000000..c9e70577
--- /dev/null
+++ b/debian/assets/services/syslog-ng/config/syslog-ng
@@ -0,0 +1,12 @@
+# If a variable is not set here, then the corresponding
+# parameter will not be changed.
+# If a variables is set, then every invocation of
+# syslog-ng's init script will set them using dmesg.
+
+# log level of messages which should go to console
+# see syslog(3) for details
+#
+#CONSOLE_LOG_LEVEL=1
+
+# Command line options to syslog-ng
+SYSLOGNG_OPTS="--no-caps"
diff --git a/debian/assets/services/syslog-ng/config/syslog-ng.conf.template b/debian/assets/services/syslog-ng/config/syslog-ng.conf.template
new file mode 100644
index 00000000..0324c9e1
--- /dev/null
+++ b/debian/assets/services/syslog-ng/config/syslog-ng.conf.template
@@ -0,0 +1,164 @@
+@version: 3.38
+@include "scl.conf"
+
+# Syslog-ng configuration file, compatible with default Debian syslogd
+# installation.
+
+# First, set some global options.
+options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no);
+ dns_cache(no); owner("root"); group("adm"); perm(0640);
+ stats_freq(0); bad_hostname("^gconfd$");
+};
+
+########################
+# Sources
+########################
+# This is the default behavior of sysklogd package
+# Logs may come from unix stream, but not from another machine.
+#
+source s_src {
+ unix-dgram("/dev/log");
+ internal();
+};
+
+# If you wish to get logs from remote machine you should uncomment
+# this and comment the above source line.
+#
+#source s_net { tcp(ip(127.0.0.1) port(1000)); };
+
+########################
+# Destinations
+########################
+# First some standard logfile
+#
+destination d_auth { file("/var/log/auth.log"); };
+destination d_cron { file("/var/log/cron.log"); };
+destination d_daemon { file("/var/log/daemon.log"); };
+destination d_kern { file("/var/log/kern.log"); };
+destination d_lpr { file("/var/log/lpr.log"); };
+destination d_mail { file("/var/log/mail.log"); };
+destination d_syslog { file("/var/log/syslog"); };
+destination d_user { file("/var/log/user.log"); };
+destination d_uucp { file("/var/log/uucp.log"); };
+
+# This files are the log come from the mail subsystem.
+#
+destination d_mailinfo { file("/var/log/mail.info"); };
+destination d_mailwarn { file("/var/log/mail.warn"); };
+destination d_mailerr { file("/var/log/mail.err"); };
+
+# Logging for INN news system
+#
+destination d_newscrit { file("/var/log/news/news.crit"); };
+destination d_newserr { file("/var/log/news/news.err"); };
+destination d_newsnotice { file("/var/log/news/news.notice"); };
+
+# Some 'catch-all' logfiles.
+#
+destination d_debug { file("/var/log/debug"); };
+destination d_error { file("/var/log/error"); };
+destination d_messages { file("/var/log/messages"); };
+
+# The root's console.
+#
+#destination d_console { usertty("root"); };
+
+# Virtual console.
+#
+#destination d_console_all { file(`tty10`); };
+
+# The named pipe /dev/xconsole is for the 'xconsole' utility. To use it,
+# you must invoke 'xconsole' with the '-file' option:
+#
+# $ xconsole -file /dev/xconsole [...]
+#
+destination d_xconsole { pipe("/dev/xconsole"); };
+
+# Send the messages to an other host
+#
+#destination d_net { tcp("127.0.0.1" port(1000) log_fifo_size(1000)); };
+
+# Debian only
+destination d_ppp { file("/var/log/ppp.log"); };
+
+# stdout for docker
+destination d_stdout { ${SYSLOG_OUTPUT_MODE_DEV_STDOUT}("/dev/stdout"); };
+
+########################
+# Filters
+########################
+# Here's come the filter options. With this rules, we can set which
+# message go where.
+
+filter f_dbg { level(debug); };
+filter f_info { level(info); };
+filter f_notice { level(notice); };
+filter f_warn { level(warn); };
+filter f_err { level(err); };
+filter f_crit { level(crit .. emerg); };
+
+filter f_debug { level(debug) and not facility(auth, authpriv, news, mail); };
+filter f_error { level(err .. emerg) ; };
+filter f_messages { level(info,notice,warn) and
+ not facility(auth,authpriv,cron,daemon,mail,news); };
+
+filter f_auth { facility(auth, authpriv) and not filter(f_debug); };
+filter f_cron { facility(cron) and not filter(f_debug); };
+filter f_daemon { facility(daemon) and not filter(f_debug); };
+filter f_kern { facility(kern) and not filter(f_debug); };
+filter f_lpr { facility(lpr) and not filter(f_debug); };
+filter f_local { facility(local0, local1, local3, local4, local5,
+ local6, local7) and not filter(f_debug); };
+filter f_mail { facility(mail) and not filter(f_debug); };
+filter f_news { facility(news) and not filter(f_debug); };
+filter f_syslog3 { not facility(auth, authpriv, mail) and not filter(f_debug); };
+filter f_user { facility(user) and not filter(f_debug); };
+filter f_uucp { facility(uucp) and not filter(f_debug); };
+
+filter f_cnews { level(notice, err, crit) and facility(news); };
+filter f_cother { level(debug, info, notice, warn) or facility(daemon, mail); };
+
+filter f_ppp { facility(local2) and not filter(f_debug); };
+filter f_console { level(warn .. emerg); };
+
+########################
+# Log paths
+########################
+log { source(s_src); filter(f_auth); destination(d_auth); };
+log { source(s_src); filter(f_cron); destination(d_cron); };
+log { source(s_src); filter(f_daemon); destination(d_daemon); };
+log { source(s_src); filter(f_kern); destination(d_kern); };
+log { source(s_src); filter(f_lpr); destination(d_lpr); };
+log { source(s_src); filter(f_syslog3); destination(d_syslog); destination(d_stdout); };
+log { source(s_src); filter(f_user); destination(d_user); };
+log { source(s_src); filter(f_uucp); destination(d_uucp); };
+
+log { source(s_src); filter(f_mail); destination(d_mail); };
+#log { source(s_src); filter(f_mail); filter(f_info); destination(d_mailinfo); };
+#log { source(s_src); filter(f_mail); filter(f_warn); destination(d_mailwarn); };
+#log { source(s_src); filter(f_mail); filter(f_err); destination(d_mailerr); };
+
+log { source(s_src); filter(f_news); filter(f_crit); destination(d_newscrit); };
+log { source(s_src); filter(f_news); filter(f_err); destination(d_newserr); };
+log { source(s_src); filter(f_news); filter(f_notice); destination(d_newsnotice); };
+#log { source(s_src); filter(f_cnews); destination(d_console_all); };
+#log { source(s_src); filter(f_cother); destination(d_console_all); };
+
+#log { source(s_src); filter(f_ppp); destination(d_ppp); };
+
+log { source(s_src); filter(f_debug); destination(d_debug); };
+log { source(s_src); filter(f_error); destination(d_error); };
+log { source(s_src); filter(f_messages); destination(d_messages); };
+
+#log { source(s_src); filter(f_console); destination(d_console_all);
+# destination(d_xconsole); };
+#log { source(s_src); filter(f_crit); destination(d_console); };
+
+# All messages send to a remote site
+#
+#log { source(s_src); destination(d_net); };
+
+###
+# Include all config files in /etc/syslog-ng/conf.d/
+###
+@include "/etc/syslog-ng/conf.d/*.conf"
diff --git a/debian/assets/services/syslog-ng/download.sh b/debian/assets/services/syslog-ng/download.sh
new file mode 100755
index 00000000..ef862b0b
--- /dev/null
+++ b/debian/assets/services/syslog-ng/download.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+
+# download syslog-ng-core
+packages-install syslog-ng-core
diff --git a/debian/assets/services/syslog-ng/install.sh b/debian/assets/services/syslog-ng/install.sh
new file mode 100755
index 00000000..1d53bebc
--- /dev/null
+++ b/debian/assets/services/syslog-ng/install.sh
@@ -0,0 +1,13 @@
+#!/bin/bash -e
+
+mkdir -p /var/lib/syslog-ng
+rm -f /etc/default/syslog-ng /etc/syslog-ng/syslog-ng.conf
+
+ln -sf /container/services/syslog-ng/config/syslog-ng /etc/default/syslog-ng
+
+touch /var/log/syslog
+chmod 640 /var/log/syslog
+
+# If /var/log is writable by another user logrotate will fail
+/bin/chown root:root /var/log
+/bin/chmod 0755 /var/log
diff --git a/debian/assets/services/syslog-ng/process.sh b/debian/assets/services/syslog-ng/process.sh
new file mode 100755
index 00000000..cc3940d6
--- /dev/null
+++ b/debian/assets/services/syslog-ng/process.sh
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+PIDFILE="/run/container/syslog-ng.pid"
+SYSLOGNG_OPTS=""
+
+[ -r /etc/default/syslog-ng ] && . /etc/default/syslog-ng
+
+exec /usr/sbin/syslog-ng --foreground --pidfile "${PIDFILE}" ${SYSLOGNG_OPTS}
diff --git a/debian/assets/services/syslog-ng/startup.sh b/debian/assets/services/syslog-ng/startup.sh
new file mode 100755
index 00000000..ae6ef665
--- /dev/null
+++ b/debian/assets/services/syslog-ng/startup.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -e
+container-logger level eq trace && set -x
+
+# determine output mode on /dev/stdout because of the issue documented at https://github.com/phusion/baseimage-docker/issues/468
+if [ -p /dev/stdout ]; then
+ SYSLOG_OUTPUT_MODE_DEV_STDOUT="pipe"
+else
+ SYSLOG_OUTPUT_MODE_DEV_STDOUT="file"
+fi
+
+export SYSLOG_OUTPUT_MODE_DEV_STDOUT
+envsubst-templates /container/services/syslog-ng/config /etc/syslog-ng
diff --git a/debian/debian.go b/debian/debian.go
new file mode 100644
index 00000000..f08f25d3
--- /dev/null
+++ b/debian/debian.go
@@ -0,0 +1,28 @@
+package debian
+
+import (
+ "embed"
+
+ "github.com/osixia/container-baseimage/core"
+)
+
+// list all services so .priority files are included (. files are ignored in subdirs otherwise)
+
+//go:embed assets/* assets/services/cron/* assets/services/logrotate/* assets/services/syslog-ng/*
+var assets embed.FS
+
+var SupportedDistribution = &core.SupportedDistribution{
+ Name: "Debian & derivatives",
+ Vendors: []string{"debian", "ubuntu"},
+
+ Config: &core.DistributionConfig{
+ DebugPackages: []string{"curl", "less", "procps", "psmisc", "strace", "vim-tiny"},
+ Assets: []*embed.FS{&assets},
+
+ InstallScript: "install.sh",
+
+ BinPackagesIndexUpdate: "packages-index-update",
+ BinPackagesInstallClean: "packages-install-clean",
+ BinPackagesIndexClean: "packages-index-clean",
+ },
+}
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..98ac0ee3
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,3 @@
+# 📄 Documentation
+
+See full documentation and complete features list on [osixia/baseimage documentation](https://opensource.osixia.net/projects/container-images/baseimage/).
diff --git a/docs/examples/README.md b/docs/examples/README.md
new file mode 100644
index 00000000..3f8364fe
--- /dev/null
+++ b/docs/examples/README.md
@@ -0,0 +1,19 @@
+# 🗃 Examples
+
+## Generate examples
+
+### single-process
+```
+mkdir single-process
+```
+```
+docker run --rm --user $UID --volume $(pwd)/single-process:/run/container/generator osixia/baseimage generate bootstrap
+```
+
+### multiprocess
+```
+mkdir multiprocess
+```
+```
+docker run --rm --user $UID --volume $(pwd)/multiprocess:/run/container/generator osixia/baseimage generate bootstrap --multiprocess
+```
diff --git a/docs/examples/multiprocess/Dockerfile b/docs/examples/multiprocess/Dockerfile
new file mode 100644
index 00000000..fc955bf5
--- /dev/null
+++ b/docs/examples/multiprocess/Dockerfile
@@ -0,0 +1,20 @@
+FROM osixia/baseimage:develop
+
+# Set image name
+ARG IMAGE="osixia/baseimage-example:latest"
+ENV CONTAINER_IMAGE=${IMAGE}
+
+# Download service(s) required packages or resources
+# RUN packages-index-update \
+# && packages-install-clean \
+# [...]
+# && curl -o resources.tar.gz https://[...].tar.gz
+# && tar -xzf resources.tar.gz
+
+COPY services /container/services
+
+# Install and link service(s) to the entrypoint
+RUN container-baseimage services install \
+ && container-baseimage services link
+
+COPY environment /container/environment
diff --git a/docs/examples/multiprocess/environment/.env b/docs/examples/multiprocess/environment/.env
new file mode 100644
index 00000000..842ba0a1
--- /dev/null
+++ b/docs/examples/multiprocess/environment/.env
@@ -0,0 +1 @@
+EXAMPLE_ENV_VAR="Hello :)"
diff --git a/docs/examples/multiprocess/environment/README.md b/docs/examples/multiprocess/environment/README.md
new file mode 100644
index 00000000..ec9d9d4f
--- /dev/null
+++ b/docs/examples/multiprocess/environment/README.md
@@ -0,0 +1,8 @@
+# .env files
+
+.env files in this directory and any sub-directories are loaded before executing services lifecycle script(s) (startup.sh, process.sh and finish.sh) or entrypoint lifecycle pre-commands.
+The variables they contain are defined as environment variables in the container.
+
+Files are loaded in alphabetical order, and variables are overwritten if already defined in a previous file.
+
+**Container environment variables set at run time will overwrite values defined in .env files.**
diff --git a/docs/examples/multiprocess/services/service-1/.priority b/docs/examples/multiprocess/services/service-1/.priority
new file mode 100644
index 00000000..1b79f38e
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-1/.priority
@@ -0,0 +1 @@
+500
diff --git a/docs/examples/multiprocess/services/service-1/README.md b/docs/examples/multiprocess/services/service-1/README.md
new file mode 100644
index 00000000..709c2dc9
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-1/README.md
@@ -0,0 +1,36 @@
+# Service Files
+The files outlined below are not mandatory.
+
+## install.sh
+This script should exclusively contain instructions for the initial setup of the service.
+
+For improved image construction, all package installations or file downloads should occur within the Dockerfile.
+
+By separating time-intensive download operations from the setup, the docker build cache is utilized effectively.
+Changes to the `install.sh` file will not necessitate re-downloading dependencies,
+as the Dockerfile builder will only execute the service installation script.
+
+Note: The `install.sh` script executes during the docker build, thus runtime environment variables cannot be used for setup customization.
+Such customizations are handled in the `startup.sh` file.
+
+## startup.sh
+This script prepares `process.sh` for execution and tailors the service setup to runtime environment variables.
+
+## process.sh
+This script specifies the command to be executed.
+
+In images designed for multiple processes, all `process.sh` scripts are launched simultaneously.
+The order defined in the service `.priority` file is irrelevant.
+
+## finish.sh
+This script is executed once `process.sh` has concluded.
+
+## .priority
+The .priority file establishes the sequence in which services `startup.sh` or `finish.sh` scripts are invoked.
+The higher the number, the greater the priority. The default is 500.
+
+## .optional
+This file indicates that the service is optional and can be incorporated later via the `container-baseimage services require service-1` command.
+
+## download.sh
+This script is called during container build to download optional service resources.
diff --git a/docs/examples/multiprocess/services/service-1/finish.sh b/docs/examples/multiprocess/services/service-1/finish.sh
new file mode 100755
index 00000000..5306fbcf
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-1/finish.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "service-1: process ended ..."
diff --git a/docs/examples/multiprocess/services/service-1/install.sh b/docs/examples/multiprocess/services/service-1/install.sh
new file mode 100755
index 00000000..c19042d1
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-1/install.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+# this script is run during the image build
+
+echo "service-1: Installing some tools ..."
diff --git a/docs/examples/multiprocess/services/service-1/process.sh b/docs/examples/multiprocess/services/service-1/process.sh
new file mode 100755
index 00000000..c86bf4a8
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-1/process.sh
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+SLEEP=$(shuf -i 3-15 -n 1)
+
+echo "service-1: Just going to sleep for ${SLEEP} seconds ..."
+exec sleep "${SLEEP}"
diff --git a/docs/examples/multiprocess/services/service-1/startup.sh b/docs/examples/multiprocess/services/service-1/startup.sh
new file mode 100755
index 00000000..43c5c2bb
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-1/startup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "service-1: Doing some container start setup ..."
+echo "service-1: EXAMPLE_ENV_VAR=${EXAMPLE_ENV_VAR} ..."
diff --git a/docs/examples/multiprocess/services/service-2/.priority b/docs/examples/multiprocess/services/service-2/.priority
new file mode 100644
index 00000000..1b79f38e
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-2/.priority
@@ -0,0 +1 @@
+500
diff --git a/docs/examples/multiprocess/services/service-2/README.md b/docs/examples/multiprocess/services/service-2/README.md
new file mode 100644
index 00000000..e680c48d
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-2/README.md
@@ -0,0 +1,36 @@
+# Service Files
+The files outlined below are not mandatory.
+
+## install.sh
+This script should exclusively contain instructions for the initial setup of the service.
+
+For improved image construction, all package installations or file downloads should occur within the Dockerfile.
+
+By separating time-intensive download operations from the setup, the docker build cache is utilized effectively.
+Changes to the `install.sh` file will not necessitate re-downloading dependencies,
+as the Dockerfile builder will only execute the service installation script.
+
+Note: The `install.sh` script executes during the docker build, thus runtime environment variables cannot be used for setup customization.
+Such customizations are handled in the `startup.sh` file.
+
+## startup.sh
+This script prepares `process.sh` for execution and tailors the service setup to runtime environment variables.
+
+## process.sh
+This script specifies the command to be executed.
+
+In images designed for multiple processes, all `process.sh` scripts are launched simultaneously.
+The order defined in the service `.priority` file is irrelevant.
+
+## finish.sh
+This script is executed once `process.sh` has concluded.
+
+## .priority
+The .priority file establishes the sequence in which services `startup.sh` or `finish.sh` scripts are invoked.
+The higher the number, the greater the priority. The default is 500.
+
+## .optional
+This file indicates that the service is optional and can be incorporated later via the `container-baseimage services require service-2` command.
+
+## download.sh
+This script is called during container build to download optional service resources.
diff --git a/docs/examples/multiprocess/services/service-2/finish.sh b/docs/examples/multiprocess/services/service-2/finish.sh
new file mode 100755
index 00000000..fd343081
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-2/finish.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "service-2: process ended ..."
diff --git a/docs/examples/multiprocess/services/service-2/install.sh b/docs/examples/multiprocess/services/service-2/install.sh
new file mode 100755
index 00000000..f6c288e4
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-2/install.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+# this script is run during the image build
+
+echo "service-2: Installing some tools ..."
diff --git a/docs/examples/multiprocess/services/service-2/process.sh b/docs/examples/multiprocess/services/service-2/process.sh
new file mode 100755
index 00000000..6e53c064
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-2/process.sh
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+SLEEP=$(shuf -i 3-15 -n 1)
+
+echo "service-2: Just going to sleep for ${SLEEP} seconds ..."
+exec sleep "${SLEEP}"
diff --git a/docs/examples/multiprocess/services/service-2/startup.sh b/docs/examples/multiprocess/services/service-2/startup.sh
new file mode 100755
index 00000000..dc109d6e
--- /dev/null
+++ b/docs/examples/multiprocess/services/service-2/startup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "service-2: Doing some container start setup ..."
+echo "service-2: EXAMPLE_ENV_VAR=${EXAMPLE_ENV_VAR} ..."
diff --git a/docs/examples/single-process/Dockerfile b/docs/examples/single-process/Dockerfile
new file mode 100644
index 00000000..fc955bf5
--- /dev/null
+++ b/docs/examples/single-process/Dockerfile
@@ -0,0 +1,20 @@
+FROM osixia/baseimage:develop
+
+# Set image name
+ARG IMAGE="osixia/baseimage-example:latest"
+ENV CONTAINER_IMAGE=${IMAGE}
+
+# Download service(s) required packages or resources
+# RUN packages-index-update \
+# && packages-install-clean \
+# [...]
+# && curl -o resources.tar.gz https://[...].tar.gz
+# && tar -xzf resources.tar.gz
+
+COPY services /container/services
+
+# Install and link service(s) to the entrypoint
+RUN container-baseimage services install \
+ && container-baseimage services link
+
+COPY environment /container/environment
diff --git a/docs/examples/single-process/environment/.env b/docs/examples/single-process/environment/.env
new file mode 100644
index 00000000..842ba0a1
--- /dev/null
+++ b/docs/examples/single-process/environment/.env
@@ -0,0 +1 @@
+EXAMPLE_ENV_VAR="Hello :)"
diff --git a/docs/examples/single-process/environment/README.md b/docs/examples/single-process/environment/README.md
new file mode 100644
index 00000000..ec9d9d4f
--- /dev/null
+++ b/docs/examples/single-process/environment/README.md
@@ -0,0 +1,8 @@
+# .env files
+
+.env files in this directory and any sub-directories are loaded before executing services lifecycle script(s) (startup.sh, process.sh and finish.sh) or entrypoint lifecycle pre-commands.
+The variables they contain are defined as environment variables in the container.
+
+Files are loaded in alphabetical order, and variables are overwritten if already defined in a previous file.
+
+**Container environment variables set at run time will overwrite values defined in .env files.**
diff --git a/docs/examples/single-process/services/service-1/.priority b/docs/examples/single-process/services/service-1/.priority
new file mode 100644
index 00000000..1b79f38e
--- /dev/null
+++ b/docs/examples/single-process/services/service-1/.priority
@@ -0,0 +1 @@
+500
diff --git a/docs/examples/single-process/services/service-1/README.md b/docs/examples/single-process/services/service-1/README.md
new file mode 100644
index 00000000..709c2dc9
--- /dev/null
+++ b/docs/examples/single-process/services/service-1/README.md
@@ -0,0 +1,36 @@
+# Service Files
+The files outlined below are not mandatory.
+
+## install.sh
+This script should exclusively contain instructions for the initial setup of the service.
+
+For improved image construction, all package installations or file downloads should occur within the Dockerfile.
+
+By separating time-intensive download operations from the setup, the docker build cache is utilized effectively.
+Changes to the `install.sh` file will not necessitate re-downloading dependencies,
+as the Dockerfile builder will only execute the service installation script.
+
+Note: The `install.sh` script executes during the docker build, thus runtime environment variables cannot be used for setup customization.
+Such customizations are handled in the `startup.sh` file.
+
+## startup.sh
+This script prepares `process.sh` for execution and tailors the service setup to runtime environment variables.
+
+## process.sh
+This script specifies the command to be executed.
+
+In images designed for multiple processes, all `process.sh` scripts are launched simultaneously.
+The order defined in the service `.priority` file is irrelevant.
+
+## finish.sh
+This script is executed once `process.sh` has concluded.
+
+## .priority
+The .priority file establishes the sequence in which services `startup.sh` or `finish.sh` scripts are invoked.
+The higher the number, the greater the priority. The default is 500.
+
+## .optional
+This file indicates that the service is optional and can be incorporated later via the `container-baseimage services require service-1` command.
+
+## download.sh
+This script is called during container build to download optional service resources.
diff --git a/docs/examples/single-process/services/service-1/finish.sh b/docs/examples/single-process/services/service-1/finish.sh
new file mode 100755
index 00000000..5306fbcf
--- /dev/null
+++ b/docs/examples/single-process/services/service-1/finish.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "service-1: process ended ..."
diff --git a/docs/examples/single-process/services/service-1/install.sh b/docs/examples/single-process/services/service-1/install.sh
new file mode 100755
index 00000000..c19042d1
--- /dev/null
+++ b/docs/examples/single-process/services/service-1/install.sh
@@ -0,0 +1,4 @@
+#!/bin/bash -e
+# this script is run during the image build
+
+echo "service-1: Installing some tools ..."
diff --git a/docs/examples/single-process/services/service-1/process.sh b/docs/examples/single-process/services/service-1/process.sh
new file mode 100755
index 00000000..c86bf4a8
--- /dev/null
+++ b/docs/examples/single-process/services/service-1/process.sh
@@ -0,0 +1,10 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+SLEEP=$(shuf -i 3-15 -n 1)
+
+echo "service-1: Just going to sleep for ${SLEEP} seconds ..."
+exec sleep "${SLEEP}"
diff --git a/docs/examples/single-process/services/service-1/startup.sh b/docs/examples/single-process/services/service-1/startup.sh
new file mode 100755
index 00000000..43c5c2bb
--- /dev/null
+++ b/docs/examples/single-process/services/service-1/startup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+
+# if container log level is trace:
+# print commands and their arguments as they are executed
+container-logger level eq trace && set -x
+
+echo "service-1: Doing some container start setup ..."
+echo "service-1: EXAMPLE_ENV_VAR=${EXAMPLE_ENV_VAR} ..."
diff --git a/errors/errors.go b/errors/errors.go
new file mode 100644
index 00000000..5889c393
--- /dev/null
+++ b/errors/errors.go
@@ -0,0 +1,28 @@
+package errors
+
+import (
+ "errors"
+ "os/exec"
+)
+
+// Errors global variables
+// =============================
+
+var ErrRequired = errors.New("required")
+var ErrUnknown = errors.New("unknown")
+var ErrUnavailable = errors.New("unavailable")
+
+func ExitCode(err error) int {
+ if err == nil {
+ return 0
+ }
+
+ exitCode := 1
+
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ exitCode = exitErr.ExitCode()
+ }
+
+ return exitCode
+}
diff --git a/example/multiple-process-image/Dockerfile b/example/multiple-process-image/Dockerfile
deleted file mode 100644
index 17e3e23c..00000000
--- a/example/multiple-process-image/Dockerfile
+++ /dev/null
@@ -1,29 +0,0 @@
-# Use osixia/light-baseimage
-# https://github.com/osixia/docker-light-baseimage
-FROM osixia/light-baseimage:1.3.3
-
-# Install multiple process stack, nginx and php7.0-fpm and clean apt-get files
-# https://github.com/osixia/docker-light-baseimage/blob/stable/image/tool/add-multiple-process-stack
-RUN apt-get -y update \
- && /container/tool/add-multiple-process-stack \
- && LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- nginx \
- php7.0-fpm \
- && apt-get clean \
- && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
-# Add service directory to /container/service
-ADD service /container/service
-
-# Use baseimage install-service script
-# https://github.com/osixia/docker-light-baseimage/blob/stable/image/tool/install-service
-RUN /container/tool/install-service
-
-# Add default env directory
-ADD environment /container/environment/99-default
-
-# Set /var/www/ in a data volume
-VOLUME /var/www/
-
-# Expose default http and https ports
-EXPOSE 80 443
diff --git a/example/multiple-process-image/Makefile b/example/multiple-process-image/Makefile
deleted file mode 100644
index 22a33521..00000000
--- a/example/multiple-process-image/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-NAME = example/multiple-process
-
-.PHONY: build build-nocache
-
-build:
- docker build -t $(NAME) --rm .
-
-build-nocache:
- docker build -t $(NAME) --no-cache --rm .
diff --git a/example/multiple-process-image/environment/default.startup.yaml b/example/multiple-process-image/environment/default.startup.yaml
deleted file mode 100644
index e381f17f..00000000
--- a/example/multiple-process-image/environment/default.startup.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-# This is the default image startup configuration file
-# this file define environment variables used during the container **first start** in **startup files**.
-
-# This file is deleted right after startup files are processed for the first time,
-# after that all these values will not be available in the container environment.
-# This helps to keep your container configuration secret.
-# more information : https://github.com/osixia/docker-light-baseimage
-
-FIRST_START_SETUP_ONLY_SECRET: The bdd password is Baw0unga!
diff --git a/example/multiple-process-image/environment/default.yaml b/example/multiple-process-image/environment/default.yaml
deleted file mode 100644
index 9986fd0f..00000000
--- a/example/multiple-process-image/environment/default.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# This is the default image configuration file
-# These values will persists in container environment.
-
-# All environment variables used after the container first start
-# must be defined here.
-# more information : https://github.com/osixia/docker-light-baseimage
-
-WHO_AM_I: We are Anonymous. We are Legion. We do not forgive. We do not forget. Expect us.
diff --git a/example/multiple-process-image/service/nginx/install.sh b/example/multiple-process-image/service/nginx/install.sh
deleted file mode 100755
index f0fbebac..00000000
--- a/example/multiple-process-image/service/nginx/install.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash -e
-# this script is run during the image build
-
-rm -rf /var/www/html/index.nginx-debian.html
-echo "Hi!" > /var/www/html/index.html
diff --git a/example/multiple-process-image/service/nginx/process.sh b/example/multiple-process-image/service/nginx/process.sh
deleted file mode 100755
index 7afdb388..00000000
--- a/example/multiple-process-image/service/nginx/process.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash -e
-echo "The secret is: $FIRST_START_SETUP_ONLY_SECRET"
-exec /usr/sbin/nginx -g "daemon off;"
diff --git a/example/multiple-process-image/service/nginx/startup.sh b/example/multiple-process-image/service/nginx/startup.sh
deleted file mode 100755
index 20ef1d78..00000000
--- a/example/multiple-process-image/service/nginx/startup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash -e
-FIRST_START_DONE="${CONTAINER_STATE_DIR}/nginx-first-start-done"
-
-# container first start
-if [ ! -e "$FIRST_START_DONE" ]; then
- echo ${WHO_AM_I} >> /var/www/html/index.html
- touch $FIRST_START_DONE
-fi
-
-echo "The secret is: $FIRST_START_SETUP_ONLY_SECRET"
-
-exit 0
diff --git a/example/multiple-process-image/service/php/config/default b/example/multiple-process-image/service/php/config/default
deleted file mode 100644
index 93472a8d..00000000
--- a/example/multiple-process-image/service/php/config/default
+++ /dev/null
@@ -1,26 +0,0 @@
-server {
- listen 80 default_server;
- listen [::]:80 default_server;
-
- root /var/www/html;
-
- # Add index.php to the list if you are using PHP
- index index.html index.htm index.nginx-debian.html;
-
- server_name _;
-
- location / {
- # First attempt to serve request as file, then
- # as directory, then fall back to displaying a 404.
- try_files $uri $uri/ =404;
- }
-
- location ~ \.php$ {
- fastcgi_split_path_info ^(.+\.php)(/.+)$;
- # With php fpm:
- fastcgi_pass unix:/run/php/php7.0-fpm.sock;
- fastcgi_index index.php;
- include fastcgi_params;
- include fastcgi.conf;
- }
-}
diff --git a/example/multiple-process-image/service/php/install.sh b/example/multiple-process-image/service/php/install.sh
deleted file mode 100755
index 6787fe5e..00000000
--- a/example/multiple-process-image/service/php/install.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash -e
-# this script is run during the image build
-
-# config
-sed -i -e "s/expose_php = On/expose_php = Off/g" /etc/php/7.0/fpm/php.ini
-sed -i -e "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/g" /etc/php/7.0/fpm/php.ini
-sed -i -e "s/;listen.owner = www-data/listen.owner = www-data/g" /etc/php/7.0/fpm/php.ini
-sed -i -e "s/;listen.group = www-data/listen.group = www-data/g" /etc/php/7.0/fpm/php.ini
-
-# create php socket directory
-mkdir -p /run/php
-
-# replace default website with php service default website
-cp -f /container/service/php/config/default /etc/nginx/sites-available/default
-
-# create phpinfo.php
-echo " /var/www/html/phpinfo.php
diff --git a/example/multiple-process-image/service/php/process.sh b/example/multiple-process-image/service/php/process.sh
deleted file mode 100755
index 23165dd1..00000000
--- a/example/multiple-process-image/service/php/process.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash -e
-exec /usr/sbin/php-fpm7.0 --nodaemonize
diff --git a/example/single-process-image/Dockerfile b/example/single-process-image/Dockerfile
deleted file mode 100644
index 069efc3a..00000000
--- a/example/single-process-image/Dockerfile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Use osixia/light-baseimage
-# https://github.com/osixia/docker-light-baseimage
-FROM osixia/light-baseimage:1.3.3
-
-# Download nginx from apt-get and clean apt-get files
-RUN apt-get -y update \
- && LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- nginx \
- && apt-get clean \
- && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
-# Add service directory to /container/service
-ADD service /container/service
-
-# Use baseimage install-service script
-# https://github.com/osixia/docker-light-baseimage/blob/stable/image/tool/install-service
-RUN /container/tool/install-service
-
-# Add default env directory
-ADD environment /container/environment/99-default
-
-# Set /var/www/ in a data volume
-VOLUME /var/www/
-
-# Expose default http and https ports
-EXPOSE 80 443
diff --git a/example/single-process-image/Makefile b/example/single-process-image/Makefile
deleted file mode 100644
index b65a62e2..00000000
--- a/example/single-process-image/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-NAME = example/single-process
-
-.PHONY: build build-nocache
-
-build:
- docker build -t $(NAME) --rm .
-
-build-nocache:
- docker build -t $(NAME) --no-cache --rm .
diff --git a/example/single-process-image/environment/default.startup.yaml b/example/single-process-image/environment/default.startup.yaml
deleted file mode 100644
index e381f17f..00000000
--- a/example/single-process-image/environment/default.startup.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-# This is the default image startup configuration file
-# this file define environment variables used during the container **first start** in **startup files**.
-
-# This file is deleted right after startup files are processed for the first time,
-# after that all these values will not be available in the container environment.
-# This helps to keep your container configuration secret.
-# more information : https://github.com/osixia/docker-light-baseimage
-
-FIRST_START_SETUP_ONLY_SECRET: The bdd password is Baw0unga!
diff --git a/example/single-process-image/environment/default.yaml b/example/single-process-image/environment/default.yaml
deleted file mode 100644
index 9986fd0f..00000000
--- a/example/single-process-image/environment/default.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-# This is the default image configuration file
-# These values will persists in container environment.
-
-# All environment variables used after the container first start
-# must be defined here.
-# more information : https://github.com/osixia/docker-light-baseimage
-
-WHO_AM_I: We are Anonymous. We are Legion. We do not forgive. We do not forget. Expect us.
diff --git a/example/single-process-image/service/nginx/install.sh b/example/single-process-image/service/nginx/install.sh
deleted file mode 100755
index f0fbebac..00000000
--- a/example/single-process-image/service/nginx/install.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash -e
-# this script is run during the image build
-
-rm -rf /var/www/html/index.nginx-debian.html
-echo "Hi!" > /var/www/html/index.html
diff --git a/example/single-process-image/service/nginx/process.sh b/example/single-process-image/service/nginx/process.sh
deleted file mode 100755
index 7afdb388..00000000
--- a/example/single-process-image/service/nginx/process.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash -e
-echo "The secret is: $FIRST_START_SETUP_ONLY_SECRET"
-exec /usr/sbin/nginx -g "daemon off;"
diff --git a/example/single-process-image/service/nginx/startup.sh b/example/single-process-image/service/nginx/startup.sh
deleted file mode 100755
index 20ef1d78..00000000
--- a/example/single-process-image/service/nginx/startup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash -e
-FIRST_START_DONE="${CONTAINER_STATE_DIR}/nginx-first-start-done"
-
-# container first start
-if [ ! -e "$FIRST_START_DONE" ]; then
- echo ${WHO_AM_I} >> /var/www/html/index.html
- touch $FIRST_START_DONE
-fi
-
-echo "The secret is: $FIRST_START_SETUP_ONLY_SECRET"
-
-exit 0
diff --git a/example/single-process-image/test-custom-env/env.yaml b/example/single-process-image/test-custom-env/env.yaml
deleted file mode 100644
index 51b6719d..00000000
--- a/example/single-process-image/test-custom-env/env.yaml
+++ /dev/null
@@ -1 +0,0 @@
-WHO_AM_I: I'm bobby.
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..5a0703f5
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,33 @@
+module github.com/osixia/container-baseimage
+
+go 1.21
+
+toolchain go1.21.6
+
+require (
+ dagger.io/dagger v0.9.7
+ github.com/a8m/envsubst v1.4.2
+ github.com/fsnotify/fsnotify v1.7.0
+ github.com/google/go-github/v41 v41.0.0
+ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
+ github.com/hashicorp/go-reap v0.0.0-20230117204525-bf69c61a7b71
+ github.com/hashicorp/go-version v1.6.0
+ github.com/spf13/cobra v1.7.0
+ github.com/spf13/pflag v1.0.5
+ github.com/subosito/gotenv v1.6.0
+ golang.org/x/sync v0.4.0
+ golang.org/x/text v0.12.0
+)
+
+require (
+ github.com/99designs/gqlgen v0.17.31 // indirect
+ github.com/Khan/genqlient v0.6.0 // indirect
+ github.com/adrg/xdg v0.4.0 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
+ github.com/inconshreveable/mousetrap v1.1.0 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/vektah/gqlparser/v2 v2.5.6 // indirect
+ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
+ golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
+ golang.org/x/sys v0.13.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 00000000..f152bc28
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,94 @@
+dagger.io/dagger v0.9.7 h1:qPZRJvQWiSCr8OONEMQht62pWQ2lD68YMZpb3gFkBsE=
+dagger.io/dagger v0.9.7/go.mod h1:ic2UD6gS5iBp2e6VWPxyb7h6VpAyhFN6U7/TDlriox8=
+github.com/99designs/gqlgen v0.17.31 h1:VncSQ82VxieHkea8tz11p7h/zSbvHSxSDZfywqWt158=
+github.com/99designs/gqlgen v0.17.31/go.mod h1:i4rEatMrzzu6RXaHydq1nmEPZkb3bKQsnxNRHS4DQB4=
+github.com/Khan/genqlient v0.6.0 h1:Bwb1170ekuNIVIwTJEqvO8y7RxBxXu639VJOkKSrwAk=
+github.com/Khan/genqlient v0.6.0/go.mod h1:rvChwWVTqXhiapdhLDV4bp9tz/Xvtewwkon4DpWWCRM=
+github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg=
+github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
+github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=
+github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg=
+github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/hashicorp/go-reap v0.0.0-20230117204525-bf69c61a7b71 h1:ntMIobjNd0QLB/i6OQM/OV1B+k6RjmvtY84z/SUeYPA=
+github.com/hashicorp/go-reap v0.0.0-20230117204525-bf69c61a7b71/go.mod h1:qIFzeFcJU3OIFk/7JreWXcUjFmcCaeHTH9KoNyHYVCs=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/vektah/gqlparser/v2 v2.5.6 h1:Ou14T0N1s191eRMZ1gARVqohcbe1e8FrcONScsq8cRU=
+github.com/vektah/gqlparser/v2 v2.5.6/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/helpers/embed.go b/helpers/embed.go
new file mode 100644
index 00000000..55db1643
--- /dev/null
+++ b/helpers/embed.go
@@ -0,0 +1,61 @@
+package helpers
+
+import (
+ "embed"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Embed filesystem functions
+// =============================
+
+func CopyEmbedDir(efs *embed.FS, dest string, filePermFunc FilePermFunc) error {
+
+ log.Tracef("CopyEmbedDir called with efs: %+v, dest: %v, filePermFunc: %+v", efs, dest, filePermFunc)
+
+ files, err := ListFiles(efs)
+ if err != nil {
+ return err
+ }
+
+ for _, f := range files {
+ // remove first directory from file path
+ fp := filepath.Join(strings.Split(f, "/")[1:]...)
+
+ // append dest path to file path
+ fp = filepath.Join(dest, fp)
+
+ perm := filePermFunc(fp)
+
+ if err := CopyEmbedFile(efs, f, fp, perm); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func CopyEmbedFile(efs *embed.FS, file string, dest string, perm fs.FileMode) error {
+
+ log.Tracef("CopyEmbedFile called with efs: %+v, file: %v, dest: %v, perm: %v", efs, file, dest, perm)
+
+ log.Debugf("Copying %v to %v ...", file, dest)
+ fc, err := efs.ReadFile(file)
+ if err != nil {
+ return err
+ }
+
+ if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
+ return err
+ }
+
+ if err := os.WriteFile(dest, fc, perm); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/helpers/envsubst.go b/helpers/envsubst.go
new file mode 100644
index 00000000..1fc72139
--- /dev/null
+++ b/helpers/envsubst.go
@@ -0,0 +1,96 @@
+package helpers
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/a8m/envsubst"
+
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Envsubst functions
+// =============================
+
+func Envsubst(input string, output string) error {
+
+ log.Tracef("filesystem.envsubst called with input: %v, output: %v", input, output)
+
+ inputInfo, err := os.Stat(input)
+ if err != nil {
+ return err
+ }
+
+ inputDir := filepath.Dir(input)
+ inputDirInfo, err := os.Stat(inputDir)
+ if err != nil {
+ return err
+ }
+
+ outputDir := filepath.Dir(output)
+
+ log.Debugf("Creating output directory %v ...", outputDir)
+ if err := os.MkdirAll(outputDir, inputDirInfo.Mode().Perm()); err != nil {
+ return err
+ }
+
+ log.Debugf("Running envsubst on input %v ...", input)
+ bytes, err := envsubst.ReadFile(input)
+ if err != nil {
+ return err
+ }
+
+ if _, err := os.Stat(output); err == nil {
+ log.Warningf("File %v already exists and will be overwrited", output)
+ }
+
+ log.Debugf("Creating output file %v ...", output)
+ f, err := os.Create(output)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ log.Debugf("Writting output file %v ...", output)
+ if _, err := f.Write(bytes); err != nil {
+ return err
+ }
+
+ log.Debugf("Applying input file %v permissions to %v ...", inputInfo.Mode(), output)
+ if err := os.Chmod(output, inputInfo.Mode()); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func EnvsubstTemplates(templatesDir string, outputDir string, templatesFilesSuffix string) ([]string, error) {
+
+ log.Tracef("filesystem.EnvsubstTemplates called with templatesDir: %v, outputDir: %v, templatesFilesSuffix: %v", templatesDir, outputDir, templatesFilesSuffix)
+
+ log.Debugf("EnvsubstTemplates %v files from %v to %v ...", templatesFilesSuffix, templatesDir, outputDir)
+ log.Debugf("Environment variables:\n%v", strings.Join(os.Environ(), "\n"))
+
+ var files []string
+ err := filepath.Walk(templatesDir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if !info.IsDir() && strings.HasSuffix(path, templatesFilesSuffix) {
+ outputPath := outputDir + strings.TrimSuffix(strings.TrimPrefix(path, templatesDir), templatesFilesSuffix)
+
+ log.Infof("Running envsubst on %v output to %v ...", path, outputPath)
+ if err := Envsubst(path, outputPath); err != nil {
+ return err
+ }
+
+ files = append(files, outputPath)
+ }
+
+ return nil
+ })
+
+ return files, err
+}
diff --git a/helpers/exec.go b/helpers/exec.go
new file mode 100644
index 00000000..02b7b919
--- /dev/null
+++ b/helpers/exec.go
@@ -0,0 +1,206 @@
+package helpers
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "strconv"
+ "syscall"
+ "time"
+
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Exec wrapper
+// =============================
+
// Exec wraps an exec.Cmd with optional timeout, process-group, PID-file
// and stdout-capture configuration, set through the With* builder methods
// before Command/Start/Run are called.
type Exec struct {
	context context.Context // context passed to exec.CommandContext by Command
	timeout time.Duration // grace period Wait allows between interrupt and kill (0 = wait indefinitely)
	setGPID bool // when true, Command starts the process in its own process group

	Cmd *exec.Cmd // underlying command, populated by Command
	pidFile string // when set, Start writes the child PID here and Wait removes it

	stdout *bytes.Buffer // when set, used instead of os.Stdout as the command's stdout
}
+
+func NewExec(ctx context.Context) *Exec {
+
+ return &Exec{
+ context: ctx,
+ }
+}
+
+func (e *Exec) WithTimeout(timeout time.Duration) *Exec {
+ e.timeout = timeout
+
+ return e
+}
+
+func (e *Exec) WithStdout(b *bytes.Buffer) *Exec {
+ e.stdout = b
+
+ return e
+}
+
+func (e *Exec) WithSetGPID(b bool) *Exec {
+ e.setGPID = b
+
+ return e
+}
+
+func (e *Exec) WithPIDFile(f string) *Exec {
+ e.pidFile = f
+
+ return e
+}
+
+func (e *Exec) Command(name string, args ...string) *Exec {
+ log.Tracef("Exec.Command called with cmd: %v, args: %v", name, args)
+
+ cmd := exec.CommandContext(e.context, name, args...)
+ cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
+
+ if e.stdout != nil {
+ cmd.Stdout = e.stdout
+ }
+
+ if e.setGPID {
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Setpgid: true,
+ }
+ }
+
+ e.Cmd = cmd
+
+ return e
+}
+
+func (e *Exec) Run() error {
+
+ if err := e.Start(); err != nil {
+ return err
+ }
+
+ return e.Wait()
+}
+
+func (e *Exec) Start() error {
+
+ log.Infof("Running %v ...", e.Cmd)
+
+ if err := e.Cmd.Start(); err != nil {
+ return err
+ }
+
+ pid := e.Cmd.Process.Pid
+
+ if e.pidFile != "" {
+ f, err := Create(e.pidFile)
+ if err != nil {
+ log.Warningf("error creating pid file %v: %v", e.pidFile, err.Error())
+ } else {
+ if _, err := f.WriteString(strconv.Itoa(pid)); err != nil {
+ log.Warningf("error writing in pid file %v: %v", e.pidFile, err.Error())
+ }
+ }
+ }
+
+ log.Debugf("%v: started (pid %v)", e.Cmd, pid)
+
+ return nil
+}
+
// Wait waits for the command started by Start to exit.
//
// When a timeout is configured, waitOrStop interrupts the command once the
// context is done and kills it if it does not exit within the timeout;
// otherwise Wait blocks until the command exits on its own. A configured
// PID file is removed afterwards, and a context.Canceled result is
// treated as a normal, non-error termination. Any other error is wrapped
// with the command description.
func (e *Exec) Wait() error {

	var err error
	if e.timeout > 0 {
		err = e.waitOrStop(e.context, e.Cmd, os.Interrupt, e.timeout)
	} else {
		err = e.Cmd.Wait()
	}

	// best-effort cleanup: a failed removal is logged, not returned
	if e.pidFile != "" {
		if err := Remove(e.pidFile); err != nil {
			log.Warningf("error removing pid file %v: %v", e.pidFile, err.Error())
		}
	}

	// cancellation is an expected way to stop the command, not a failure
	if errors.Is(err, context.Canceled) {
		err = nil
	}

	// prefix the command so callers can tell which process failed
	if err != nil {
		err = fmt.Errorf("%v: %w", e.Cmd, err)
	}

	return err
}
+
// waitOrStop waits for the already-started command cmd by calling its Wait method.
//
// If cmd does not return before ctx is done, waitOrStop sends it the given interrupt signal.
// If killDelay is positive, waitOrStop waits that additional period for Wait to return before sending os.Kill.
//
// This function is copied from the one added to x/playground/internal in
// http://golang.org/cl/228438.
func (e *Exec) waitOrStop(ctx context.Context, cmd *exec.Cmd, interrupt os.Signal, killDelay time.Duration) error {

	log.Tracef("Exec.waitOrStop called with cmd: %v, interrupt: %v, killDelay: %v", cmd, interrupt, killDelay)

	if cmd.Process == nil {
		log.Fatal("waitOrStop called with a nil cmd.Process — missing Start call?")
	}
	if interrupt == nil {
		log.Fatal("waitOrStop requires a non-nil interrupt signal")
	}

	// The goroutine below signals the process once ctx is done; the final
	// receive from errc (after cmd.Wait returns) also acts as its shutdown
	// handshake, so the goroutine never leaks.
	errc := make(chan error)
	go func() {
		// If cmd.Wait finished first, the main flow is already receiving:
		// send nil and exit without signaling the process at all.
		select {
		case errc <- nil:
			return
		case <-ctx.Done():
		}

		err := cmd.Process.Signal(interrupt)
		if err == nil {
			err = ctx.Err() // Report ctx.Err() as the reason we interrupted.
		} else if errors.Is(err, os.ErrProcessDone) {
			errc <- nil
			return
		}

		if killDelay > 0 {
			timer := time.NewTimer(killDelay)
			select {
			// Report ctx.Err() as the reason we interrupted the process...
			case errc <- ctx.Err():
				timer.Stop()
				return
			// ...but after killDelay has elapsed, fall back to a stronger signal.
			case <-timer.C:
			}

			// Wait still hasn't returned.
			// Kill the process harder to make sure that it exits.
			//
			// Ignore any error: if cmd.Process has already terminated, we still
			// want to send ctx.Err() (or the error from the Interrupt call)
			// to properly attribute the signal that may have terminated it.
			_ = cmd.Process.Kill()
		}

		errc <- err
	}()

	// An interrupt-path error takes precedence over the plain Wait error.
	waitErr := cmd.Wait()
	if interruptErr := <-errc; interruptErr != nil {
		return interruptErr
	}
	return waitErr
}
diff --git a/helpers/os.go b/helpers/os.go
new file mode 100644
index 00000000..9dbd78fe
--- /dev/null
+++ b/helpers/os.go
@@ -0,0 +1,219 @@
+package helpers
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "github.com/fsnotify/fsnotify"
+
+ "github.com/osixia/container-baseimage/errors"
+ "github.com/osixia/container-baseimage/log"
+)
+
+// Filesystem functions
+// =============================
+
// FilePermFunc returns the permissions to apply to the given file path.
type FilePermFunc func(file string) fs.FileMode

// CopyFunc copies path (a file or a directory) to dest.
type CopyFunc func(path string, dest string) error
+
+func Create(name string) (*os.File, error) {
+ log.Tracef("Create directory %v", filepath.Dir(name))
+ if err := os.MkdirAll(filepath.Dir(name), 0755); err != nil {
+ return nil, err
+ }
+
+ log.Debugf("Create %v", name)
+ return os.Create(name)
+}
+
+func Remove(name string) error {
+ log.Debugf("Removing %v ...", name)
+ if err := os.Remove(name); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func Symlink(target string, dest string) error {
+
+ log.Tracef("Symlink called with target: %v, dest: %v", target, dest)
+
+ log.Tracef("Create directory %v", filepath.Dir(dest))
+ if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
+ return err
+ }
+
+ log.Debugf("Link %v to %v", target, dest)
+ if err := os.Symlink(target, dest); err != nil {
+ if link, _ := os.Readlink(dest); link != target {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func SymlinkAll(target string, dest string) error {
+
+ log.Tracef("SymlinkAll called with target: %v, dest: %v", target, dest)
+
+ isDir, err := IsDir(target)
+ if err != nil {
+ return err
+ }
+
+ if !isDir {
+ return Symlink(target, dest)
+ }
+
+ files, err := os.ReadDir(target)
+ if err != nil {
+ return err
+ }
+
+ for _, file := range files {
+ if err := Symlink(filepath.Join(target, file.Name()), filepath.Join(dest, file.Name())); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func ListFiles(fsys fs.FS) (files []string, err error) {
+
+ log.Tracef("ListFiles called with fs: %v", fsys)
+
+ err = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
+ if d.IsDir() {
+ return nil
+ }
+
+ files = append(files, path)
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
+
+func Copy(path string, dest string) error {
+
+ log.Tracef("Copy called with path: %v, dest: %v", path, dest)
+
+ isDir, err := IsDir(path)
+ if err != nil {
+ return err
+ }
+
+ var copyFunc CopyFunc = CopyFile
+ if isDir {
+ copyFunc = CopyDir
+ }
+
+ if err := copyFunc(path, dest); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func CopyDir(dir string, dest string) error {
+
+ log.Tracef("CopyDir called with dir: %v, dest: %v", dir, dest)
+
+ files, err := ListFiles(os.DirFS(dir))
+ if err != nil {
+ return err
+ }
+
+ for _, f := range files {
+
+ fp := filepath.Join(dir, f)
+ dest := filepath.Join(dest, f)
+
+ if err := CopyFile(fp, dest); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func CopyFile(file string, dest string) error {
+
+ log.Tracef("CopyFile called with file: %v, dest: %v", file, dest)
+ log.Debugf("Copying %v to %v ...", file, dest)
+
+ inputInfo, err := os.Stat(file)
+ if err != nil {
+ return err
+ }
+
+ input, err := os.ReadFile(file)
+ if err != nil {
+ return err
+ }
+
+ if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
+ return err
+ }
+
+ err = os.WriteFile(dest, input, inputInfo.Mode().Perm())
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func IsDir(name string) (bool, error) {
+
+ log.Tracef("IsDir called with name: %v", name)
+
+ if fi, err := os.Stat(name); err != nil || !fi.Mode().IsDir() {
+ return false, err
+ }
+
+ return true, nil
+}
+
+func IsFile(name string) (bool, error) {
+
+ log.Tracef("IsFile called with name: %v", name)
+
+ if fi, err := os.Stat(name); err != nil || fi.Mode().IsDir() {
+ return false, err
+ }
+
+ return true, nil
+}
+
+func NewFSWatcher(paths ...string) (*fsnotify.Watcher, error) {
+ if len(paths) < 1 {
+ return nil, fmt.Errorf("paths: %w", errors.ErrRequired)
+ }
+
+ // Create a new watcher.
+ w, err := fsnotify.NewWatcher()
+ if err != nil {
+ return nil, err
+ }
+
+ // Add all paths from the commandline.
+ for _, p := range paths {
+ err = w.Add(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return w, nil
+}
diff --git a/helpers/pid.go b/helpers/pid.go
new file mode 100644
index 00000000..3dac9758
--- /dev/null
+++ b/helpers/pid.go
@@ -0,0 +1,72 @@
+package helpers
+
+import (
+ "os"
+ "strconv"
+ "syscall"
+
+ "github.com/osixia/container-baseimage/log"
+)
+
+// PIDs functions
+// =============================
+
+func ListPIDs() ([]int, error) {
+
+ log.Tracef("ListPIDs called")
+
+ var ret []int
+
+ d, err := os.Open("/proc")
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ fnames, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ for _, fname := range fnames {
+ pid, err := strconv.ParseInt(fname, 10, 32)
+ if err != nil {
+ // if not numeric name, just skip
+ continue
+ }
+
+ ipid := int(pid)
+
+ // ignore self pid
+ if ipid == os.Getpid() {
+ continue
+ }
+
+ ret = append(ret, ipid)
+ }
+
+ return ret, nil
+}
+
+// Signals functions
+// =============================
+
+func KillAll(sig syscall.Signal) error {
+
+ log.Tracef("KillAll called with sig: %v", sig)
+
+ pids, err := ListPIDs()
+ if err != nil {
+ return nil
+ }
+
+ log.Tracef("pids: %v", pids)
+
+ for _, pid := range pids {
+ log.Tracef("Sending %v to pid %v ...", sig, pid)
+ if err := syscall.Kill(pid, sig); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/image/Dockerfile b/image/Dockerfile
deleted file mode 100644
index be4b87b1..00000000
--- a/image/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM debian:buster-slim
-
-COPY . /container
-RUN /container/build.sh
-
-ENV LANG="en_US.UTF-8" \
- LANGUAGE="en_US:en" \
- LC_ALL="en_US.UTF-8"
-
-ENTRYPOINT ["/container/tool/run"]
diff --git a/image/build.sh b/image/build.sh
deleted file mode 100755
index 342444e6..00000000
--- a/image/build.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh -ex
-
-## Add bash tools to /sbin
-ln -s /container/tool/* /sbin/
-
-mkdir -p /container/service
-mkdir -p /container/environment /container/environment/startup
-chmod 700 /container/environment/ /container/environment/startup
-
-groupadd -g 8377 docker_env
-
-# dpkg options
-cp /container/file/dpkg_nodoc /etc/dpkg/dpkg.cfg.d/01_nodoc
-cp /container/file/dpkg_nolocales /etc/dpkg/dpkg.cfg.d/01_nolocales
-
-# General config
-export LC_ALL=C
-export DEBIAN_FRONTEND=noninteractive
-MINIMAL_APT_GET_INSTALL='apt-get install -y --no-install-recommends'
-
-## Prevent initramfs updates from trying to run grub and lilo.
-## https://journal.paul.querna.org/articles/2013/10/15/docker-ubuntu-on-rackspace/
-## http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594189
-export INITRD=no
-printf no > /container/environment/INITRD
-
-apt-get update
-
-## Fix some issues with APT packages.
-## See https://github.com/dotcloud/docker/issues/1024
-dpkg-divert --local --rename --add /sbin/initctl
-ln -sf /bin/true /sbin/initctl
-
-## Replace the 'ischroot' tool to make it always return true.
-## Prevent initscripts updates from breaking /dev/shm.
-## https://journal.paul.querna.org/articles/2013/10/15/docker-ubuntu-on-rackspace/
-## https://bugs.launchpad.net/launchpad/+bug/974584
-dpkg-divert --local --rename --add /usr/bin/ischroot
-ln -sf /bin/true /usr/bin/ischroot
-
-## Install apt-utils.
-$MINIMAL_APT_GET_INSTALL apt-utils apt-transport-https ca-certificates locales procps dirmngr gnupg iproute2 python3-minimal python3-yaml
-
-## Upgrade all packages.
-apt-get dist-upgrade -y --no-install-recommends -o Dpkg::Options::="--force-confold"
-
-# fix locale
-echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
-locale-gen en_US
-update-locale LANG=en_US.UTF-8 LC_CTYPE=en_US.UTF-8
-
-printf en_US.UTF-8 > /container/environment/LANG
-printf en_US.UTF-8 > /container/environment/LANGUAGE
-printf en_US.UTF-8 > /container/environment/LC_CTYPE
-
-apt-get clean
-rm -rf /tmp/* /var/tmp/*
-rm -rf /var/lib/apt/lists/*
-
-# Remove useless files
-rm -rf /container/file
-rm -rf /container/build.sh /container/Dockerfile
diff --git a/image/file/dpkg_nodoc b/image/file/dpkg_nodoc
deleted file mode 100644
index 7320020e..00000000
--- a/image/file/dpkg_nodoc
+++ /dev/null
@@ -1,9 +0,0 @@
-path-exclude /usr/share/doc/*
-# we need to keep copyright files for legal reasons
-path-include /usr/share/doc/*/copyright
-path-exclude /usr/share/man/*
-path-exclude /usr/share/groff/*
-path-exclude /usr/share/info/*
-# lintian stuff is small, but really unnecessary
-path-exclude /usr/share/lintian/*
-path-exclude /usr/share/linda/*
diff --git a/image/file/dpkg_nolocales b/image/file/dpkg_nolocales
deleted file mode 100644
index 384dc19b..00000000
--- a/image/file/dpkg_nolocales
+++ /dev/null
@@ -1,2 +0,0 @@
-path-exclude /usr/share/locale/*
-path-include /usr/share/locale/en*
diff --git a/image/service-available/:cron/download.sh b/image/service-available/:cron/download.sh
deleted file mode 100755
index b4f814a0..00000000
--- a/image/service-available/:cron/download.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -e
-
-# download cron from apt-get
-LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends cron
diff --git a/image/service-available/:cron/process.sh b/image/service-available/:cron/process.sh
deleted file mode 100755
index 6b4d6332..00000000
--- a/image/service-available/:cron/process.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -e
-log-helper level eq trace && set -x
-
-exec /usr/sbin/cron -f
diff --git a/image/service-available/:cron/startup.sh b/image/service-available/:cron/startup.sh
deleted file mode 100755
index 5f799253..00000000
--- a/image/service-available/:cron/startup.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh -e
-log-helper level eq trace && set -x
-
-# prevent NUMBER OF HARD LINKS > 1 error
-# https://github.com/phusion/baseimage-docker/issues/198
-touch /etc/crontab /etc/cron.d /etc/cron.daily /etc/cron.hourly /etc/cron.monthly /etc/cron.weekly
-
-find /etc/cron.d/ -exec touch {} \;
-find /etc/cron.daily/ -exec touch {} \;
-find /etc/cron.hourly/ -exec touch {} \;
-find /etc/cron.monthly/ -exec touch {} \;
-find /etc/cron.weekly/ -exec touch {} \;
diff --git a/image/service-available/:logrotate/download.sh b/image/service-available/:logrotate/download.sh
deleted file mode 100755
index 155c7abe..00000000
--- a/image/service-available/:logrotate/download.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -e
-
-# download logrotate from apt-get
-LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends logrotate
diff --git a/image/service-available/:logrotate/startup.sh b/image/service-available/:logrotate/startup.sh
deleted file mode 100755
index 2d2d1223..00000000
--- a/image/service-available/:logrotate/startup.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -e
-log-helper level eq trace && set -x
-ln -sf "${CONTAINER_SERVICE_DIR}/:logrotate/assets/config/logrotate.conf" /etc/logrotate.conf
-ln -sf "${CONTAINER_SERVICE_DIR}/:logrotate/assets/config/logrotate_syslogng" /etc/logrotate.d/syslog-ng
-
-chmod 444 -R "${CONTAINER_SERVICE_DIR}"/:logrotate/assets/config/*
diff --git a/image/service-available/:runit/download.sh b/image/service-available/:runit/download.sh
deleted file mode 100755
index e1d66a2e..00000000
--- a/image/service-available/:runit/download.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -e
-
-# download runit from apt-get
-LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends runit
diff --git a/image/service-available/:ssl-tools/assets/cfssl-default-env b/image/service-available/:ssl-tools/assets/cfssl-default-env
deleted file mode 100644
index ed8d49f9..00000000
--- a/image/service-available/:ssl-tools/assets/cfssl-default-env
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-#
-# Default CA config
-#
-CFSSL_DEFAULT_CACERT="${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/default-ca/default-ca.pem"
-CFSSL_DEFAULT_CA_KEY="${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/default-ca/default-ca-key.pem"
-CFSSL_DEFAULT_CA_CONFIG="${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/default-ca/config/ca-config.json"
-CFSSL_DEFAULT_CSR="${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/default-ca/config/req-csr.json.tmpl"
-
-# default csr file params
-CFSSL_DEFAULT_CA_CSR_CN=${CFSSL_DEFAULT_CA_CSR_CN:-${HOSTNAME}}
-
-CFSSL_DEFAULT_CA_CSR_KEY_ALGO=${CFSSL_DEFAULT_CA_CSR_KEY_ALGO:-"ecdsa"}
-CFSSL_DEFAULT_CA_CSR_KEY_SIZE=${CFSSL_DEFAULT_CA_CSR_KEY_SIZE:-384}
-
-CFSSL_DEFAULT_CA_CSR_ORGANIZATION=${CFSSL_DEFAULT_CA_CSR_ORGANIZATION:-"A1A Car Wash"}
-CFSSL_DEFAULT_CA_CSR_ORGANIZATION_UNIT=${CFSSL_DEFAULT_CA_CSR_ORGANIZATION_UNIT:-"Information Technology Dep."}
-CFSSL_DEFAULT_CA_CSR_LOCATION=${CFSSL_DEFAULT_CA_CSR_LOCATION:-"Albuquerque"}
-CFSSL_DEFAULT_CA_CSR_STATE=${CFSSL_DEFAULT_CA_CSR_STATE:-"New Mexico"}
-CFSSL_DEFAULT_CA_CSR_COUNTRY=${CFSSL_DEFAULT_CA_CSR_COUNTRY:-"US"}
-
-#
-# General CFSSL config
-#
-
-CFSSL_RETRY=${CFSSL_RETRY:-3}
-CFSSL_RETRY_DELAY=${CFSSL_RETRY_DELAY:-1}
-
-# remote config
-CFSSL_REMOTE=${CFSSL_REMOTE:-}
-CFSSL_REMOTE_HTTPS_CA_CERT=${CFSSL_REMOTE_HTTPS_CA_CERT:-}
-
-# local config
-CFSSL_CA_CERT=${CFSSL_CA_CERT:-${CFSSL_DEFAULT_CACERT}}
-CFSSL_CA_KEY=${CFSSL_CA_KEY:-${CFSSL_DEFAULT_CA_KEY}}
-
-# gencert
-CFSSL_CSR=${CFSSL_CSR:-${CFSSL_DEFAULT_CSR}}
-CFSSL_CSR_JSON=${CFSSL_CSR_JSON:-}
-CFSSL_CONFIG=${CFSSL_CONFIG:-${CFSSL_CA_CONFIG}}
-CFSSL_CONFIG_JSON=${CFSSL_CONFIG_JSON:-${CFSSL_CA_CONFIG_JSON}}
-CFSSL_HOSTNAME=${CFSSL_HOSTNAME:-${HOSTNAME}}
-CFSSL_PROFILE=${CFSSL_PROFILE:-}
-CFSSL_LABEL=${CFSSL_LABEL:-}
diff --git a/image/service-available/:ssl-tools/assets/default-ca/README.md b/image/service-available/:ssl-tools/assets/default-ca/README.md
deleted file mode 100644
index 5c4f2021..00000000
--- a/image/service-available/:ssl-tools/assets/default-ca/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# How to generate the default CA:
-cfssl gencert -initca config/ca-csr.json | cfssljson -bare default-ca
diff --git a/image/service-available/:ssl-tools/assets/default-ca/config/ca-config.json b/image/service-available/:ssl-tools/assets/default-ca/config/ca-config.json
deleted file mode 100644
index e492de1a..00000000
--- a/image/service-available/:ssl-tools/assets/default-ca/config/ca-config.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "signing": {
- "default": {
- "usages": [
- "signing",
- "key encipherment",
- "server auth",
- "client auth"
- ],
- "expiry": "8760h"
- }
- }
-}
diff --git a/image/service-available/:ssl-tools/assets/default-ca/config/ca-csr.json b/image/service-available/:ssl-tools/assets/default-ca/config/ca-csr.json
deleted file mode 100644
index d9fb6d37..00000000
--- a/image/service-available/:ssl-tools/assets/default-ca/config/ca-csr.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "CN": "docker-light-baseimage",
- "key": {
- "algo": "ecdsa",
- "size": 384
- },
- "names": [
- {
- "O": "A1A Car Wash",
- "OU": "Information Technology Dep.",
- "L": "Albuquerque",
- "ST": "New Mexico",
- "C": "US"
- }
- ]
-}
diff --git a/image/service-available/:ssl-tools/assets/default-ca/config/req-csr.json.tmpl b/image/service-available/:ssl-tools/assets/default-ca/config/req-csr.json.tmpl
deleted file mode 100644
index d9f45453..00000000
--- a/image/service-available/:ssl-tools/assets/default-ca/config/req-csr.json.tmpl
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "CN": "{{ CFSSL_DEFAULT_CA_CSR_CN }}",
- "hosts": [
- "{{ CFSSL_DEFAULT_CA_CSR_CN }}"
- ],
- "key": {
- "algo": "{{ CFSSL_DEFAULT_CA_CSR_KEY_ALGO }}",
- "size": {{ CFSSL_DEFAULT_CA_CSR_KEY_SIZE }}
- },
- "names": [
- {
- "O": "{{ CFSSL_DEFAULT_CA_CSR_ORGANIZATION }}",
- "OU": "{{ CFSSL_DEFAULT_CA_CSR_ORGANIZATION_UNIT }}",
- "L": "{{ CFSSL_DEFAULT_CA_CSR_LOCATION }}",
- "ST": "{{ CFSSL_DEFAULT_CA_CSR_STATE }}",
- "C": "{{ CFSSL_DEFAULT_CA_CSR_COUNTRY }}"
- }
- ]
-}
diff --git a/image/service-available/:ssl-tools/assets/default-ca/default-ca-key.pem b/image/service-available/:ssl-tools/assets/default-ca/default-ca-key.pem
deleted file mode 100644
index b7a004cd..00000000
--- a/image/service-available/:ssl-tools/assets/default-ca/default-ca-key.pem
+++ /dev/null
@@ -1,6 +0,0 @@
------BEGIN EC PRIVATE KEY-----
-MIGkAgEBBDABfvSnlC9AZQjMRTc5o/BcUQCoBkVN8y17VaezYR709tqPptcQ9fC9
-4wtM1qDVho2gBwYFK4EEACKhZANiAATWvTsmK1cEzy4711tv5oRRTJkAGUhYsoKP
-YV6p8M/zQ8tGbkCrFBc0nnelFzbtXkIDB00rsFotk3W4El/KWs/sNkBs5tkFoUBZ
-HAPeqc01M40Gpw77qoFVIU1rJiNOFNk=
------END EC PRIVATE KEY-----
diff --git a/image/service-available/:ssl-tools/assets/default-ca/default-ca.csr b/image/service-available/:ssl-tools/assets/default-ca/default-ca.csr
deleted file mode 100644
index c173322f..00000000
--- a/image/service-available/:ssl-tools/assets/default-ca/default-ca.csr
+++ /dev/null
@@ -1,11 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIBkTCCARYCAQAwgZYxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgTWV4aWNv
-MRQwEgYDVQQHEwtBbGJ1cXVlcnF1ZTEVMBMGA1UEChMMQTFBIENhciBXYXNoMSQw
-IgYDVQQLExtJbmZvcm1hdGlvbiBUZWNobm9sb2d5IERlcC4xHzAdBgNVBAMTFmRv
-Y2tlci1saWdodC1iYXNlaW1hZ2UwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATWvTsm
-K1cEzy4711tv5oRRTJkAGUhYsoKPYV6p8M/zQ8tGbkCrFBc0nnelFzbtXkIDB00r
-sFotk3W4El/KWs/sNkBs5tkFoUBZHAPeqc01M40Gpw77qoFVIU1rJiNOFNmgADAK
-BggqhkjOPQQDAwNpADBmAjEApeMZVHllW2JJGkaFQ6DAJXTKvISiPwj8L41AeSJk
-LkrdH/eq6toM06sWkSCTdsJxAjEAlJKESvBJA3MZPmUGhG4AqZ70nHTvz0GJ1fsB
-T5TnyBv0ERmNCCQo3AaHJLkSqDfo
------END CERTIFICATE REQUEST-----
diff --git a/image/service-available/:ssl-tools/assets/default-ca/default-ca.pem b/image/service-available/:ssl-tools/assets/default-ca/default-ca.pem
deleted file mode 100644
index 382c4f6f..00000000
--- a/image/service-available/:ssl-tools/assets/default-ca/default-ca.pem
+++ /dev/null
@@ -1,17 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICrjCCAjWgAwIBAgIUcun3KuyYiVryQCfOcWz7gNP0x/AwCgYIKoZIzj0EAwMw
-gZYxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgTWV4aWNvMRQwEgYDVQQHEwtB
-bGJ1cXVlcnF1ZTEVMBMGA1UEChMMQTFBIENhciBXYXNoMSQwIgYDVQQLExtJbmZv
-cm1hdGlvbiBUZWNobm9sb2d5IERlcC4xHzAdBgNVBAMTFmRvY2tlci1saWdodC1i
-YXNlaW1hZ2UwHhcNMjEwMTE2MTE0MjAwWhcNMjYwMTE1MTE0MjAwWjCBljELMAkG
-A1UEBhMCVVMxEzARBgNVBAgTCk5ldyBNZXhpY28xFDASBgNVBAcTC0FsYnVxdWVy
-cXVlMRUwEwYDVQQKEwxBMUEgQ2FyIFdhc2gxJDAiBgNVBAsTG0luZm9ybWF0aW9u
-IFRlY2hub2xvZ3kgRGVwLjEfMB0GA1UEAxMWZG9ja2VyLWxpZ2h0LWJhc2VpbWFn
-ZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABNa9OyYrVwTPLjvXW2/mhFFMmQAZSFiy
-go9hXqnwz/NDy0ZuQKsUFzSed6UXNu1eQgMHTSuwWi2TdbgSX8paz+w2QGzm2QWh
-QFkcA96pzTUzjQanDvuqgVUhTWsmI04U2aNCMEAwDgYDVR0PAQH/BAQDAgEGMA8G
-A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNcSeGQ+1u3nsr2BcYY2jVecyBQlMAoG
-CCqGSM49BAMDA2cAMGQCMBHppmoY8E2fv0PIg8lR3Xq4bKNTH7cG3WEbR10NHPeJ
-NHtBrXWsnjAouXKFGS+1vgIwAVP1gZCPOTvChfTF8uOHW7RZ3UnC3xcJlGaOrC7s
-uElSBnLT7DIT3uBSxmIegHNH
------END CERTIFICATE-----
diff --git a/image/service-available/:ssl-tools/assets/default-env b/image/service-available/:ssl-tools/assets/default-env
deleted file mode 100644
index 96e53808..00000000
--- a/image/service-available/:ssl-tools/assets/default-env
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-SSL_HELPER_TOOL=${SSL_HELPER_TOOL:-"cfssl-helper"}
-
-SSL_HELPER_AUTO_RENEW=${SSL_HELPER_AUTO_RENEW:-false}
-SSL_HELPER_AUTO_RENEW_CRON_EXP=${SSL_HELPER_AUTO_RENEW_CRON_EXP:-"0 0 * * *"} # every day at 00:00
-SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED=${SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED:-}
-SSL_HELPER_AUTO_RENEW_FROM_FILES=${SSL_HELPER_AUTO_RENEW_FROM_FILES:-false}
-SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE=${SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE:-}
-SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE=${SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE:-}
-SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE=${SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE:-}
diff --git a/image/service-available/:ssl-tools/assets/jsonssl-default-env b/image/service-available/:ssl-tools/assets/jsonssl-default-env
deleted file mode 100644
index 0aa00df9..00000000
--- a/image/service-available/:ssl-tools/assets/jsonssl-default-env
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-JSONSSL_FILE_DEFAULT="${CONTAINER_SERVICE_DIR}/ssl-tools/assets/certs/certs.json"
-
-JSONSSL_FILE=${JSONSSL_FILE:-} # don't set default immediatly because we print a warning in jsonssl-helper
-JSONSSL_HOSTNAME=${JSONSSL_HOSTNAME:-${HOSTNAME}}
-JSONSSL_PROFILE=${JSONSSL_PROFILE:-} # traefik / traefik_up_to_v1_6
-
-JSONSSL_GET_CA_CERT_CMD=${JSONSSL_GET_CA_CERT_CMD:-}
-JSONSSL_GET_CERT_CMD=${JSONSSL_GET_CERT_CMD:-}
-JSONSSL_GET_KEY_CMD=${JSONSSL_GET_KEY_CMD:-}
diff --git a/image/service-available/:ssl-tools/assets/tool/cfssl-helper b/image/service-available/:ssl-tools/assets/tool/cfssl-helper
deleted file mode 100755
index 9e144221..00000000
--- a/image/service-available/:ssl-tools/assets/tool/cfssl-helper
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/bin/bash
-log-helper level eq trace && set -x
-
-# This tool helps to generate tls certificates with cfssl
-# It takes cfssl configuration from environment variable.
-# See cfssl-default-env file
-
-PREFIX=$1
-CERT_FILE=$2
-KEY_FILE=$3
-CA_FILE=$4
-
-log-helper debug "cfssl-helper is launched, everybody on the floor!"
-
-# before 0.2.5 retro compatibility, will be removed.
-mkdir -p "${CONTAINER_SERVICE_DIR}/:cfssl/assets/default-ca"
-ln -sf "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/default-ca/default-ca.pem" "${CONTAINER_SERVICE_DIR}/:cfssl/assets/default-ca/default-ca.pem"
-
-if [ -z "${PREFIX}" ] || [ -z "${CERT_FILE}" ] || [ -z "${KEY_FILE}" ] || [ -z "${CA_FILE}" ]; then
- log-helper error "Usage: cfssl-helper prefix cert_file key_file ca_file"
- exit 1
-fi
-
-if [ ! -e "${CERT_FILE}" ] && [ ! -e "${KEY_FILE}" ]; then
-
- log-helper info "No certificate file and certificate key provided, generate:"
- log-helper info "${CERT_FILE} and ${KEY_FILE}"
-
- LOG_LEVEL_PARAM=""
-
- case ${CONTAINER_LOG_LEVEL} in
- 0 )
- LOG_LEVEL_PARAM="-loglevel 4";;
- 1 )
- LOG_LEVEL_PARAM="-loglevel 3";;
- 2 )
- LOG_LEVEL_PARAM="-loglevel 2";;
- 3 )
- LOG_LEVEL_PARAM="-loglevel 1";;
- 4 )
- LOG_LEVEL_PARAM="-loglevel 0";;
- 5 )
- LOG_LEVEL_PARAM="-loglevel 0";;
- esac
-
- # set env vars
- PREFIX=${PREFIX^^} # uppercase
-
- # search for prefixed env var first
-
- # set prefix variable name
- # example : PREFIX_CFSSL_REMOTE='MARIADB_CFSSL_REMOTE'
- PREFIX_CFSSL_REMOTE=${PREFIX}_CFSSL_REMOTE
- PREFIX_CFSSL_REMOTE_HTTPS_CA_CERT=${PREFIX}_CFSSL_REMOTE_HTTPS_CA_CERT
- PREFIX_CFSSL_CA_CERT=${PREFIX}_CFSSL_CA_CERT
- PREFIX_CFSSL_CA_KEY=${PREFIX}_CFSSL_CA_KEY
- PREFIX_CFSSL_CSR=${PREFIX}_CFSSL_CSR
- PREFIX_CFSSL_CSR_JSON=${PREFIX}_CFSSL_CSR_JSON
- PREFIX_CFSSL_CONFIG=${PREFIX}_CFSSL_CONFIG
- PREFIX_CFSSL_CONFIG_JSON=${PREFIX}_CFSSL_CONFIG_JSON
- PREFIX_CFSSL_HOSTNAME=${PREFIX}_CFSSL_HOSTNAME
- PREFIX_CFSSL_PROFILE=${PREFIX}_CFSSL_PROFILE
- PREFIX_CFSSL_LABEL=${PREFIX}_CFSSL_LABEL
- PREFIX_CFSSL_RETRY=${PREFIX}_CFSSL_RETRY
- PREFIX_CFSSL_RETRY_DELAY=${PREFIX}_CFSSL_RETRY_DELAY
-
- # assign CFSSL_REMOTE=${!PREFIX_CFSSL_REMOTE} if value is not empty otherwise CFSSL_REMOTE=CFSSL_REMOTE
- CFSSL_REMOTE=${!PREFIX_CFSSL_REMOTE:-$CFSSL_REMOTE}
- CFSSL_REMOTE_HTTPS_CA_CERT=${!PREFIX_CFSSL_REMOTE_HTTPS_CA_CERT:-$CFSSL_REMOTE_HTTPS_CA_CERT}
- CFSSL_CA_CERT=${!PREFIX_CFSSL_CA_CERT:-$CFSSL_CA_CERT}
- CFSSL_CA_KEY=${!PREFIX_CFSSL_CA_KEY:-$CFSSL_CA_KEY}
- CFSSL_CSR=${!PREFIX_CFSSL_CSR:-$CFSSL_CSR}
- CFSSL_CSR_JSON=${!PREFIX_CFSSL_CSR_JSON:-$CFSSL_CSR_JSON}
- CFSSL_CONFIG=${!PREFIX_CFSSL_CONFIG:-$CFSSL_CONFIG}
- CFSSL_CONFIG_JSON=${!PREFIX_CFSSL_CONFIG_JSON:-$CFSSL_CONFIG_JSON}
- CFSSL_HOSTNAME=${!PREFIX_CFSSL_HOSTNAME:-$CFSSL_HOSTNAME}
- CFSSL_PROFILE=${!PREFIX_CFSSL_PROFILE:-$CFSSL_PROFILE}
- CFSSL_LABEL=${!PREFIX_CFSSL_LABEL:-$CFSSL_LABEL}
- CFSSL_RETRY=${!PREFIX_CFSSL_RETRY:-$CFSSL_RETRY}
- CFSSL_RETRY_DELAY=${!PREFIX_CFSSL_RETRY_DELAY:-$CFSSL_RETRY_DELAY}
-
- source "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/cfssl-default-env"
-
- # set csr file
- CSR_FILE="/tmp/csr-file"
- if [ -n "${CFSSL_CSR_JSON}" ]; then
- log-helper debug "use CFSSL_CSR_JSON value as csr file"
- echo "${CFSSL_CSR_JSON}" > "${CSR_FILE}"
- elif [ -n "${CFSSL_CSR}" ]; then
- log-helper debug "use ${CFSSL_CSR} as csr file"
- cp -f "${CFSSL_CSR}" "${CSR_FILE}"
-
- # it's the default csr
- if [ "${CFSSL_CSR}" = "${CFSSL_DEFAULT_CSR}" ]; then
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_CN }}|${CFSSL_DEFAULT_CA_CSR_CN}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_KEY_ALGO }}|${CFSSL_DEFAULT_CA_CSR_KEY_ALGO}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_KEY_SIZE }}|${CFSSL_DEFAULT_CA_CSR_KEY_SIZE}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_CERT_ORGANIZATION_UNIT }}|${CFSSL_CERT_ORGANIZATION_UNIT}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_ORGANIZATION }}|${CFSSL_DEFAULT_CA_CSR_ORGANIZATION}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_ORGANIZATION_UNIT }}|${CFSSL_DEFAULT_CA_CSR_ORGANIZATION_UNIT}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_LOCATION }}|${CFSSL_DEFAULT_CA_CSR_LOCATION}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_STATE }}|${CFSSL_DEFAULT_CA_CSR_STATE}|g" "${CSR_FILE}"
- sed -i "s|{{ CFSSL_DEFAULT_CA_CSR_COUNTRY }}|${CFSSL_DEFAULT_CA_CSR_COUNTRY}|g" "${CSR_FILE}"
- fi
- else
- log-helper error "error: no csr file provided"
- log-helper error "CFSSL_CSR_JSON and CFSSL_CSR are empty"
- exit 1
- fi
-
- # generate cert
- CONFIG_FILE="/tmp/config-file"
- CERT_NAME="cert"
-
- REMOTE_PARAM=""
- CA_CERT_PARAM=""
- CA_KEY_PARAM=""
- CONFIG_PARAM=""
- HOSTNAME_PARAM=""
- PROFILE_PARAM=""
- LABEL_PARAM=""
-
- if [ -n "${CFSSL_REMOTE}" ]; then
- REMOTE_PARAM="-remote=${CFSSL_REMOTE}"
-
- # add remote https ca cert to known certificates if not empty
- if [ -n "${CFSSL_REMOTE_HTTPS_CA_CERT}" ]; then
- if [ -e "${CFSSL_REMOTE_HTTPS_CA_CERT}" ]; then
- [[ ! -d "/etc/ssl/certs/" ]] && mkdir -p /etc/ssl/certs/
- cat "${CFSSL_REMOTE_HTTPS_CA_CERT}" >> /etc/ssl/certs/ca-certificates.crt
- else
- log-helper error "error: remote https ca cert file ${CFSSL_REMOTE_HTTPS_CA_CERT} not found"
- fi
- fi
-
- else
-
- # files path with : may cause issue with cfssl tools due to :
- # ReadBytes - https://github.com/cloudflare/cfssl/blob/master/helpers/helpers.go#L573
- # : is used to split env from file path
- # so we copy ca cert and key to tmp
- if [ -n "${CFSSL_CA_CERT}" ]; then
-
- CFSSL_CA_CERT_FILE="/tmp/ca-cert-file"
- cp -f "${CFSSL_CA_CERT}" "${CFSSL_CA_CERT_FILE}"
- chmod 644 "${CFSSL_CA_CERT_FILE}"
-
- CA_CERT_PARAM="-ca ${CFSSL_CA_CERT_FILE}"
- fi
-
- if [ -n "${CFSSL_CA_KEY}" ]; then
-
- CFSSL_CA_KEY_FILE="/tmp/ca-key-file"
- cp -f "${CFSSL_CA_KEY}" "${CFSSL_CA_KEY_FILE}"
- chmod 600 "${CFSSL_CA_CERT_FILE}"
-
- CA_KEY_PARAM="-ca-key ${CFSSL_CA_KEY_FILE}"
- fi
-
- fi
-
- if [ -n "${CFSSL_CONFIG_JSON}" ]; then
- log-helper debug "use CFSSL_CONFIG_JSON value as config file"
- echo "${CFSSL_CONFIG_JSON}" > "${CONFIG_FILE}"
- CONFIG_PARAM="-config ${CONFIG_FILE}"
-
- elif [ -n "${CFSSL_CONFIG}" ]; then
- log-helper debug "use ${CFSSL_CONFIG} as config file"
- cp -f "${CFSSL_CONFIG}" "${CONFIG_FILE}"
- CONFIG_PARAM="-config ${CONFIG_FILE}"
- fi
-
- if [ -n "$ADDITIONAL_HOSTNAMES" ]; then
- log-helper debug "additional hostnames found"
- CFSSL_HOSTNAME="${CFSSL_HOSTNAME},${ADDITIONAL_HOSTNAMES}"
- fi
-
- [[ -n "${CFSSL_HOSTNAME}" ]] && HOSTNAME_PARAM="-hostname ${CFSSL_HOSTNAME}"
- [[ -n "${CFSSL_PROFILE}" ]] && PROFILE_PARAM="-profile ${CFSSL_PROFILE}"
- [[ -n "${CFSSL_LABEL}" ]] && LABEL_PARAM="-label ${CFSSL_LABEL}"
-
- retry=0
- while [ $retry -lt "${CFSSL_RETRY}" ]; do
- log-helper debug "cfssl gencert ${LOG_LEVEL_PARAM} ${REMOTE_PARAM} ${CA_CERT_PARAM} ${CA_KEY_PARAM} ${CONFIG_PARAM} ${HOSTNAME_PARAM} ${PROFILE_PARAM} ${LABEL_PARAM} ${CSR_FILE} | cfssljson -bare /tmp/${CERT_NAME}"
- eval cfssl gencert "${LOG_LEVEL_PARAM}" "${REMOTE_PARAM}" "${CA_CERT_PARAM}" "${CA_KEY_PARAM}" "${CONFIG_PARAM}" "${HOSTNAME_PARAM}" "${PROFILE_PARAM}" "${LABEL_PARAM}" "${CSR_FILE}" | cfssljson -bare "/tmp/${CERT_NAME}" && break
- sleep "${CFSSL_RETRY_DELAY}"
- ((retry++))
- done
-
- # move generated files
- [[ ! -e "/tmp/${CERT_NAME}.pem" ]] && exit 1
- log-helper debug "move /tmp/${CERT_NAME}.pem to ${CERT_FILE}"
- mv "/tmp/${CERT_NAME}.pem" "${CERT_FILE}"
-
- log-helper debug "move /tmp/${CERT_NAME}-key.pem to ${KEY_FILE}"
- mv "/tmp/${CERT_NAME}-key.pem" "${KEY_FILE}"
-
- # if ca file don't exists
- if [ ! -e "${CA_FILE}" ]; then
-
- if [ -n "${CFSSL_REMOTE}" ]; then
- log-helper debug "Get CA certificate from ${CFSSL_REMOTE}"
-
- retry=0
- while [ $retry -lt "${CFSSL_RETRY}" ]; do
- log-helper debug "cfssl info ${LOG_LEVEL_PARAM} ${REMOTE_PARAM} ${CONFIG_PARAM} ${PROFILE_PARAM} ${LABEL_PARAM}"
- eval cfssl info "${LOG_LEVEL_PARAM}" "${REMOTE_PARAM}" "${CONFIG_PARAM}" "${PROFILE_PARAM}" "${LABEL_PARAM}" | sed -e "s/.*certificate\":\"\(.*-----\)\".*/\1/g" | sed 's/\\n/\n/g' > "${CA_FILE}" && log-helper debug "CA certificate returned save as ${CA_FILE}" && break
- sleep "${CFSSL_RETRY_DELAY}"
- ((retry++))
- done
-
- [[ ! -e "${CA_FILE}" ]] && exit 1
-
- elif [ -n "${CFSSL_CA_CERT}" ]; then
- log-helper info "Link ${CFSSL_CA_CERT} to ${CA_FILE}"
- ln -sf "${CFSSL_CA_CERT}" "${CA_FILE}"
- fi
-
- fi
-
- # delete tmp files
- rm -f /tmp/${CERT_NAME}.csr ${CONFIG_FILE} "${CSR_FILE}"
- [[ -e "${CFSSL_CA_CERT_FILE}" ]] && rm "${CFSSL_CA_CERT_FILE}"
- [[ -e "${CFSSL_CA_KEY_FILE}" ]] && rm "${CFSSL_CA_KEY_FILE}"
-
- log-helper debug "done :)"
-
- elif [ ! -e "${KEY_FILE}" ]; then
- log-helper error "Certificate file ${CERT_FILE} exists but not key file ${KEY_FILE}"
- exit 1
- elif [ ! -e "${CERT_FILE}" ]; then
- log-helper error "Key file ${KEY_FILE} exists but not certificate file ${CERT_FILE}"
- exit 1
-else
- log-helper debug "Files ${CERT_FILE} and ${KEY_FILE} exists, fix files permissions"
- chmod 644 "${CERT_FILE}"
- chmod 600 "${KEY_FILE}"
-fi
diff --git a/image/service-available/:ssl-tools/assets/tool/jsonssl-helper b/image/service-available/:ssl-tools/assets/tool/jsonssl-helper
deleted file mode 100755
index abd524aa..00000000
--- a/image/service-available/:ssl-tools/assets/tool/jsonssl-helper
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/bin/bash
-log-helper level eq trace && set -x
-
-# This tool helps get certificates from json files
-# like kubernetes secrets or traefik acme.json
-# It takes its configuration from environment variable.
-# See json-default-env file
-
-PREFIX=$1
-CERT_FILE=$2
-KEY_FILE=$3
-CA_FILE=$4
-
-log-helper debug "jsonssl-helper is launched, everybody on the floor!"
-
-if [ -z "${PREFIX}" ] || [ -z "${CERT_FILE}" ] || [ -z "${KEY_FILE}" ] || [ -z "${CA_FILE}" ]; then
- log-helper error "Usage: jsonssl-helper prefix cert_file key_file ca_file"
- exit 1
-fi
-
-if [ ! -e "${CERT_FILE}" ] && [ ! -e "${KEY_FILE}" ]; then
-
- # set env vars
- PREFIX=${PREFIX^^} # uppercase
-
- # search for prefixed env var first
-
- # set prefix variable name
- # example : PREFIX_JSONSSL_FILE='MARIADB_JSONSSL_FILE'
- PREFIX_JSONSSL_FILE=${PREFIX}_JSONSSL_FILE
- PREFIX_JSONSSL_HOSTNAME=${PREFIX}_JSONSSL_HOSTNAME
-
- PREFIX_JSONSSL_PROFILE=${PREFIX}_JSONSSL_PROFILE
- PREFIX_JSONSSL_GET_CA_CERT_CMD=${PREFIX}_JSONSSL_GET_CA_CERT_CMD
- PREFIX_JSONSSL_GET_CERT_CMD=${PREFIX}_JSONSSL_GET_CERT_CMD
- PREFIX_JSONSSL_GET_KEY_CMD=${PREFIX}_JSONSSL_GET_KEY_CMD
-
- # assign JSONSSL_FILE=${!PREFIX_JSONSSL_FILE} if value is not empty otherwise JSONSSL_FILE=JSONSSL_FILE
- JSONSSL_FILE=${!PREFIX_JSONSSL_FILE:-$JSONSSL_FILE}
- JSONSSL_HOSTNAME=${!PREFIX_JSONSSL_HOSTNAME:-$JSONSSL_HOSTNAME}
-
- JSONSSL_PROFILE=${!PREFIX_JSONSSL_PROFILE:-$JSONSSL_PROFILE}
- JSONSSL_GET_CA_CERT_CMD=${!PREFIX_JSONSSL_GET_CA_CERT_CMD:-$JSONSSL_GET_CA_CERT_CMD}
- JSONSSL_GET_CERT_CMD=${!PREFIX_JSONSSL_GET_CERT_CMD:-$JSONSSL_GET_CERT_CMD}
- JSONSSL_GET_KEY_CMD=${!PREFIX_JSONSSL_GET_KEY_CMD:-$JSONSSL_GET_KEY_CMD}
-
- source "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/jsonssl-default-env"
-
- if [ -z "${JSONSSL_FILE}" ]; then
- log-helper info "Variable JSONSSL_FILE is empty, set to default location:"
- log-helper info "JSONSSL_FILE=${JSONSSL_FILE_DEFAULT}"
- JSONSSL_FILE=${JSONSSL_FILE_DEFAULT}
- fi
-
- if [ ! -e "${JSONSSL_FILE}" ]; then
- log-helper error "JSONSSL_FILE file '${JSONSSL_FILE}' not found"
- exit 1
- fi
-
- # Json file profile, only traefik for now
- if [ "${JSONSSL_PROFILE,,}" = "traefik" ]; then
- # Let's Encrypt CA certificate is in cert file after the domain certificate.
- # So we took what's after the first cert.
- JSONSSL_GET_CA_CERT_CMD="awk '{if(found) print} /END CERTIFICATE/{found=1}' ${CERT_FILE}"
-
- JSONSSL_GET_CERT_CMD="cat ${JSONSSL_FILE} | jq -r '[.Certificates[]] | map(select(.Domain.Main == \"${JSONSSL_HOSTNAME}\")) | .[0].Certificate' | base64 -d"
- JSONSSL_GET_KEY_CMD="cat ${JSONSSL_FILE} | jq -r '[.Certificates[]] | map(select(.Domain.Main == \"${JSONSSL_HOSTNAME}\")) | .[0].Key' | base64 -d"
- elif [ "${JSONSSL_PROFILE,,}" = "traefik_up_to_v1_6" ]; then
- # Let's Encrypt CA certificate is in cert file after the domain certificate.
- # So we took what's after the first cert.
- JSONSSL_GET_CA_CERT_CMD="awk '{if(found) print} /END CERTIFICATE/{found=1}' ${CERT_FILE}"
-
- JSONSSL_GET_CERT_CMD="cat ${JSONSSL_FILE} | jq -r '[.[\"DomainsCertificate\"].Certs[].Certificate] | map(select(.Domain == \"${JSONSSL_HOSTNAME}\")) | .[0].Certificate' | base64 -d"
- JSONSSL_GET_KEY_CMD="cat ${JSONSSL_FILE} | jq -r '[.[\"DomainsCertificate\"].Certs[].Certificate] | map(select(.Domain == \"${JSONSSL_HOSTNAME}\")) | .[0].PrivateKey' | base64 -d"
- fi
-
- log-helper debug "Run JSONSSL_GET_CERT_CMD: ${JSONSSL_GET_CERT_CMD}"
- log-helper debug "put return in ${CERT_FILE}"
- eval "${JSONSSL_GET_CERT_CMD}" > "${CERT_FILE}"
-
- if [ ! -s "$CERT_FILE" ]; then
- log-helper error "Generated file '${CERT_FILE}' is empty"
- log-helper error "Set loglevel to debug for more information"
- exit 1
- fi
-
- log-helper debug "Run JSONSSL_GET_KEY_CMD: ${JSONSSL_GET_KEY_CMD}"
- log-helper debug "put return in ${KEY_FILE}"
- eval "$JSONSSL_GET_KEY_CMD" > "${KEY_FILE}"
-
- if [ ! -s "${KEY_FILE}" ]; then
- log-helper error "Generated file '${KEY_FILE}' is empty"
- log-helper error "Set loglevel to debug for more information"
- exit 1
- fi
-
- # if CA cert doesn't exist
- if [ ! -e "$CA_FILE" ]; then
- log-helper debug "Run JSONSSL_GET_CA_CERT_CMD: ${JSONSSL_GET_CA_CERT_CMD}"
- log-helper debug "put return in ${CA_FILE}"
- eval "$JSONSSL_GET_CA_CERT_CMD" > "${CA_FILE}"
-
- if [ ! -s "$CA_FILE" ]; then
- log-helper error "Generated file '${CA_FILE}' is empty"
- log-helper error "Set loglevel to debug for more information"
- exit 1
- fi
- fi
-
- log-helper debug "done :)"
-
- elif [ ! -e "${KEY_FILE}" ]; then
- log-helper error "Certificate file ${CERT_FILE} exists but not key file ${KEY_FILE}"
- exit 1
- elif [ ! -e "${CERT_FILE}" ]; then
- log-helper error "Key file ${KEY_FILE} exists but not certificate file ${CERT_FILE}"
- exit 1
-else
- log-helper debug "Files ${CERT_FILE} and ${KEY_FILE} exists, fix files permissions"
- chmod 644 "${CERT_FILE}"
- chmod 600 "${KEY_FILE}"
-fi
diff --git a/image/service-available/:ssl-tools/assets/tool/ssl-auto-renew b/image/service-available/:ssl-tools/assets/tool/ssl-auto-renew
deleted file mode 100755
index 78d8f7de..00000000
--- a/image/service-available/:ssl-tools/assets/tool/ssl-auto-renew
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/bin/bash -e
-
-# This file aims to be called by a cron task
-# and not directly. See ssl-helper.
-
-source /container/run/environment.sh
-
-SSL_HELPER_TOOL=$1
-PREFIX=$2
-CERT_FILE=$3
-KEY_FILE=$4
-CA_FILE=$5
-IMPACTED_SERVICES=$6
-JSONSSL_FILE=$7
-FROM_FILES=$8
-CERT_FROM_FILE=$9
-KEY_FROM_FILE=${10}
-CA_CERT_FROM_FILE=${11}
-
-function stop_impacted_services() {
- # Stop impacted services
- if [ -n "${IMPACTED_SERVICES}" ]; then
- log-helper info "Services to stop: ${IMPACTED_SERVICES}"
-
- impacted_services_table=("${IMPACTED_SERVICES}")
- for service in "${impacted_services_table[@]}"
- do
- log-helper info "Stopping ${service}..."
- sv stop "/container/run/process/${service}"
- done
-
- log-helper info "All services are stopped"
- fi
-}
-
-function start_impacted_services() {
- # restart impacted services
- if [ -n "${IMPACTED_SERVICES}" ]; then
-
- impacted_services_table=("${IMPACTED_SERVICES}")
- for service in "${impacted_services_table[@]}"
- do
- log-helper info "Starting ${service}..."
- sv start "/container/run/process/${service}"
- done
-
- log-helper info "All services are started"
- fi
-}
-
-# renew from container files
-if [ "${FROM_FILES,,}" = "true" ]; then
-
- log-helper info "Check renew from files"
- renew=false
-
- # File previous md5
- CERT_PREVIOUS_MD5=$(cat "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${CERT_FILE}.md5") || true
- KEY_PREVIOUS_MD5=$(cat "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${KEY_FILE}.md5") || true
- CA_CERT_PREVIOUS_MD5=$(cat "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${CA_FILE}.md5") || true
-
- # from file current md5
- FROM_CERT_MD5=$(md5sum "${CERT_FROM_FILE}" | awk '{ print $1 }')
- FROM_KEY_MD5=$(md5sum "${KEY_FROM_FILE}" | awk '{ print $1 }')
- FROM_CA_CERT_MD5=$(md5sum "${CA_CERT_FROM_FILE}" | awk '{ print $1 }')
-
- [[ "$CERT_PREVIOUS_MD5" != "$FROM_CERT_MD5" ]] && renew=true
- [[ "$KEY_PREVIOUS_MD5" != "$FROM_KEY_MD5" ]] && renew=true
- [[ "$CA_CERT_PREVIOUS_MD5" != "$FROM_CA_CERT_MD5" ]] && renew=true
-
- if ! $renew; then
- log-helper info "Certificate files are identicals"
- exit 0
- fi
-
- log-helper info "Certificate files are differents"
-
- stop_impacted_services
-
- if [ "${CERT_FROM_FILE}" != "${CERT_FILE}" ]; then
- log-helper info "Copy ${CERT_FROM_FILE} to ${CERT_FILE}"
- cp -f "${CERT_FROM_FILE}" "${CERT_FILE}"
- fi
-
- if [ "${KEY_FROM_FILE}" != "${KEY_FILE}" ]; then
- log-helper info "Copy ${KEY_FROM_FILE} to ${KEY_FILE}"
- cp -f "${KEY_FROM_FILE}" "${KEY_FILE}"
- fi
-
- if [ "${CA_CERT_FROM_FILE}" != "${CA_FILE}" ]; then
- log-helper info "Copy ${CA_CERT_FROM_FILE} to ${CA_FILE}"
- cp -f "${CA_CERT_FROM_FILE}" "${CA_FILE}"
- fi
-
- log-helper info "Update file md5 with new values"
- echo "${FROM_CERT_MD5}" > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${CERT_FILE}.md5"
- echo "${FROM_KEY_MD5}" > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${KEY_FILE}.md5"
- echo "${FROM_CA_CERT_MD5}" > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${CA_FILE}.md5"
-
- start_impacted_services
-
- # renew with cfssl or jsonssl
-else
- log-helper info "Check renew for cfssl or jsonssl"
-
- cert_ok=false
- ca_ok=false
-
- # the certificate will expired in the next day
- if openssl x509 -checkend 259200 -noout -in "${CERT_FILE}"; then
- log-helper info "The certificate '${CERT_FILE}' is ok for the next 3 days at least."
- cert_ok=true
- fi
-
- if openssl x509 -checkend 259200 -noout -in "${CA_FILE}"; then
- log-helper info "The CA certificate '${CA_FILE}' is ok for the next 3 days at least."
- ca_ok=true
- fi
-
- if [ "${SSL_HELPER_TOOL}" = "jsonssl-helper" ]; then
- log-helper info "Check if ${JSONSSL_FILE} has changed"
- JSONSSL_FILE_PREVIOUS_MD5=$(cat "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${JSONSSL_FILE}.md5") || true
- JSONSSL_FILE_MD5=$(md5sum "${JSONSSL_FILE}" | awk '{ print $1 }')
-
- [[ "${JSONSSL_FILE_PREVIOUS_MD5}" != "${JSONSSL_FILE_MD5}" ]] && cert_ok=false
- fi
-
- if ${cert_ok} && ${ca_ok}; then
- log-helper info "Nothing to do :)"
- exit 0
- fi
-
- log-helper info "Auto-renew on the way!"
-
- stop_impacted_services
-
- log-helper info "Remove certificate files"
- rm -f "${CERT_FILE}" "${KEY_FILE}" "${CA_FILE}"
-
- log-helper info "Regenerate certificate with ${SSL_HELPER_TOOL}"
- ${SSL_HELPER_TOOL} "${PREFIX}" "${CERT_FILE}" "${KEY_FILE}" "${CA_FILE}"
-
- start_impacted_services
-
- if [ "${SSL_HELPER_TOOL}" = "jsonssl-helper" ]; then
- log-helper info "Update file md5 with new values"
- echo "${JSONSSL_FILE_MD5}" > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${JSONSSL_FILE}.md5"
- fi
-
-fi
-
-log-helper info "Auto-renew finished! Champagne!"
diff --git a/image/service-available/:ssl-tools/assets/tool/ssl-helper b/image/service-available/:ssl-tools/assets/tool/ssl-helper
deleted file mode 100755
index 8a5d717f..00000000
--- a/image/service-available/:ssl-tools/assets/tool/ssl-helper
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/bin/bash -e
-log-helper level eq trace && set -x
-
-# This tool helps to generate tls certificates with cfssl
-# or get certificates from a json file
-
-PREFIX=$1
-CERT_FILE=$2
-KEY_FILE=$3
-CA_FILE=$4
-
-log-helper debug "Hi! I'm ssl-helper, what button should i press ?"
-
-# set env vars
-PREFIX=${PREFIX^^} # uppercase
-
-PREFIX_SSL_HELPER_TOOL=${PREFIX}_SSL_HELPER_TOOL
-PREFIX_SSL_HELPER_AUTO_RENEW=${PREFIX}_SSL_HELPER_AUTO_RENEW
-PREFIX_SSL_HELPER_AUTO_RENEW_CRON_EXP=${PREFIX}_SSL_HELPER_AUTO_RENEW_CRON_EXP
-PREFIX_SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED=${PREFIX}_SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED
-PREFIX_SSL_HELPER_AUTO_RENEW_FROM_FILES=${PREFIX}_SSL_HELPER_AUTO_RENEW_FROM_FILES
-PREFIX_SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE=${PREFIX}_SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE
-PREFIX_SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE=${PREFIX}_SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE
-PREFIX_SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE=${PREFIX}_SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE
-
-SSL_HELPER_TOOL=${!PREFIX_SSL_HELPER_TOOL:-$SSL_HELPER_TOOL}
-SSL_HELPER_AUTO_RENEW=${!PREFIX_SSL_HELPER_AUTO_RENEW:-$SSL_HELPER_AUTO_RENEW}
-SSL_HELPER_AUTO_RENEW_CRON_EXP=${!PREFIX_SSL_HELPER_AUTO_RENEW_CRON_EXP:-$SSL_HELPER_AUTO_RENEW_CRON_EXP}
-SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED=${!PREFIX_SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED:-$SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED}
-SSL_HELPER_AUTO_RENEW_FROM_FILES=${!PREFIX_SSL_HELPER_AUTO_RENEW_FROM_FILES:-$SSL_HELPER_AUTO_RENEW_FROM_FILES}
-SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE=${!PREFIX_SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE:-$SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE}
-SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE=${!PREFIX_SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE:-$SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE}
-SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE=${!PREFIX_SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE:-$SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE}
-
-source "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/default-env"
-
-# call the certificate tool cfssl-helper (default) or jsonssl-helper
-${SSL_HELPER_TOOL,,} "${PREFIX}" "${CERT_FILE}" "${KEY_FILE}" "${CA_FILE}"
-
-# auto-renew certificates just before it expired
-# or if source files have changed
-if [ "${SSL_HELPER_AUTO_RENEW,,}" = "true" ]; then
-
- # only for multiple process images (uses cron)
- if [ ! -e "/container/multiple_process_stack_added" ]; then
- log-helper error "auto-renew is available only with multiple process images"
- exit 1
- fi
-
- # if SSL_HELPER_AUTO_RENEW_FROM_FILES=true check certificate source files
- if [ "${SSL_HELPER_AUTO_RENEW_FROM_FILES,,}" = "true" ]; then
-
- [[ -z "${SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE}" ]] && SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE=${CERT_FILE}
- [[ -z "${SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE}" ]] && SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE=${KEY_FILE}
- [[ -z "${SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE}" ]] && SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE=${CA_FILE}
-
- if [ ! -e "${SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE}" ] || [ ! -e "${SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE}" ] || [ ! -e "${SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE}" ]; then
- log-helper error "with SSL_HELPER_AUTO_RENEW_FROM_FILES=true the following files must exists:"
- log-helper error "SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE=${SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE}"
- log-helper error "SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE=${SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE}"
- log-helper error "SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE=${SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE}"
- exit 1
- fi
-
- mkdir -p "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5$(dirname "${CERT_FILE}")"
- mkdir -p "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5$(dirname "${KEY_FILE}")"
- mkdir -p "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5$(dirname "${CA_FILE}")"
-
- # calculate certificates files md5
- md5sum "${CERT_FILE}" | awk '{ print $1 }' > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${CERT_FILE}.md5"
- md5sum "${KEY_FILE}" | awk '{ print $1 }' > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${KEY_FILE}.md5"
- md5sum "${CA_FILE}" | awk '{ print $1 }' > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${CA_FILE}.md5"
-
- fi
-
- if [ "${SSL_HELPER_TOOL,,}" = "jsonssl-helper" ]; then
-
- PREFIX_JSONSSL_FILE=${PREFIX}_JSONSSL_FILE
- JSONSSL_FILE=${!PREFIX_JSONSSL_FILE:-$JSONSSL_FILE}
-
- source "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/jsonssl-default-env"
-
- if [ -z "${JSONSSL_FILE}" ]; then
- JSONSSL_FILE=${JSONSSL_FILE_DEFAULT}
- fi
-
- # calculate jsonssl file md5
- mkdir -p "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5$(dirname "${JSONSSL_FILE}")"
- md5sum "${JSONSSL_FILE}" | awk '{ print $1 }' > "${CONTAINER_SERVICE_DIR}/:ssl-tools/assets/md5${JSONSSL_FILE}.md5"
-
- fi
-
- # add cron job
- echo "${SSL_HELPER_AUTO_RENEW_CRON_EXP} root /usr/sbin/ssl-auto-renew ${SSL_HELPER_TOOL,,} ${PREFIX} ${CERT_FILE} ${KEY_FILE} ${CA_FILE} \"${SSL_HELPER_AUTO_RENEW_SERVICES_IMPACTED}\" \"${JSONSSL_FILE}\" \"${SSL_HELPER_AUTO_RENEW_FROM_FILES}\" \"${SSL_HELPER_AUTO_RENEW_CERT_FROM_FILE}\" \"${SSL_HELPER_AUTO_RENEW_KEY_FROM_FILE}\" \"${SSL_HELPER_AUTO_RENEW_CA_CERT_FROM_FILE}\" 2>&1 | /usr/bin/logger -t cron_ssl_auto_renew" > "/etc/cron.d/${PREFIX}"
- chmod 600 "/etc/cron.d/${PREFIX}"
-
- # disable auto-renew if it was added
- elif [ -e "/etc/cron.d/${PREFIX}" ]; then
- rm -f "/etc/cron.d/${PREFIX}"
-fi
diff --git a/image/service-available/:ssl-tools/download.sh b/image/service-available/:ssl-tools/download.sh
deleted file mode 100755
index a699f0f9..00000000
--- a/image/service-available/:ssl-tools/download.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash -e
-
-UARCH=$(uname -m)
-echo "Architecture is ${UARCH}"
-
-case "${UARCH}" in
-
- "x86_64")
- HOST_ARCH="amd64"
- ;;
-
- "arm64" | "aarch64")
- HOST_ARCH="arm64"
- ;;
-
- "armv7l" | "armv6l" | "armhf")
- HOST_ARCH="arm"
- ;;
-
- "i386")
- HOST_ARCH="386"
- ;;
-
- *)
- echo "Architecture not supported. Exiting."
- exit 1
- ;;
-esac
-
-echo "Going to use ${HOST_ARCH} cfssl binaries"
-
-# download curl and ca-certificate from apt-get if needed
-to_install=()
-
-if [ "$(dpkg-query -W -f='${Status}' curl 2>/dev/null | grep -c "ok installed")" -eq 0 ]; then
- to_install+=("curl")
-fi
-
-if [ "$(dpkg-query -W -f='${Status}' ca-certificates 2>/dev/null | grep -c "ok installed")" -eq 0 ]; then
- to_install+=("ca-certificates")
-fi
-
-if [ ${#to_install[@]} -ne 0 ]; then
- LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends "${to_install[@]}"
-fi
-
-LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends openssl jq
-
-# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=923479
-if [[ "${HOST_ARCH}" == 'arm' ]]; then
- LC_ALL=C DEBIAN_FRONTEND=noninteractive c_rehash
-fi
-
-echo "Download cfssl ..."
-echo "curl -o /usr/sbin/cfssl -SL https://github.com/osixia/cfssl/releases/download/1.5.0/cfssl_linux-${HOST_ARCH}"
-curl -o /usr/sbin/cfssl -SL "https://github.com/osixia/cfssl/releases/download/1.5.0/cfssl_linux-${HOST_ARCH}"
-chmod 700 /usr/sbin/cfssl
-
-echo "Download cfssljson ..."
-echo "curl -o /usr/sbin/cfssljson -SL https://github.com/osixia/cfssl/releases/download/1.5.0/cfssljson_linux-${HOST_ARCH}"
-curl -o /usr/sbin/cfssljson -SL "https://github.com/osixia/cfssl/releases/download/1.5.0/cfssljson_linux-${HOST_ARCH}"
-chmod 700 /usr/sbin/cfssljson
-
-echo "Project sources: https://github.com/cloudflare/cfssl"
-
-# remove tools installed to download cfssl
-if [ ${#to_install[@]} -ne 0 ]; then
- apt-get remove -y --purge --auto-remove "${to_install[@]}"
-fi
diff --git a/image/service-available/:ssl-tools/startup.sh b/image/service-available/:ssl-tools/startup.sh
deleted file mode 100755
index 01890998..00000000
--- a/image/service-available/:ssl-tools/startup.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -e
-log-helper level eq trace && set -x
-
-chmod 700 "${CONTAINER_SERVICE_DIR}"/:ssl-tools/assets/tool/*
-ln -sf "${CONTAINER_SERVICE_DIR}"/:ssl-tools/assets/tool/* /usr/sbin
diff --git a/image/service-available/:syslog-ng-core/download.sh b/image/service-available/:syslog-ng-core/download.sh
deleted file mode 100755
index 92bc3df3..00000000
--- a/image/service-available/:syslog-ng-core/download.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -e
-
-# download syslog-ng-core from apt-get
-LC_ALL=C DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends syslog-ng-core
diff --git a/image/service-available/:syslog-ng-core/process.sh b/image/service-available/:syslog-ng-core/process.sh
deleted file mode 100755
index c842af4a..00000000
--- a/image/service-available/:syslog-ng-core/process.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh -e
-log-helper level eq trace && set -x
-
-PIDFILE="/var/run/syslog-ng.pid"
-SYSLOGNG_OPTS=""
-
-[ -r /etc/default/syslog-ng ] && . /etc/default/syslog-ng
-
-exec /usr/sbin/syslog-ng --pidfile "$PIDFILE" -F $SYSLOGNG_OPTS
diff --git a/image/tool/add-multiple-process-stack b/image/tool/add-multiple-process-stack
deleted file mode 100755
index 131d8a7f..00000000
--- a/image/tool/add-multiple-process-stack
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh -e
-echo "Install the multiple process stack: runit, syslog-ng-core, logrotate and cron"
-/container/tool/add-service-available :runit :syslog-ng-core :logrotate :cron
-touch /container/multiple_process_stack_added
diff --git a/image/tool/add-service-available b/image/tool/add-service-available
deleted file mode 100755
index 081f7c38..00000000
--- a/image/tool/add-service-available
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh -e
-
-# Usage :
-# RUN /container/tool/add-service-available [service1] [service2] ...
-
-SERVICE_DIR="/container/service"
-SERVICE_AVAILABLE_DIR="/container/service-available"
-DOWNLOAD_FILENAME="download.sh"
-
-for i in "$@"
-do
-
- echo "add-service-available: ${i}"
- if [ -d "${SERVICE_AVAILABLE_DIR}/${i}" ]; then
-
- if [ -f "${SERVICE_AVAILABLE_DIR}/${i}/${DOWNLOAD_FILENAME}" ]; then
- echo "run ${SERVICE_AVAILABLE_DIR}/${i}/${DOWNLOAD_FILENAME}"
- ${SERVICE_AVAILABLE_DIR}/"${i}"/"${DOWNLOAD_FILENAME}"
- echo "remove ${SERVICE_AVAILABLE_DIR}/${i}/${DOWNLOAD_FILENAME}"
- rm -f "${SERVICE_AVAILABLE_DIR}/${i}/${DOWNLOAD_FILENAME}"
- fi
-
- echo "move ${SERVICE_AVAILABLE_DIR}/${i} to ${SERVICE_DIR}/${i}"
- mv "${SERVICE_AVAILABLE_DIR}/${i}" "${SERVICE_DIR}/${i}"
-
- else
- echo "service-available: ${i} not found in ${SERVICE_AVAILABLE_DIR}/${i}"
- exit 1
- fi
-done
diff --git a/image/tool/complex-bash-env b/image/tool/complex-bash-env
deleted file mode 100755
index 829bcd1b..00000000
--- a/image/tool/complex-bash-env
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash -e
-
-call=$1
-
-function iterate() {
- local env_var_name=$1
- local env_var=${!env_var_name}
-
- if [ "$(complex-bash-env isTable "$env_var")" = true ]; then
- complex-bash-env stripTablePrefix "${env_var}"
- else
- echo "${env_var_name}"
- fi
-}
-
-function isTable() {
- local env_var=$1
- if [ "$(echo "${env_var}" | grep "#COMPLEX_BASH_ENV:TABLE:" -c )" -eq 1 ]; then
- echo true
- else
- echo false
- fi
-}
-
-function isRow() {
- local env_var=$1
- if [ "$(echo "${env_var}" | grep "#COMPLEX_BASH_ENV:ROW:" -c )" -eq 1 ]; then
- echo true
- else
- echo false
- fi
-}
-
-function getRowKey() {
- local env_var=$1
- local row_key_var_name
- row_key_var_name=$(complex-bash-env getRowKeyVarName "$env_var")
- echo "${!row_key_var_name}"
-}
-
-function getRowValue() {
- local env_var=$1
- local row_value_var_name
- row_value_var_name=$(complex-bash-env getRowValueVarName "$env_var")
- echo "${!row_value_var_name}"
-}
-
-function getRowKeyVarName() {
- local env_var=$1
- local row=($(complex-bash-env getRow "$env_var"))
- echo "${row[0]}"
-}
-
-function getRowValueVarName() {
- local env_var=$1
- local row=($(complex-bash-env getRow "$env_var"))
- echo "${row[1]}"
-}
-
-function getRow() {
- local env_var
- env_var=$1
- if [ "$(complex-bash-env isRow "$env_var")" = true ]; then
- local env_var
- env_var=$(complex-bash-env stripRowPrefix "$env_var")
- echo "${env_var}"
- else
- echo "$env_var is not a complex bash env row"
- exit 1
- fi
-}
-
-function stripTablePrefix() {
- local env_var=$1
- stripPrefix "$env_var" "#COMPLEX_BASH_ENV:TABLE:"
-}
-
-function stripRowPrefix() {
- local env_var=$1
- stripPrefix "$env_var" "#COMPLEX_BASH_ENV:ROW:"
-}
-
-function stripPrefix() {
- local env_var=$1
- local prefix=$2
- local r=${env_var#$prefix}
- echo "${r}"
-}
-
-shift
-$call "$@"
diff --git a/image/tool/install-service b/image/tool/install-service
deleted file mode 100755
index b8a57827..00000000
--- a/image/tool/install-service
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/python3 -u
-import os, os.path, subprocess
-
-SERVICE_DIR = "/container/service"
-INSTALL_FILENAME = "install.sh"
-PROCESS_FILENAME = "process.sh"
-nb_process = 0
-
-print("install-service")
-# Auto run global install script if available
-if os.path.isfile(SERVICE_DIR + os.sep + INSTALL_FILENAME):
- print(("run " + SERVICE_DIR + os.sep + INSTALL_FILENAME))
- subprocess.call([SERVICE_DIR + os.sep + INSTALL_FILENAME],shell=True)
-
- print(("remove " + SERVICE_DIR + os.sep + INSTALL_FILENAME + "\n"))
- os.remove(SERVICE_DIR + os.sep + INSTALL_FILENAME)
-
-# Process install script of services in /container/service
-for service in sorted(os.listdir(SERVICE_DIR)):
-
- if os.path.isfile(SERVICE_DIR + os.sep + service + os.sep + INSTALL_FILENAME):
- print(("run " + SERVICE_DIR + os.sep + service + os.sep + INSTALL_FILENAME))
- subprocess.call([SERVICE_DIR + os.sep + service + os.sep + INSTALL_FILENAME],shell=True)
-
- print(("remove " + SERVICE_DIR + os.sep + service + os.sep + INSTALL_FILENAME))
- os.remove(SERVICE_DIR + os.sep + service + os.sep + INSTALL_FILENAME)
-
- if os.path.isfile(SERVICE_DIR + os.sep + service + os.sep + PROCESS_FILENAME):
- nb_process += 1
-
-
-print((str(nb_process) + " process found."))
-
-# Multiple process image
-if nb_process > 1:
- if not os.path.exists("/container/multiple_process_stack_added"):
- print("This image has multiple process.")
- subprocess.call(["apt-get update"],shell=True)
- subprocess.call(["/container/tool/add-multiple-process-stack"],shell=True)
- print("For better image build process consider adding:")
- print("\"/container/tool/add-multiple-process-stack\" after an apt-get update in your Dockerfile.")
diff --git a/image/tool/log-helper b/image/tool/log-helper
deleted file mode 100755
index 61a52ea8..00000000
--- a/image/tool/log-helper
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/bin/bash -e
-
-# log helper base on environment variable CONTAINER_LOG_LEVEL
-# CONTAINER_LOG_LEVEL environment variable is set by run tool based on --log-level argument (info by default)
-# or you can set it directly with docker --env argument
-
-# Usage example: log-helper info CONTAINER_LOG_LEVEL is info or more
-# the message "CONTAINER_LOG_LEVEL is info or more" will be printed only if log level is info, debug or trace
-
-LOG_LEVEL_NONE=0
-LOG_LEVEL_ERROR=1
-LOG_LEVEL_WARNING=2
-LOG_LEVEL_INFO=3
-LOG_LEVEL_DEBUG=4
-LOG_LEVEL_TRACE=5
-
-# default log level if CONTAINER_LOG_LEVEL is not set -> info
-log_level=${CONTAINER_LOG_LEVEL:-${LOG_LEVEL_INFO}}
-
-call=$1 # function to call (error, warning, info, debug, trace, level)
-if [[ ! "$call" =~ ^(error|warning|info|debug|trace|level)$ ]]; then
- echo "Error: Function $call not found"
- echo "Allowed functions are: error, warning, info, debug, trace, level"
- echo "usage example: log-helper info hello !"
- exit 1
-fi
-
-
-echo_msg="" # message to print if required log level is set
-echo_param="" # echo command parameters
-
-function print_log(){
- local level_txt=$1
- local message=$2
- local date=$(date +"%Y-%m-%d %T")
-
- readarray -t messages <<<"$message"
-
- for m in "${messages[@]}"; do
- echo "*** ${level_txt} | ${date} | ${m}"
- done
-}
-
-function error() {
-
- # getEchoParams no matter what level it is to not break pipes
- getEchoParams $@
-
- if [ $log_level -ge 1 ]; then
- echo $echo_param "$(print_log " ERROR " "$echo_msg")"
- fi
-}
-
-function warning() {
-
- # getEchoParams no matter what level it is to not break pipes
- getEchoParams $@
-
- if [ $log_level -ge 2 ]; then
- echo $echo_param "$(print_log "WARNING" "$echo_msg")"
- fi
-}
-
-function info() {
-
- # getEchoParams no matter what level it is to not break pipes
- getEchoParams $@
-
- if [ $log_level -ge 3 ]; then
- echo $echo_param "$(print_log " INFO " "$echo_msg")"
- fi
-}
-
-function debug() {
-
- # getEchoParams no matter what level it is to not break pipes
- getEchoParams $@
-
- if [ $log_level -ge 4 ]; then
- echo $echo_param "$(print_log " DEBUG " "$echo_msg")"
- fi
-}
-
-function trace() {
-
- # getEchoParams no matter what level it is to not break pipes
- getEchoParams $@
-
- if [ $log_level -ge 5 ]; then
- echo $echo_param "$(print_log " TRACE " "$echo_msg")"
- fi
-}
-
-function getMsgFromStdin() {
- if [ -z "$2" ]; then
- echo_msg=$(cat)
- fi
-}
-
-function getEchoParams() {
-
- echo_msg="$@"
-
- if [[ "$1" =~ ^(-e|-n|-E)$ ]]; then
- echo_param=$1
- echo_msg=${echo_msg#$1 }
- fi
-
- # read from pipe if echo_msg is empty
- [[ -n "$echo_msg" ]] || getMsgFromStdin
-}
-
-function level() {
-
- local operator=$1
- local loglevel_str=$2
- local loglevel_str=${loglevel_str^^} # uppercase
-
- if [[ ! "$operator" =~ ^(eq|ne|gt|ge|lt|le)$ ]]; then
- echo "Error: Operator $operator not allowed"
- echo "Allowed operators are: eq, ne, gt, ge, lt, le"
- echo "Help: http://www.tldp.org/LDP/abs/html/comparison-ops.html"
- exit 1
- fi
-
- if [ -z "$loglevel_str" ]; then
- echo "Error: No log level provided"
- echo "Allowed log level are: none, error, warning, info, debug, trace"
- echo "usage example: log-helper level eq info"
- exit 1
- fi
-
- local log_level_var=LOG_LEVEL_$loglevel_str
-
- if [ $log_level -$operator ${!log_level_var} ]; then
- exit 0
- else
- exit 1
- fi
-}
-
-shift
-$call "$@"
diff --git a/image/tool/run b/image/tool/run
deleted file mode 100755
index 1f04a553..00000000
--- a/image/tool/run
+++ /dev/null
@@ -1,936 +0,0 @@
-#!/usr/bin/python3 -u
-# -*- coding: utf-8 -*-
-
-import os, os.path, sys, stat, signal, errno, argparse, time, json, re, yaml, ast, socket, shutil, pwd, grp
-from datetime import datetime
-
-KILL_PROCESS_TIMEOUT = int(os.environ.get('KILL_PROCESS_TIMEOUT', 30))
-KILL_ALL_PROCESSES_TIMEOUT = int(os.environ.get('KILL_ALL_PROCESSES_TIMEOUT', 30))
-
-LOG_LEVEL_NONE = 0
-LOG_LEVEL_ERROR = 1
-LOG_LEVEL_WARNING = 2
-LOG_LEVEL_INFO = 3
-LOG_LEVEL_DEBUG = 4
-LOG_LEVEL_TRACE = 5
-
-SHENV_NAME_WHITELIST_REGEX = re.compile('\W')
-
-log_level = None
-
-environ_backup = dict(os.environ)
-terminated_child_processes = {}
-
-IMPORT_STARTUP_FILENAME="startup.sh"
-IMPORT_PROCESS_FILENAME="process.sh"
-IMPORT_FINISH_FILENAME="finish.sh"
-
-IMPORT_ENVIRONMENT_DIR="/container/environment"
-IMPORT_FIRST_STARTUP_ENVIRONMENT_DIR="/container/environment/startup"
-
-ENV_FILES_YAML_EXTENSIONS = ('.yaml', '.startup.yaml')
-ENV_FILES_JSON_EXTENSIONS = ('.json', '.startup.json')
-ENV_FILES_STARTUP_EXTENSIONS = ('.startup.yaml', '.startup.json')
-
-IMPORT_SERVICE_DIR="/container/service"
-
-RUN_DIR="/container/run"
-RUN_STATE_DIR = RUN_DIR + "/state"
-RUN_ENVIRONMENT_DIR = RUN_DIR + "/environment"
-RUN_ENVIRONMENT_FILE_EXPORT = RUN_DIR + "/environment.sh"
-RUN_STARTUP_DIR = RUN_DIR + "/startup"
-RUN_STARTUP_FINAL_FILE = RUN_DIR + "/startup.sh"
-RUN_PROCESS_DIR = RUN_DIR + "/process"
-RUN_SERVICE_DIR = RUN_DIR + "/service"
-
-ENVIRONMENT_LOG_LEVEL_KEY = 'CONTAINER_LOG_LEVEL'
-ENVIRONMENT_SERVICE_DIR_KEY = 'CONTAINER_SERVICE_DIR'
-ENVIRONMENT_STATE_DIR_KEY = 'CONTAINER_STATE_DIR'
-
-class AlarmException(Exception):
- pass
-
-def write_log(level, message):
- now = datetime.now()
- for line in message.splitlines():
- sys.stderr.write("*** %s | %s | %s\n" % (level, now.strftime("%Y-%m-%d %H:%M:%S"), line))
-
-def error(message):
- if log_level >= LOG_LEVEL_ERROR:
- write_log(" ERROR ", message)
-
-def warning(message):
- if log_level >= LOG_LEVEL_WARNING:
- write_log("WARNING", message)
-
-def info(message):
- if log_level >= LOG_LEVEL_INFO:
- write_log(" INFO ", message)
-
-def debug(message):
- if log_level >= LOG_LEVEL_DEBUG:
- write_log(" DEBUG ", message)
-
-def trace(message):
- if log_level >= LOG_LEVEL_TRACE:
- write_log(" TRACE ", message)
-
-def debug_env_dump():
- debug("------------ Environment dump ------------")
- for name, value in list(os.environ.items()):
- debug(name + " = " + value)
- debug("------------------------------------------")
-
-def ignore_signals_and_raise_keyboard_interrupt(signame):
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- raise KeyboardInterrupt(signame)
-
-def raise_alarm_exception():
- raise AlarmException('Alarm')
-
-def listdir(path):
- try:
- result = os.stat(path)
- except OSError:
- return []
- if stat.S_ISDIR(result.st_mode):
- return sorted(os.listdir(path))
- else:
- return []
-
-def is_exe(path):
- try:
- return os.path.isfile(path) and os.access(path, os.X_OK)
- except OSError:
- return False
-
-def xstr(s):
- if s is None:
- return ''
- return str(s)
-
-def set_env_hostname_to_etc_hosts():
- try:
- if "HOSTNAME" in os.environ:
- socket_hostname = socket.gethostname()
-
- if os.environ["HOSTNAME"] != socket_hostname:
- ip_address = socket.gethostbyname(socket_hostname)
- with open("/etc/hosts", "a") as myfile:
- myfile.write(ip_address+" "+os.environ["HOSTNAME"]+"\n")
- except:
- warning("set_env_hostname_to_etc_hosts: failed at some point...")
-
-def python_dict_to_bash_envvar(name, python_dict):
-
- for value in python_dict:
- python_to_bash_envvar(name+"_KEY", value)
- python_to_bash_envvar(name+"_VALUE", python_dict.get(value))
-
- values = "#COMPLEX_BASH_ENV:ROW: "+name+"_KEY "+name+"_VALUE"
- os.environ[name] = xstr(values)
- trace("python2bash : set : " + name + " = "+ os.environ[name])
-
-def python_list_to_bash_envvar(name, python_list):
-
- values="#COMPLEX_BASH_ENV:TABLE:"
-
- i=1
- for value in python_list:
- child_name = name + "_ROW_" + str(i)
- values += " " + child_name
- python_to_bash_envvar(child_name, value)
- i = i +1
-
- os.environ[name] = xstr(values)
- trace("python2bash : set : " + name + " = "+ os.environ[name])
-
-def python_to_bash_envvar(name, value):
-
- try:
- value = ast.literal_eval(value)
- except:
- pass
-
- if isinstance(value, list):
- python_list_to_bash_envvar(name,value)
-
- elif isinstance(value, dict):
- python_dict_to_bash_envvar(name,value)
-
- else:
- os.environ[name] = xstr(value)
- trace("python2bash : set : " + name + " = "+ os.environ[name])
-
-def decode_python_envvars():
- _environ = dict(os.environ)
- for name, value in list(_environ.items()):
- if value.startswith("#PYTHON2BASH:") :
- value = value.replace("#PYTHON2BASH:","",1)
- python_to_bash_envvar(name, value)
-
-def decode_json_envvars():
- _environ = dict(os.environ)
- for name, value in list(_environ.items()):
- if value.startswith("#JSON2BASH:") :
- value = value.replace("#JSON2BASH:","",1)
- try:
- value = json.loads(value)
- python_to_bash_envvar(name,value)
- except:
- os.environ[name] = xstr(value)
- warning("failed to parse : " + xstr(value))
- trace("set : " + name + " = "+ os.environ[name])
-
-def decode_envvars():
- decode_json_envvars()
- decode_python_envvars()
-
-def generic_import_envvars(path, override_existing_environment):
- if not os.path.exists(path):
- trace("generic_import_envvars "+ path+ " don't exists")
- return
- new_env = {}
- for envfile in listdir(path):
- filePath = path + os.sep + envfile
- if os.path.isfile(filePath) and "." not in envfile:
- name = os.path.basename(envfile)
- with open(filePath, "r") as f:
- # Text files often end with a trailing newline, which we
- # don't want to include in the env variable value. See
- # https://github.com/phusion/baseimage-docker/pull/49
- value = re.sub('\n\Z', '', f.read())
- new_env[name] = value
- trace("import " + name + " from " + filePath)
-
- for name, value in list(new_env.items()):
- if override_existing_environment or name not in os.environ:
- os.environ[name] = value
- trace("set : " + name + " = "+ os.environ[name])
- else:
- debug("ignore : " + name + " = " + xstr(value) + " (keep " + name + " = " + os.environ[name] + " )")
-
-def import_run_envvars():
- clear_environ()
- generic_import_envvars(RUN_ENVIRONMENT_DIR, True)
-
-def import_envvars():
- generic_import_envvars(IMPORT_ENVIRONMENT_DIR, False)
- generic_import_envvars(IMPORT_FIRST_STARTUP_ENVIRONMENT_DIR, False)
-
-def export_run_envvars(to_dir = True):
- if to_dir and not os.path.exists(RUN_ENVIRONMENT_DIR):
- warning("export_run_envvars: "+RUN_ENVIRONMENT_DIR+" don't exists")
- return
- shell_dump = ""
- for name, value in list(os.environ.items()):
- if name in ['USER', 'GROUP', 'UID', 'GID', 'SHELL']:
- continue
- if to_dir:
- with open(RUN_ENVIRONMENT_DIR + os.sep + name, "w") as f:
- f.write(value)
- trace("export " + name + " to " + RUN_ENVIRONMENT_DIR + os.sep + name)
- shell_dump += "export " + sanitize_shenvname(name) + "=" + shquote(value) + "\n"
-
- with open(RUN_ENVIRONMENT_FILE_EXPORT, "w") as f:
- f.write(shell_dump)
- trace("export "+RUN_ENVIRONMENT_FILE_EXPORT)
-
-def create_run_envvars():
- set_dir_env()
- set_log_level_env()
- import_envvars()
- import_env_files()
- decode_envvars()
- export_run_envvars()
-
-def clear_run_envvars():
- try:
- shutil.rmtree(RUN_ENVIRONMENT_DIR)
- os.makedirs(RUN_ENVIRONMENT_DIR)
- os.chmod(RUN_ENVIRONMENT_DIR, 700)
- except:
- warning("clear_run_envvars: failed at some point...")
-
-def print_env_files_order(file_extensions):
-
- if not os.path.exists(IMPORT_ENVIRONMENT_DIR):
- warning("print_env_files_order "+IMPORT_ENVIRONMENT_DIR+" don't exists")
- return
-
- to_print = 'Caution: previously defined variables will not be overriden.\n'
-
- file_found = False
- for subdir, _, files in sorted(os.walk(IMPORT_ENVIRONMENT_DIR)):
- for file in files:
- filepath = subdir + os.sep + file
- if filepath.endswith(file_extensions):
- file_found = True
- filepath = subdir + os.sep + file
- to_print += filepath + '\n'
-
- if file_found:
- if log_level < LOG_LEVEL_DEBUG:
- to_print+='\nTo see how this files are processed and environment variables values,\n'
- to_print+='run this container with \'--loglevel debug\''
-
- info('Environment files will be proccessed in this order : \n' + to_print)
-
-def import_env_files():
-
- if not os.path.exists(IMPORT_ENVIRONMENT_DIR):
- warning("import_env_files: "+IMPORT_ENVIRONMENT_DIR+" don't exists")
- return
-
- file_extensions = ENV_FILES_YAML_EXTENSIONS + ENV_FILES_JSON_EXTENSIONS
- print_env_files_order(file_extensions)
-
- for subdir, _, files in sorted(os.walk(IMPORT_ENVIRONMENT_DIR)):
- for file in files:
- if file.endswith(file_extensions):
- filepath = subdir + os.sep + file
-
- try:
- with open(filepath, "r") as f:
-
- debug("process environment file : " + filepath)
-
- if file.endswith(ENV_FILES_YAML_EXTENSIONS):
- env_vars = yaml.load(f)
-
- elif file.endswith(ENV_FILES_JSON_EXTENSIONS):
- env_vars = json.load(f)
-
- for name, value in list(env_vars.items()):
- if not name in os.environ:
- if isinstance(value, list) or isinstance(value, dict):
- os.environ[name] = '#PYTHON2BASH:' + xstr(value)
- else:
- os.environ[name] = xstr(value)
- trace("set : " + name + " = "+ os.environ[name])
- else:
- debug("ignore : " + name + " = " + xstr(value) + " (keep " + name + " = " + os.environ[name] + " )")
- except:
- warning('failed to parse: ' + filepath)
-
-def remove_startup_env_files():
-
- if os.path.isdir(IMPORT_FIRST_STARTUP_ENVIRONMENT_DIR):
- try:
- shutil.rmtree(IMPORT_FIRST_STARTUP_ENVIRONMENT_DIR)
- except:
- warning("remove_startup_env_files: failed to remove "+IMPORT_FIRST_STARTUP_ENVIRONMENT_DIR)
-
- if not os.path.exists(IMPORT_ENVIRONMENT_DIR):
- warning("remove_startup_env_files: "+IMPORT_ENVIRONMENT_DIR+" don't exists")
- return
-
- for subdir, _, files in sorted(os.walk(IMPORT_ENVIRONMENT_DIR)):
- for file in files:
- filepath = subdir + os.sep + file
- if filepath.endswith(ENV_FILES_STARTUP_EXTENSIONS):
- try:
- os.remove(filepath)
- info("Remove file "+filepath)
- except:
- warning("remove_startup_env_files: failed to remove "+filepath)
-
-def restore_environ():
- clear_environ()
- trace("Restore initial environment")
- os.environ.update(environ_backup)
-
-def clear_environ():
- trace("Clear existing environment")
- os.environ.clear()
-
-def set_startup_scripts_env():
- debug("Set environment for startup files")
- clear_run_envvars() # clear previous environment
- create_run_envvars() # create run envvars with all env files
-
-def set_process_env(keep_startup_env = False):
- debug("Set environment for container process")
- if not keep_startup_env:
- remove_startup_env_files()
- clear_run_envvars()
-
- restore_environ()
- create_run_envvars() # recreate env var without startup env files
-
-def setup_run_directories(args):
-
- directories = (RUN_PROCESS_DIR, RUN_STARTUP_DIR, RUN_STATE_DIR, RUN_ENVIRONMENT_DIR)
- for directory in directories:
- if not os.path.exists(directory):
- os.makedirs(directory)
-
- if directory == RUN_ENVIRONMENT_DIR:
- os.chmod(directory, 700)
-
- if not os.path.exists(RUN_ENVIRONMENT_FILE_EXPORT):
- open(RUN_ENVIRONMENT_FILE_EXPORT, 'a').close()
- os.chmod(RUN_ENVIRONMENT_FILE_EXPORT, 640)
- uid = pwd.getpwnam("root").pw_uid
- gid = grp.getgrnam("docker_env").gr_gid
- os.chown(RUN_ENVIRONMENT_FILE_EXPORT, uid, gid)
-
- if state_is_first_start():
-
- if args.copy_service:
- copy_service_to_run_dir()
-
- set_dir_env()
-
- base_path = os.environ[ENVIRONMENT_SERVICE_DIR_KEY]
- nb_service = len(listdir(base_path))
-
- if nb_service > 0 :
- info("Search service in " + ENVIRONMENT_SERVICE_DIR_KEY + " = "+base_path+" :")
- for d in listdir(base_path):
- d_path = base_path + os.sep + d
- if os.path.isdir(d_path):
- if is_exe(d_path + os.sep + IMPORT_STARTUP_FILENAME):
- info('link ' + d_path + os.sep + IMPORT_STARTUP_FILENAME + ' to ' + RUN_STARTUP_DIR + os.sep + d)
- try:
- os.symlink(d_path + os.sep + IMPORT_STARTUP_FILENAME, RUN_STARTUP_DIR + os.sep + d)
- except OSError as detail:
- warning('failed to link ' + d_path + os.sep + IMPORT_STARTUP_FILENAME + ' to ' + RUN_STARTUP_DIR + os.sep + d + ': ' + xstr(detail))
-
- if is_exe(d_path + os.sep + IMPORT_PROCESS_FILENAME):
- info('link ' + d_path + os.sep + IMPORT_PROCESS_FILENAME + ' to ' + RUN_PROCESS_DIR + os.sep + d + os.sep + 'run')
-
- if not os.path.exists(RUN_PROCESS_DIR + os.sep + d):
- os.makedirs(RUN_PROCESS_DIR + os.sep + d)
- else:
- warning('directory ' + RUN_PROCESS_DIR + os.sep + d + ' already exists')
-
- try:
- os.symlink(d_path + os.sep + IMPORT_PROCESS_FILENAME, RUN_PROCESS_DIR + os.sep + d + os.sep + 'run')
- except OSError as detail:
- warning('failed to link ' + d_path + os.sep + IMPORT_PROCESS_FILENAME + ' to ' + RUN_PROCESS_DIR + os.sep + d + os.sep + 'run : ' + xstr(detail))
-
- if not args.skip_finish_files and is_exe(d_path + os.sep + IMPORT_FINISH_FILENAME):
- info('link ' + d_path + os.sep + IMPORT_FINISH_FILENAME + ' to ' + RUN_PROCESS_DIR + os.sep + d + os.sep + 'finish')
-
- if not os.path.exists(RUN_PROCESS_DIR + os.sep + d):
- os.makedirs(RUN_PROCESS_DIR + os.sep + d)
-
- try:
- os.symlink(d_path + os.sep + IMPORT_FINISH_FILENAME, RUN_PROCESS_DIR + os.sep + d + os.sep + 'finish')
- except OSError as detail:
- warning('failed to link ' + d_path + os.sep + IMPORT_FINISH_FILENAME + ' to ' + RUN_PROCESS_DIR + os.sep + d + os.sep + 'finish : ' + xstr(detail))
-
-def set_dir_env():
- if state_is_service_copied_to_run_dir():
- os.environ[ENVIRONMENT_SERVICE_DIR_KEY] = RUN_SERVICE_DIR
- else:
- os.environ[ENVIRONMENT_SERVICE_DIR_KEY] = IMPORT_SERVICE_DIR
- trace("set : " + ENVIRONMENT_SERVICE_DIR_KEY + " = " + os.environ[ENVIRONMENT_SERVICE_DIR_KEY])
-
- os.environ[ENVIRONMENT_STATE_DIR_KEY] = RUN_STATE_DIR
- trace("set : " + ENVIRONMENT_STATE_DIR_KEY + " = " + os.environ[ENVIRONMENT_STATE_DIR_KEY])
-
-def set_log_level_env():
- os.environ[ENVIRONMENT_LOG_LEVEL_KEY] = xstr(log_level)
- trace("set : "+ENVIRONMENT_LOG_LEVEL_KEY+" = " + os.environ[ENVIRONMENT_LOG_LEVEL_KEY])
-
-def copy_service_to_run_dir():
-
- if os.path.exists(RUN_SERVICE_DIR):
- warning("Copy "+IMPORT_SERVICE_DIR+" to "+RUN_SERVICE_DIR + " ignored")
- warning(RUN_SERVICE_DIR + " already exists")
- return
-
- info("Copy "+IMPORT_SERVICE_DIR+" to "+RUN_SERVICE_DIR)
-
- try:
- shutil.copytree(IMPORT_SERVICE_DIR, RUN_SERVICE_DIR)
- except shutil.Error as e:
- warning(e)
-
- state_set_service_copied_to_run_dir()
-
-def state_set_service_copied_to_run_dir():
- open(RUN_STATE_DIR+"/service-copied-to-run-dir", 'a').close()
-
-def state_is_service_copied_to_run_dir():
- return os.path.exists(RUN_STATE_DIR+'/service-copied-to-run-dir')
-
-def state_set_first_startup_done():
- open(RUN_STATE_DIR+"/first-startup-done", 'a').close()
-
-def state_is_first_start():
- return os.path.exists(RUN_STATE_DIR+'/first-startup-done') == False
-
-def state_set_startup_done():
- open(RUN_STATE_DIR+"/startup-done", 'a').close()
-
-def state_reset_startup_done():
- try:
- os.remove(RUN_STATE_DIR+"/startup-done")
- except OSError:
- pass
-
-def is_multiple_process_container():
- return len(listdir(RUN_PROCESS_DIR)) > 1
-
-def is_single_process_container():
- return len(listdir(RUN_PROCESS_DIR)) == 1
-
-def get_container_process():
- for p in listdir(RUN_PROCESS_DIR):
- return RUN_PROCESS_DIR + os.sep + p + os.sep + 'run'
-
-def is_runit_installed():
- return os.path.exists('/usr/bin/sv')
-
-_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
-
-def shquote(s):
- """Return a shell-escaped version of the string *s*."""
- if not s:
- return "''"
- if _find_unsafe(s) is None:
- return s
-
- # use single quotes, and put single quotes into double quotes
- # the string $'b is then quoted as '$'"'"'b'
- return "'" + s.replace("'", "'\"'\"'") + "'"
-
-def sanitize_shenvname(s):
- return re.sub(SHENV_NAME_WHITELIST_REGEX, "_", s)
-
-# Waits for the child process with the given PID, while at the same time
-# reaping any other child processes that have exited (e.g. adopted child
-# processes that have terminated).
-def waitpid_reap_other_children(pid):
- global terminated_child_processes
-
- status = terminated_child_processes.get(pid)
- if status:
- # A previous call to waitpid_reap_other_children(),
- # with an argument not equal to the current argument,
- # already waited for this process. Return the status
- # that was obtained back then.
- del terminated_child_processes[pid]
- return status
-
- done = False
- status = None
- while not done:
- try:
- # https://github.com/phusion/baseimage-docker/issues/151#issuecomment-92660569
- this_pid, status = os.waitpid(pid, os.WNOHANG)
- if this_pid == 0:
- this_pid, status = os.waitpid(-1, 0)
- if this_pid == pid:
- done = True
- else:
- # Save status for later.
- terminated_child_processes[this_pid] = status
- except OSError as e:
- if e.errno == errno.ECHILD or e.errno == errno.ESRCH:
- return None
- else:
- raise
- return status
-
-def stop_child_process(name, pid, signo = signal.SIGTERM, time_limit = KILL_PROCESS_TIMEOUT):
- info("Shutting down %s (PID %d)..." % (name, pid))
- try:
- os.kill(pid, signo)
- except OSError:
- pass
- signal.alarm(time_limit)
- try:
- try:
- waitpid_reap_other_children(pid)
- except OSError:
- pass
- except AlarmException:
- warning("%s (PID %d) did not shut down in time. Forcing it to exit." % (name, pid))
- try:
- os.kill(pid, signal.SIGKILL)
- except OSError:
- pass
- try:
- waitpid_reap_other_children(pid)
- except OSError:
- pass
- finally:
- signal.alarm(0)
-
-def run_command_killable(command):
- status = None
- debug_env_dump()
- pid = os.spawnvp(os.P_NOWAIT, command[0], command)
- try:
- status = waitpid_reap_other_children(pid)
- except BaseException:
- warning("An error occurred. Aborting.")
- stop_child_process(command[0], pid)
- raise
- if status != 0:
- if status is None:
- error("%s exited with unknown status\n" % command[0])
- else:
- error("%s failed with status %d\n" % (command[0], os.WEXITSTATUS(status)))
- sys.exit(1)
-
-def run_command_killable_and_import_run_envvars(command):
- run_command_killable(command)
- import_run_envvars()
- export_run_envvars(False)
-
-def kill_all_processes(time_limit):
- info("Killing all processes...")
- try:
- os.kill(-1, signal.SIGTERM)
- except OSError:
- pass
- signal.alarm(time_limit)
- try:
- # Wait until no more child processes exist.
- done = False
- while not done:
- try:
- os.waitpid(-1, 0)
- except OSError as e:
- if e.errno == errno.ECHILD:
- done = True
- else:
- raise
- except AlarmException:
- warning("Not all processes have exited in time. Forcing them to exit.")
- try:
- os.kill(-1, signal.SIGKILL)
- except OSError:
- pass
- finally:
- signal.alarm(0)
-
-def container_had_startup_script():
- return (len(listdir(RUN_STARTUP_DIR)) > 0 or is_exe(RUN_STARTUP_FINAL_FILE))
-
-def run_startup_files(args):
-
- # Run /container/run/startup/*
- for name in listdir(RUN_STARTUP_DIR):
- filename = RUN_STARTUP_DIR + os.sep + name
- if is_exe(filename):
- info("Running %s..." % filename)
- run_command_killable_and_import_run_envvars([filename])
-
- # Run /container/run/startup.sh.
- if is_exe(RUN_STARTUP_FINAL_FILE):
- info("Running "+RUN_STARTUP_FINAL_FILE+"...")
- run_command_killable_and_import_run_envvars([RUN_STARTUP_FINAL_FILE])
-
-def wait_for_process_or_interrupt(pid):
- status = waitpid_reap_other_children(pid)
- return (True, status)
-
-def run_process(args, background_process_name, background_process_command):
- background_process_pid = run_background_process(background_process_name,background_process_command)
- background_process_exited = False
- exit_status = None
-
- if len(args.main_command) == 0:
- background_process_exited, exit_status = wait_background_process(background_process_name, background_process_pid)
- else:
- exit_status = run_foreground_process(args.main_command)
-
- return background_process_pid, background_process_exited, exit_status
-
-def run_background_process(name, command):
- info("Running "+ name +"...")
- pid = os.spawnvp(os.P_NOWAIT, command[0], command)
- debug("%s started as PID %d" % (name, pid))
- return pid
-
-def wait_background_process(name, pid):
- exit_code = None
- exit_status = None
- process_exited = False
-
- process_exited, exit_code = wait_for_process_or_interrupt(pid)
- if process_exited:
- if exit_code is None:
- info(name + " exited with unknown status")
- exit_status = 1
- else:
- exit_status = os.WEXITSTATUS(exit_code)
- info("%s exited with status %d" % (name, exit_status))
- return (process_exited, exit_status)
-
-def run_foreground_process(command):
- exit_code = None
- exit_status = None
-
- info("Running %s..." % " ".join(command))
- pid = os.spawnvp(os.P_NOWAIT, command[0], command)
- try:
- exit_code = waitpid_reap_other_children(pid)
- if exit_code is None:
- info("%s exited with unknown status." % command[0])
- exit_status = 1
- else:
- exit_status = os.WEXITSTATUS(exit_code)
- info("%s exited with status %d." % (command[0], exit_status))
- except KeyboardInterrupt:
- stop_child_process(command[0], pid)
- raise
- except BaseException:
- error("An error occurred. Aborting.")
- stop_child_process(command[0], pid)
- raise
-
- return exit_status
-
-def shutdown_runit_services():
- debug("Begin shutting down runit services...")
- os.system("/usr/bin/sv -w %d force-stop %s/* > /dev/null" % (KILL_PROCESS_TIMEOUT, RUN_PROCESS_DIR))
-
-def wait_for_runit_services():
- debug("Waiting for runit services to exit...")
- done = False
- while not done:
- done = os.system("/usr/bin/sv status "+RUN_PROCESS_DIR+"/* | grep -q '^run:'") != 0
- if not done:
- time.sleep(0.1)
- shutdown_runit_services()
-
-def run_multiple_process_container(args):
- if not is_runit_installed():
- error("Error: runit is not installed and this is a multiple process container.")
- return
-
- background_process_exited=False
- background_process_pid=None
-
- try:
- runit_command=["/usr/bin/runsvdir", "-P", RUN_PROCESS_DIR]
- background_process_pid, background_process_exited, exit_status = run_process(args, "runit daemon", runit_command)
-
- sys.exit(exit_status)
- finally:
- shutdown_runit_services()
- if not background_process_exited:
- stop_child_process("runit daemon", background_process_pid)
- wait_for_runit_services()
-
-def run_single_process_container(args):
- background_process_exited=False
- background_process_pid=None
-
- try:
- container_process=get_container_process()
- background_process_pid, background_process_exited, exit_status = run_process(args, container_process, [container_process])
-
- sys.exit(exit_status)
- finally:
- if not background_process_exited:
- stop_child_process(container_process, background_process_pid)
-
-def run_no_process_container(args):
- if len(args.main_command) == 0:
- args.main_command=['bash'] # run bash by default
-
- exit_status = run_foreground_process(args.main_command)
- sys.exit(exit_status)
-
-def run_finish_files():
-
- # iterate process dir to find finish files
- for name in listdir(RUN_PROCESS_DIR):
- filename = RUN_PROCESS_DIR + os.sep + name + os.sep + "finish"
- if is_exe(filename):
- info("Running %s..." % filename)
- run_command_killable_and_import_run_envvars([filename])
-
-def wait_states(states):
- for state in states:
- filename = RUN_STATE_DIR + os.sep + state
- info("Wait state: " + state)
-
- while not os.path.exists(filename):
- time.sleep(0.1)
- debug("Check file " + filename)
- pass
- debug("Check file " + filename + " [Ok]")
-
-def run_cmds(args, when):
- debug("Run commands before " + when + "...")
- if len(args.cmds) > 0:
-
- for cmd in args.cmds:
- if (len(cmd) > 1 and cmd[1] == when) or (len(cmd) == 1 and when == "startup"):
- info("Running '"+cmd[0]+"'...")
- run_command_killable_and_import_run_envvars(cmd[0].split())
-
-def main(args):
-
- info(ENVIRONMENT_LOG_LEVEL_KEY + " = " + xstr(log_level) + " (" + log_level_switcher_inv.get(log_level) + ")")
- state_reset_startup_done()
-
- if args.set_env_hostname_to_etc_hosts:
- set_env_hostname_to_etc_hosts()
-
- wait_states(args.wait_states)
- setup_run_directories(args)
-
- if not args.skip_env_files:
- set_startup_scripts_env()
-
- run_cmds(args,"startup")
-
- if not args.skip_startup_files and container_had_startup_script():
- run_startup_files(args)
-
- state_set_startup_done()
- state_set_first_startup_done()
-
- if not args.skip_env_files:
- set_process_env(args.keep_startup_env)
-
- run_cmds(args,"process")
-
- debug_env_dump()
-
- if is_single_process_container() and not args.skip_process_files:
- run_single_process_container(args)
-
- elif is_multiple_process_container() and not args.skip_process_files:
- run_multiple_process_container(args)
-
- else:
- run_no_process_container(args)
-
-# Parse options.
-parser = argparse.ArgumentParser(description = 'Initialize the system.', epilog='Osixia! Light Baseimage: https://github.com/osixia/docker-light-baseimage')
-parser.add_argument('main_command', metavar = 'MAIN_COMMAND', type = str, nargs = '*',
- help = 'The main command to run, leave empty to only run container process.')
-parser.add_argument('-e', '--skip-env-files', dest = 'skip_env_files',
- action = 'store_const', const = True, default = False,
- help = 'Skip getting environment values from environment file(s).')
-parser.add_argument('-s', '--skip-startup-files', dest = 'skip_startup_files',
- action = 'store_const', const = True, default = False,
- help = 'Skip running '+RUN_STARTUP_DIR+'/* and '+RUN_STARTUP_FINAL_FILE + ' file(s).')
-parser.add_argument('-p', '--skip-process-files', dest = 'skip_process_files',
- action = 'store_const', const = True, default = False,
- help = 'Skip running container process file(s).')
-parser.add_argument('-f', '--skip-finish-files', dest = 'skip_finish_files',
- action = 'store_const', const = True, default = False,
- help = 'Skip running container finish file(s).')
-parser.add_argument('-o', '--run-only', type=str, choices=["startup","process","finish"], dest = 'run_only', default = None,
- help = 'Run only this file type and ignore others.')
-parser.add_argument('-c', '--cmd', metavar=('COMMAND', 'WHEN={startup,process,finish}'), dest = 'cmds', type = str,
- action = 'append', default = [], nargs = "+",
- help = 'Run COMMAND before WHEN file(s). Default before startup file(s).')
-parser.add_argument('-k', '--no-kill-all-on-exit', dest = 'kill_all_on_exit',
- action = 'store_const', const = False, default = True,
- help = 'Don\'t kill all processes on the system upon exiting.')
-parser.add_argument('--wait-state', metavar = 'FILENAME', dest = 'wait_states', type = str,
- action = 'append', default=[],
- help = 'Wait until the container FILENAME file exists in '+RUN_STATE_DIR+' directory before starting. Usefull when 2 containers share '+RUN_DIR+' directory via volume.')
-parser.add_argument('--wait-first-startup', dest = 'wait_first_startup',
- action = 'store_const', const = True, default = False,
- help = 'Wait until the first startup is done before starting. Usefull when 2 containers share '+RUN_DIR+' directory via volume.')
-parser.add_argument('--keep-startup-env', dest = 'keep_startup_env',
- action = 'store_const', const = True, default = False,
- help = 'Don\'t remove ' + xstr(ENV_FILES_STARTUP_EXTENSIONS) + ' environment files after startup scripts.')
-parser.add_argument('--copy-service', dest = 'copy_service',
- action = 'store_const', const = True, default = False,
- help = 'Copy '+IMPORT_SERVICE_DIR+' to '+RUN_SERVICE_DIR+'. Help to fix docker mounted files problems.')
-parser.add_argument('--dont-touch-etc-hosts', dest = 'set_env_hostname_to_etc_hosts',
- action = 'store_const', const = False, default = True,
- help = 'Don\'t add in /etc/hosts a line with the container ip and $HOSTNAME environment variable value.')
-parser.add_argument('--keepalive', dest = 'keepalive',
- action = 'store_const', const = True, default = False,
- help = 'Keep alive container if all startup files and process exited without error.')
-parser.add_argument('--keepalive-force', dest = 'keepalive_force',
- action = 'store_const', const = True, default = False,
- help = 'Keep alive container in all circonstancies.')
-parser.add_argument('-l', '--loglevel', type=str, choices=["none","error","warning","info","debug","trace"], dest = 'log_level', default = "info",
- help = 'Log level (default: info)')
-
-args = parser.parse_args()
-
-log_level_switcher = {"none": LOG_LEVEL_NONE,"error": LOG_LEVEL_ERROR,"warning": LOG_LEVEL_WARNING,"info": LOG_LEVEL_INFO,"debug": LOG_LEVEL_DEBUG, "trace": LOG_LEVEL_TRACE}
-log_level_switcher_inv = {LOG_LEVEL_NONE: "none",LOG_LEVEL_ERROR:"error",LOG_LEVEL_WARNING:"warning",LOG_LEVEL_INFO:"info",LOG_LEVEL_DEBUG:"debug",LOG_LEVEL_TRACE:"trace"}
-log_level = log_level_switcher.get(args.log_level)
-
-# Run only arg
-if args.run_only != None:
- if args.run_only == "startup" and args.skip_startup_files:
- error("Error: When '--run-only startup' is set '--skip-startup-files' can't be set.")
- sys.exit(1)
- elif args.run_only == "process" and args.skip_startup_files:
- error("Error: When '--run-only process' is set '--skip-process-files' can't be set.")
- sys.exit(1)
- elif args.run_only == "finish" and args.skip_startup_files:
- error("Error: When '--run-only finish' is set '--skip-finish-files' can't be set.")
- sys.exit(1)
-
- if args.run_only == "startup":
- args.skip_process_files = True
- args.skip_finish_files = True
- elif args.run_only == "process":
- args.skip_startup_files = True
- args.skip_finish_files = True
- elif args.run_only == "finish":
- args.skip_startup_files = True
- args.skip_process_files = True
-
-# wait for startup args
-if args.wait_first_startup:
- args.wait_states.insert(0, 'first-startup-done')
-
-# Run main function.
-signal.signal(signal.SIGTERM, lambda signum, frame: ignore_signals_and_raise_keyboard_interrupt('SIGTERM'))
-signal.signal(signal.SIGINT, lambda signum, frame: ignore_signals_and_raise_keyboard_interrupt('SIGINT'))
-signal.signal(signal.SIGALRM, lambda signum, frame: raise_alarm_exception())
-
-exit_code = 0
-
-try:
- main(args)
-
-except SystemExit as err:
- exit_code = err.code
- if args.keepalive and err.code == 0:
- try:
- info("All process have exited without error, keep container alive...")
- while True:
- time.sleep(60)
- pass
- except:
- error("Keep alive process ended.")
-
-except KeyboardInterrupt:
- warning("Init system aborted.")
- exit(2)
-
-finally:
-
- run_cmds(args,"finish")
-
- # for multiple process images finish script are run by runit
- if not args.skip_finish_files and not is_multiple_process_container():
- run_finish_files()
-
- if args.keepalive_force:
- try:
- info("All process have exited, keep container alive...")
- while True:
- time.sleep(60)
- pass
- except:
- error("Keep alive process ended.")
-
- if args.kill_all_on_exit:
- kill_all_processes(KILL_ALL_PROCESSES_TIMEOUT)
-
- exit(exit_code)
diff --git a/image/tool/setuser b/image/tool/setuser
deleted file mode 100755
index 06d7430a..00000000
--- a/image/tool/setuser
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/python3
-
-'''
-Copyright (c) 2013-2015 Phusion Holding B.V.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-'''
-
-import sys
-import os
-import pwd
-
-
-def abort(message):
- sys.stderr.write("setuser: %s\n" % message)
- sys.exit(1)
-
-
-def main():
- '''
- A simple alternative to sudo that executes a command as a user by setting
- the user ID and user parameters to those described by the system and then
- using execvp(3) to execute the command without the necessity of a TTY
- '''
-
- username = sys.argv[1]
- try:
- user = pwd.getpwnam(username)
- except KeyError:
- abort("user %s not found" % username)
- os.initgroups(username, user.pw_gid)
- os.setgid(user.pw_gid)
- os.setuid(user.pw_uid)
- os.environ['USER'] = username
- os.environ['HOME'] = user.pw_dir
- os.environ['UID'] = str(user.pw_uid)
- try:
- os.execvp(sys.argv[2], sys.argv[2:])
- except OSError as e:
- abort("cannot execute %s: %s" % (sys.argv[2], str(e)))
-
-if __name__ == '__main__':
-
- if len(sys.argv) < 3:
- sys.stderr.write("Usage: /sbin/setuser USERNAME COMMAND [args..]\n")
- sys.exit(1)
-
- main()
diff --git a/image/tool/wait-process b/image/tool/wait-process
deleted file mode 100755
index 7ff54a31..00000000
--- a/image/tool/wait-process
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh -e
-
-# wait startup to finish
-log-helper debug "Waits until startup is complete..."
-while ! test -f /container/run/state/startup-done
-do
- sleep 0.5
-done
-
-for process in "$@"
-do
- # wait service
- log-helper debug "Waits for process ${process} to be started..."
- while ! pgrep -c "${process}" > /dev/null
- do
- sleep 0.5
- done
-done
diff --git a/log/log.go b/log/log.go
new file mode 100644
index 00000000..95fcbe90
--- /dev/null
+++ b/log/log.go
@@ -0,0 +1,310 @@
+package log
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/osixia/container-baseimage/errors"
+)
+
+type level uint32
+type format string
+
+type PrintFunc func(message string)
+
+type CompareFunc func(level, level) bool
+
+const (
+ LevelNone level = 0
+ LevelError level = 1
+ LevelWarning level = 2
+ LevelInfo level = 3
+ LevelDebug level = 4
+ LevelTrace level = 5
+
+ FormatConsole format = "console"
+ FormatJson format = "json"
+)
+
+var Levels = map[level]string{
+ LevelNone: "none",
+ LevelError: "error",
+ LevelWarning: "warning",
+ LevelInfo: "info",
+ LevelDebug: "debug",
+ LevelTrace: "trace",
+}
+
+var Formats = map[format]string{
+ FormatConsole: "console",
+ FormatJson: "json",
+}
+
+var DefaultLevel = LevelInfo
+var DefaultFormat = FormatConsole
+
+type Config struct {
+ Level level
+ Format format
+}
+
+var config = &Config{
+ Level: DefaultLevel,
+ Format: DefaultFormat,
+}
+
+type EnvironmentConfig struct {
+ LevelKey string
+ FormatKey string
+}
+
+var environmentConfig *EnvironmentConfig
+
+func (ec *EnvironmentConfig) Validate() error {
+ if ec.LevelKey == "" {
+ return fmt.Errorf("LevelKey: %w", errors.ErrRequired)
+ }
+
+ if ec.FormatKey == "" {
+ return fmt.Errorf("FormatKey: %w", errors.ErrRequired)
+ }
+
+ return nil
+}
+
+func SetEnvironmentConfig(ec *EnvironmentConfig) error {
+
+ if err := ec.Validate(); err != nil {
+ return err
+ }
+
+ environmentConfig = ec
+
+ if os.Getenv(ec.LevelKey) != "" {
+ if err := SetLevel(os.Getenv(ec.LevelKey)); err != nil {
+ return err
+ }
+ }
+
+ if os.Getenv(ec.FormatKey) != "" {
+ if err := SetFormat(os.Getenv(ec.FormatKey)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func SetConfig(c *Config) {
+ config = c
+}
+
+func FromCmd(f PrintFunc, args []string) {
+ fi, _ := os.Stdin.Stat()
+ if (fi.Mode() & os.ModeCharDevice) == 0 {
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ f(scanner.Text())
+ }
+ } else {
+ f(strings.Join(args, " "))
+ }
+}
+
+func print(output io.Writer, level string, message string) {
+ now := time.Now().Format(time.RFC3339)
+
+ if config.Format == FormatJson {
+ dictionary := map[string]string{
+ "datetime": now,
+ "level": strings.TrimSpace(level),
+ "message": message,
+ }
+
+ json, err := json.Marshal(dictionary)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+
+ fmt.Fprintln(output, string(json))
+ return
+ }
+
+ for _, line := range strings.Split(message, "\n") {
+ fmt.Fprintf(output, "%v %v %v\n", now, level, line)
+ }
+}
+
+func Fatal(message string) {
+ Error(message)
+ os.Exit(1)
+}
+
+func Fatalf(format string, a ...any) {
+ Fatal(getMessage(format, a...))
+}
+
+func Error(message string) {
+ if config.Level >= LevelError {
+ print(os.Stderr, "ERROR ", message)
+ }
+}
+
+func Errorf(format string, a ...any) {
+ Error(getMessage(format, a...))
+}
+
+func Warning(message string) {
+ if config.Level >= LevelWarning {
+ print(os.Stdout, "WARNING", message)
+ }
+}
+
+func Warningf(format string, a ...any) {
+ Warning(getMessage(format, a...))
+}
+
+func Info(message string) {
+ if config.Level >= LevelInfo {
+ print(os.Stdout, "INFO ", message)
+ }
+}
+
+func Infof(format string, a ...any) {
+ Info(getMessage(format, a...))
+}
+
+func Debug(message string) {
+ if config.Level >= LevelDebug {
+ print(os.Stdout, "DEBUG ", message)
+ }
+}
+
+func Debugf(format string, a ...any) {
+ Debug(getMessage(format, a...))
+}
+
+func Trace(message string) {
+ if config.Level >= LevelTrace {
+ print(os.Stdout, "TRACE ", message)
+ }
+}
+
+func Tracef(format string, a ...any) {
+ Trace(getMessage(format, a...))
+}
+
+func LevelsList() []string {
+ keys := make([]int, 0, len(Levels))
+ for k := range Levels {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ values := make([]string, 0, len(Levels))
+ for k := range keys {
+ values = append(values, Levels[level(k)])
+ }
+
+ return values
+}
+
+func FormatsList() []string {
+ values := make([]string, 0, len(Formats))
+ for _, f := range Formats {
+ values = append(values, f)
+ }
+ sort.Strings(values)
+
+ return values
+}
+
+func ParseLevel(level string) (level, error) {
+ for k, l := range Levels {
+ if l == level {
+ return k, nil
+ }
+ }
+
+ return 0, fmt.Errorf("%v: log level %w (choices: %v)", level, errors.ErrUnknown, strings.Join(LevelsList(), ", "))
+}
+
+func Level() level {
+ return config.Level
+}
+
+func SetLevel(level string) error {
+ lvl, err := ParseLevel(level)
+ if err != nil {
+ return err
+ }
+
+ config.Level = lvl
+
+ if environmentConfig != nil {
+ os.Setenv(environmentConfig.LevelKey, level)
+ }
+
+ return nil
+}
+
+func Format() format {
+ return config.Format
+}
+
+func ParseFormat(format string) (format, error) {
+ for k, f := range Formats {
+ if f == format {
+ return k, nil
+ }
+ }
+
+ return "", fmt.Errorf("%v: log format %w (choices: %v)", format, errors.ErrUnknown, strings.Join(FormatsList(), ", "))
+}
+
+func SetFormat(format string) error {
+ f, err := ParseFormat(format)
+ if err != nil {
+ return err
+ }
+
+ config.Format = f
+
+ if environmentConfig != nil {
+ os.Setenv(environmentConfig.FormatKey, Formats[config.Format])
+ }
+
+ return nil
+}
+
+func Equals(a level, b level) bool { return a == b }
+func NotEquals(a level, b level) bool { return a != b }
+func GreaterThan(a level, b level) bool { return a > b }
+func GreaterOrEquals(a level, b level) bool { return a >= b }
+func LessThan(a level, b level) bool { return a < b }
+func LessOrEquals(a level, b level) bool { return a <= b }
+
+// getMessage format with Sprint, Sprintf, or neither.
+func getMessage(format string, a ...any) string {
+ if len(a) == 0 {
+ return format
+ }
+
+ if format != "" {
+ return fmt.Sprintf(format, a...)
+ }
+
+ if len(a) == 1 {
+ if str, ok := a[0].(string); ok {
+ return str
+ }
+ }
+ return fmt.Sprint(a...)
+}
diff --git a/main.go b/main.go
new file mode 100644
index 00000000..d7ba3d0a
--- /dev/null
+++ b/main.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+ "context"
+ "os"
+
+ "github.com/osixia/container-baseimage/cmd"
+ "github.com/osixia/container-baseimage/config"
+ "github.com/osixia/container-baseimage/core"
+ "github.com/osixia/container-baseimage/log"
+)
+
+func main() {
+
+ // set logger environment variables configuration
+ if err := log.SetEnvironmentConfig(config.LogEnvironmentConfig); err != nil {
+ log.Fatalf("Error initializing logger environment: %v", err.Error())
+ }
+
+ // init core
+ if err := core.Init(config.CoreConfig); err != nil {
+ log.Fatalf("Error initializing core: %v", err.Error())
+ }
+
+ // execute cmd
+ mainCtx := context.Background()
+ if err := cmd.Run(mainCtx); err != nil {
+ os.Exit(1)
+ }
+
+}
diff --git a/test/test.bats b/test/test.bats
deleted file mode 100644
index fb7fc132..00000000
--- a/test/test.bats
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bats
-load test_helper
-
-@test "image build" {
-
- run build_image
- [ "$status" -eq 0 ]
-
-}
diff --git a/test/test_helper.bash b/test/test_helper.bash
deleted file mode 100644
index 15f5dc07..00000000
--- a/test/test_helper.bash
+++ /dev/null
@@ -1,75 +0,0 @@
-setup() {
- IMAGE_NAME="$NAME:$VERSION"
-}
-
-# function relative to the current container / image
-build_image() {
- #disable outputs
- docker build -t $IMAGE_NAME $BATS_TEST_DIRNAME/../image &> /dev/null
-}
-
-run_image() {
- CONTAINER_ID=$(docker run $@ -d $IMAGE_NAME)
- CONTAINER_IP=$(get_container_ip_by_cid $CONTAINER_ID)
-}
-
-start_container() {
- start_containers_by_cid $CONTAINER_ID
-}
-
-stop_container() {
- stop_containers_by_cid $CONTAINER_ID
-}
-
-remove_container() {
- remove_containers_by_cid $CONTAINER_ID
-}
-
-clear_container() {
- stop_containers_by_cid $CONTAINER_ID
- remove_containers_by_cid $CONTAINER_ID
-}
-
-wait_process() {
- wait_process_by_cid $CONTAINER_ID $@
-}
-
-# generic functions
-get_container_ip_by_cid() {
- local IP=$(docker inspect -f "{{ .NetworkSettings.IPAddress }}" $1)
- echo "$IP"
-}
-
-start_containers_by_cid() {
- for cid in "$@"
- do
- #disable outputs
- docker start $cid &> /dev/null
- done
-}
-
-stop_containers_by_cid() {
- for cid in "$@"
- do
- #disable outputs
- docker stop $cid &> /dev/null
- done
-}
-
-remove_containers_by_cid() {
- for cid in "$@"
- do
- #disable outputs
- docker rm $cid &> /dev/null
- done
-}
-
-clear_containers_by_cid() {
- stop_containers_by_cid $@
- remove_containers_by_cid $@
-}
-
-wait_process_by_cid() {
- cid=$1
- docker exec $cid /container/tool/wait-process ${@:2}
-}