From 2b6046174bec6d323adae6f6c396b346965a4d2b Mon Sep 17 00:00:00 2001 From: "Petr \"Stone\" Hracek" Date: Wed, 23 Aug 2023 08:54:21 +0200 Subject: [PATCH 1/4] Moving sources from 8.1 to 8.2. Preserve git history Signed-off-by: Petr "Stone" Hracek --- {8.1 => 8.2}/.exclude-c9s | 0 {8.1 => 8.2}/.exclude-rhel8 | 0 {8.1 => 8.2}/Dockerfile.c9s | 0 {8.1 => 8.2}/Dockerfile.fedora | 0 {8.1 => 8.2}/Dockerfile.rhel8 | 0 {8.1 => 8.2}/Dockerfile.rhel9 | 0 {8.1 => 8.2}/README.md | 0 {8.1 => 8.2}/root/opt/app-root/etc/php.d/10-opcache.ini.template | 0 {8.1 => 8.2}/root/opt/app-root/etc/php.ini.template | 0 {8.1 => 8.2}/root/usr/libexec/container-setup | 0 {8.1 => 8.2}/root/usr/share/container-scripts/php/common.sh | 0 .../share/container-scripts/php/httpd-cnf/00-documentroot.conf | 0 .../usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf | 0 .../share/container-scripts/php/post-assemble/20-copy-config.sh | 0 .../usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh | 0 .../usr/share/container-scripts/php/pre-start/20-copy-config.sh | 0 .../usr/share/container-scripts/php/pre-start/40-ssl-certs.sh | 0 .../root/usr/share/container-scripts/php/pre-start/60-fpm.sh | 0 {8.1 => 8.2}/s2i/bin/assemble | 0 {8.1 => 8.2}/s2i/bin/run | 0 {8.1 => 8.2}/s2i/bin/save-artifacts | 0 {8.1 => 8.2}/s2i/bin/usage | 0 {8.1 => 8.2}/test/examples | 0 {8.1 => 8.2}/test/imagestreams | 0 {8.1 => 8.2}/test/run | 0 {8.1 => 8.2}/test/run-openshift | 0 {8.1 => 8.2}/test/run-openshift-remote-cluster | 0 {8.1 => 8.2}/test/self-signed-ssl | 0 {8.1 => 8.2}/test/test-app | 0 {8.1 => 8.2}/test/test-lib-openshift.sh | 0 {8.1 => 8.2}/test/test-lib-php.sh | 0 {8.1 => 8.2}/test/test-lib-remote-openshift.sh | 0 {8.1 => 8.2}/test/test-lib.sh | 0 {8.1 => 8.2}/test/test-openshift.yaml | 0 34 files changed, 0 insertions(+), 0 deletions(-) rename {8.1 => 8.2}/.exclude-c9s (100%) rename {8.1 => 8.2}/.exclude-rhel8 (100%) rename {8.1 => 8.2}/Dockerfile.c9s (100%) rename {8.1 => 8.2}/Dockerfile.fedora (100%) 
rename {8.1 => 8.2}/Dockerfile.rhel8 (100%) rename {8.1 => 8.2}/Dockerfile.rhel9 (100%) rename {8.1 => 8.2}/README.md (100%) rename {8.1 => 8.2}/root/opt/app-root/etc/php.d/10-opcache.ini.template (100%) rename {8.1 => 8.2}/root/opt/app-root/etc/php.ini.template (100%) rename {8.1 => 8.2}/root/usr/libexec/container-setup (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/common.sh (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh (100%) rename {8.1 => 8.2}/root/usr/share/container-scripts/php/pre-start/60-fpm.sh (100%) rename {8.1 => 8.2}/s2i/bin/assemble (100%) rename {8.1 => 8.2}/s2i/bin/run (100%) rename {8.1 => 8.2}/s2i/bin/save-artifacts (100%) rename {8.1 => 8.2}/s2i/bin/usage (100%) rename {8.1 => 8.2}/test/examples (100%) rename {8.1 => 8.2}/test/imagestreams (100%) rename {8.1 => 8.2}/test/run (100%) rename {8.1 => 8.2}/test/run-openshift (100%) rename {8.1 => 8.2}/test/run-openshift-remote-cluster (100%) rename {8.1 => 8.2}/test/self-signed-ssl (100%) rename {8.1 => 8.2}/test/test-app (100%) rename {8.1 => 8.2}/test/test-lib-openshift.sh (100%) rename {8.1 => 8.2}/test/test-lib-php.sh (100%) rename {8.1 => 8.2}/test/test-lib-remote-openshift.sh (100%) rename {8.1 => 8.2}/test/test-lib.sh (100%) rename {8.1 => 8.2}/test/test-openshift.yaml (100%) diff --git a/8.1/.exclude-c9s b/8.2/.exclude-c9s similarity index 100% rename from 8.1/.exclude-c9s rename to 8.2/.exclude-c9s diff --git a/8.1/.exclude-rhel8 b/8.2/.exclude-rhel8 similarity index 
100% rename from 8.1/.exclude-rhel8 rename to 8.2/.exclude-rhel8 diff --git a/8.1/Dockerfile.c9s b/8.2/Dockerfile.c9s similarity index 100% rename from 8.1/Dockerfile.c9s rename to 8.2/Dockerfile.c9s diff --git a/8.1/Dockerfile.fedora b/8.2/Dockerfile.fedora similarity index 100% rename from 8.1/Dockerfile.fedora rename to 8.2/Dockerfile.fedora diff --git a/8.1/Dockerfile.rhel8 b/8.2/Dockerfile.rhel8 similarity index 100% rename from 8.1/Dockerfile.rhel8 rename to 8.2/Dockerfile.rhel8 diff --git a/8.1/Dockerfile.rhel9 b/8.2/Dockerfile.rhel9 similarity index 100% rename from 8.1/Dockerfile.rhel9 rename to 8.2/Dockerfile.rhel9 diff --git a/8.1/README.md b/8.2/README.md similarity index 100% rename from 8.1/README.md rename to 8.2/README.md diff --git a/8.1/root/opt/app-root/etc/php.d/10-opcache.ini.template b/8.2/root/opt/app-root/etc/php.d/10-opcache.ini.template similarity index 100% rename from 8.1/root/opt/app-root/etc/php.d/10-opcache.ini.template rename to 8.2/root/opt/app-root/etc/php.d/10-opcache.ini.template diff --git a/8.1/root/opt/app-root/etc/php.ini.template b/8.2/root/opt/app-root/etc/php.ini.template similarity index 100% rename from 8.1/root/opt/app-root/etc/php.ini.template rename to 8.2/root/opt/app-root/etc/php.ini.template diff --git a/8.1/root/usr/libexec/container-setup b/8.2/root/usr/libexec/container-setup similarity index 100% rename from 8.1/root/usr/libexec/container-setup rename to 8.2/root/usr/libexec/container-setup diff --git a/8.1/root/usr/share/container-scripts/php/common.sh b/8.2/root/usr/share/container-scripts/php/common.sh similarity index 100% rename from 8.1/root/usr/share/container-scripts/php/common.sh rename to 8.2/root/usr/share/container-scripts/php/common.sh diff --git a/8.1/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf b/8.2/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf similarity index 100% rename from 8.1/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf 
rename to 8.2/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf diff --git a/8.1/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf b/8.2/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf similarity index 100% rename from 8.1/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf rename to 8.2/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf diff --git a/8.1/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh b/8.2/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh similarity index 100% rename from 8.1/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh rename to 8.2/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh diff --git a/8.1/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh b/8.2/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh similarity index 100% rename from 8.1/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh rename to 8.2/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh diff --git a/8.1/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh b/8.2/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh similarity index 100% rename from 8.1/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh rename to 8.2/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh diff --git a/8.1/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh b/8.2/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh similarity index 100% rename from 8.1/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh rename to 8.2/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh diff --git a/8.1/root/usr/share/container-scripts/php/pre-start/60-fpm.sh b/8.2/root/usr/share/container-scripts/php/pre-start/60-fpm.sh similarity index 100% rename from 
8.1/root/usr/share/container-scripts/php/pre-start/60-fpm.sh rename to 8.2/root/usr/share/container-scripts/php/pre-start/60-fpm.sh diff --git a/8.1/s2i/bin/assemble b/8.2/s2i/bin/assemble similarity index 100% rename from 8.1/s2i/bin/assemble rename to 8.2/s2i/bin/assemble diff --git a/8.1/s2i/bin/run b/8.2/s2i/bin/run similarity index 100% rename from 8.1/s2i/bin/run rename to 8.2/s2i/bin/run diff --git a/8.1/s2i/bin/save-artifacts b/8.2/s2i/bin/save-artifacts similarity index 100% rename from 8.1/s2i/bin/save-artifacts rename to 8.2/s2i/bin/save-artifacts diff --git a/8.1/s2i/bin/usage b/8.2/s2i/bin/usage similarity index 100% rename from 8.1/s2i/bin/usage rename to 8.2/s2i/bin/usage diff --git a/8.1/test/examples b/8.2/test/examples similarity index 100% rename from 8.1/test/examples rename to 8.2/test/examples diff --git a/8.1/test/imagestreams b/8.2/test/imagestreams similarity index 100% rename from 8.1/test/imagestreams rename to 8.2/test/imagestreams diff --git a/8.1/test/run b/8.2/test/run similarity index 100% rename from 8.1/test/run rename to 8.2/test/run diff --git a/8.1/test/run-openshift b/8.2/test/run-openshift similarity index 100% rename from 8.1/test/run-openshift rename to 8.2/test/run-openshift diff --git a/8.1/test/run-openshift-remote-cluster b/8.2/test/run-openshift-remote-cluster similarity index 100% rename from 8.1/test/run-openshift-remote-cluster rename to 8.2/test/run-openshift-remote-cluster diff --git a/8.1/test/self-signed-ssl b/8.2/test/self-signed-ssl similarity index 100% rename from 8.1/test/self-signed-ssl rename to 8.2/test/self-signed-ssl diff --git a/8.1/test/test-app b/8.2/test/test-app similarity index 100% rename from 8.1/test/test-app rename to 8.2/test/test-app diff --git a/8.1/test/test-lib-openshift.sh b/8.2/test/test-lib-openshift.sh similarity index 100% rename from 8.1/test/test-lib-openshift.sh rename to 8.2/test/test-lib-openshift.sh diff --git a/8.1/test/test-lib-php.sh b/8.2/test/test-lib-php.sh similarity 
index 100% rename from 8.1/test/test-lib-php.sh rename to 8.2/test/test-lib-php.sh diff --git a/8.1/test/test-lib-remote-openshift.sh b/8.2/test/test-lib-remote-openshift.sh similarity index 100% rename from 8.1/test/test-lib-remote-openshift.sh rename to 8.2/test/test-lib-remote-openshift.sh diff --git a/8.1/test/test-lib.sh b/8.2/test/test-lib.sh similarity index 100% rename from 8.1/test/test-lib.sh rename to 8.2/test/test-lib.sh diff --git a/8.1/test/test-openshift.yaml b/8.2/test/test-openshift.yaml similarity index 100% rename from 8.1/test/test-openshift.yaml rename to 8.2/test/test-openshift.yaml From 00f9da5a3833a4822bb0af637d3c83954b8156ab Mon Sep 17 00:00:00 2001 From: "Petr \"Stone\" Hracek" Date: Wed, 23 Aug 2023 08:55:02 +0200 Subject: [PATCH 2/4] Copying source back Signed-off-by: Petr "Stone" Hracek --- 8.1/.exclude-c9s | 0 8.1/.exclude-rhel8 | 0 8.1/Dockerfile.c9s | 86 + 8.1/Dockerfile.fedora | 81 + 8.1/Dockerfile.rhel8 | 82 + 8.1/Dockerfile.rhel9 | 91 + 8.1/README.md | 330 ++++ .../etc/php.d/10-opcache.ini.template | 114 ++ 8.1/root/opt/app-root/etc/php.ini.template | 1626 +++++++++++++++++ 8.1/root/usr/libexec/container-setup | 70 + .../usr/share/container-scripts/php/common.sh | 166 ++ .../php/httpd-cnf/00-documentroot.conf | 1 + .../php/httpd-cnf/50-mpm-tuning.conf | 12 + .../php/post-assemble/20-copy-config.sh | 6 + .../php/post-assemble/40-ssl-certs.sh | 4 + .../php/pre-start/20-copy-config.sh | 6 + .../php/pre-start/40-ssl-certs.sh | 4 + .../container-scripts/php/pre-start/60-fpm.sh | 6 + 8.1/s2i/bin/assemble | 73 + 8.1/s2i/bin/run | 75 + 8.1/s2i/bin/save-artifacts | 4 + 8.1/s2i/bin/usage | 21 + 8.1/test/examples/from-dockerfile/.gitignore | 1 + 8.1/test/examples/from-dockerfile/Dockerfile | 24 + .../examples/from-dockerfile/Dockerfile.s2i | 25 + 8.1/test/examples/from-dockerfile/README.md | 22 + .../certs/server-cert-selfsigned.pem | 20 + .../httpd-ssl/private/server-key.pem | 28 + 8.1/test/examples/self-signed-ssl/index.html | 1 + 
8.1/test/imagestreams/php-centos.json | 133 ++ 8.1/test/imagestreams/php-rhel-aarch64.json | 93 + 8.1/test/imagestreams/php-rhel.json | 133 ++ 8.1/test/run | 237 +++ 8.1/test/run-openshift | 102 ++ 8.1/test/run-openshift-remote-cluster | 42 + .../certs/server-cert-selfsigned.pem | 20 + .../httpd-ssl/private/server-key.pem | 28 + 8.1/test/self-signed-ssl/index.html | 1 + 8.1/test/test-app/composer.json | 6 + 8.1/test/test-app/index.php | 13 + 8.1/test/test-app/session_test.php | 4 + 8.1/test/test-lib-openshift.sh | 1441 +++++++++++++++ 8.1/test/test-lib-php.sh | 34 + 8.1/test/test-lib-remote-openshift.sh | 138 ++ 8.1/test/test-lib.sh | 1438 +++++++++++++++ 8.1/test/test-openshift.yaml | 77 + 46 files changed, 6919 insertions(+) create mode 100644 8.1/.exclude-c9s create mode 100644 8.1/.exclude-rhel8 create mode 100644 8.1/Dockerfile.c9s create mode 100644 8.1/Dockerfile.fedora create mode 100644 8.1/Dockerfile.rhel8 create mode 100644 8.1/Dockerfile.rhel9 create mode 100644 8.1/README.md create mode 100644 8.1/root/opt/app-root/etc/php.d/10-opcache.ini.template create mode 100644 8.1/root/opt/app-root/etc/php.ini.template create mode 100755 8.1/root/usr/libexec/container-setup create mode 100644 8.1/root/usr/share/container-scripts/php/common.sh create mode 100644 8.1/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf create mode 100644 8.1/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf create mode 100644 8.1/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh create mode 100644 8.1/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh create mode 100644 8.1/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh create mode 100644 8.1/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh create mode 100644 8.1/root/usr/share/container-scripts/php/pre-start/60-fpm.sh create mode 100755 8.1/s2i/bin/assemble create mode 100755 8.1/s2i/bin/run create mode 100755 
8.1/s2i/bin/save-artifacts create mode 100755 8.1/s2i/bin/usage create mode 100644 8.1/test/examples/from-dockerfile/.gitignore create mode 100644 8.1/test/examples/from-dockerfile/Dockerfile create mode 100644 8.1/test/examples/from-dockerfile/Dockerfile.s2i create mode 100644 8.1/test/examples/from-dockerfile/README.md create mode 100644 8.1/test/examples/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem create mode 100644 8.1/test/examples/self-signed-ssl/httpd-ssl/private/server-key.pem create mode 100644 8.1/test/examples/self-signed-ssl/index.html create mode 100644 8.1/test/imagestreams/php-centos.json create mode 100644 8.1/test/imagestreams/php-rhel-aarch64.json create mode 100644 8.1/test/imagestreams/php-rhel.json create mode 100755 8.1/test/run create mode 100755 8.1/test/run-openshift create mode 100755 8.1/test/run-openshift-remote-cluster create mode 100644 8.1/test/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem create mode 100644 8.1/test/self-signed-ssl/httpd-ssl/private/server-key.pem create mode 100644 8.1/test/self-signed-ssl/index.html create mode 100644 8.1/test/test-app/composer.json create mode 100644 8.1/test/test-app/index.php create mode 100644 8.1/test/test-app/session_test.php create mode 100644 8.1/test/test-lib-openshift.sh create mode 100644 8.1/test/test-lib-php.sh create mode 100644 8.1/test/test-lib-remote-openshift.sh create mode 100644 8.1/test/test-lib.sh create mode 100644 8.1/test/test-openshift.yaml diff --git a/8.1/.exclude-c9s b/8.1/.exclude-c9s new file mode 100644 index 000000000..e69de29bb diff --git a/8.1/.exclude-rhel8 b/8.1/.exclude-rhel8 new file mode 100644 index 000000000..e69de29bb diff --git a/8.1/Dockerfile.c9s b/8.1/Dockerfile.c9s new file mode 100644 index 000000000..fc4d11748 --- /dev/null +++ b/8.1/Dockerfile.c9s @@ -0,0 +1,86 @@ +FROM quay.io/sclorg/s2i-base-c9s:c9s + +# This image provides an Apache+PHP environment for running PHP +# applications. 
+ +EXPOSE 8080 +EXPOSE 8443 + +# Description +# This image provides an Apache 2.4 + PHP 7.4 environment for running PHP applications. +# Exposed ports: +# * 8080 - alternative port for http + +ENV PHP_VERSION=8.1 \ + PHP_VER_SHORT=81 \ + NAME=php + +ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ + DESCRIPTION="PHP $PHP_VERSION available as container is a base platform for \ +building and running various PHP $PHP_VERSION applications and frameworks. \ +PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers \ +to write dynamically generated web pages. PHP also offers built-in database integration \ +for several commercial and non-commercial database management systems, so writing \ +a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding \ +is probably as a replacement for CGI scripts." + +LABEL summary="${SUMMARY}" \ + description="${DESCRIPTION}" \ + io.k8s.description="${DESCRIPTION}" \ + io.k8s.display-name="Apache 2.4 with PHP ${PHP_VERSION}" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,${NAME},${NAME}${PHP_VER_SHORT},${NAME}-${PHP_VER_SHORT}" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + name="sclorg/${NAME}-${PHP_VER_SHORT}-c9s" \ + com.redhat.component="${NAME}-${PHP_VER_SHORT}-container" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" \ + help="For more information visit https://github.com/sclorg/s2i-${NAME}-container" \ + usage="s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=${PHP_VERSION}/test/test-app sclorg/${NAME}-${PHP_VER_SHORT}-c9s sample-server" \ + maintainer="SoftwareCollections.org " + +# Install Apache httpd and PHP +ARG INSTALL_PKGS="php php-fpm php-mysqlnd php-pgsql php-bcmath \ + php-gd php-intl php-ldap php-mbstring php-pdo \ + php-process php-soap 
php-opcache php-xml \ + php-gmp php-pecl-apcu php-pecl-zip mod_ssl hostname" + +RUN yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + yum reinstall -y tzdata && \ + rpm -V $INSTALL_PKGS && \ + php -v | grep -qe "v$PHP_VERSION\." && echo "Found VERSION $PHP_VERSION" && \ + yum -y clean all --enablerepo='*' + +ENV PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ \ + APP_DATA=${APP_ROOT}/src \ + PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear \ + PHP_SYSCONF_PATH=/etc \ + PHP_HTTPD_CONF_FILE=php.conf \ + PHP_FPM_CONF_D_PATH=/etc/php-fpm.d \ + PHP_FPM_CONF_FILE=www.conf \ + PHP_FPM_RUN_DIR=/run/php-fpm \ + PHP_MAIN_FPM_CONF_FILE=/etc/php-fpm.conf \ + PHP_FPM_LOG_PATH=/var/log/php-fpm \ + HTTPD_CONFIGURATION_PATH=${APP_ROOT}/etc/conf.d \ + HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \ + HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \ + HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d \ + HTTPD_VAR_RUN=/var/run/httpd \ + HTTPD_DATA_PATH=/var/www \ + HTTPD_DATA_ORIG_PATH=/var/www \ + HTTPD_VAR_PATH=/var + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. +COPY ./root/ / + +# Reset permissions of filesystem to default values +RUN /usr/libexec/container-setup && rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/8.1/Dockerfile.fedora b/8.1/Dockerfile.fedora new file mode 100644 index 000000000..ec83b89b5 --- /dev/null +++ b/8.1/Dockerfile.fedora @@ -0,0 +1,81 @@ +FROM quay.io/fedora/s2i-base:37 + +# This image provides an Apache+PHP environment for running PHP +# applications. 
+ +EXPOSE 8080 +EXPOSE 8443 + +ENV PHP_VERSION=8.1 \ + PHP_SHORT_VER=81 \ + PATH=$PATH:/usr/bin + +ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ + DESCRIPTION="PHP $PHP_VERSION available as container is a base platform for \ +building and running various PHP $PHP_VERSION applications and frameworks. \ +PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers \ +to write dynamically generated web pages. PHP also offers built-in database integration \ +for several commercial and non-commercial database management systems, so writing \ +a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding \ +is probably as a replacement for CGI scripts." + +ENV NAME=php \ + VERSION=0 \ + RELEASE=1 \ + ARCH=x86_64 + +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="Apache 2.4 with PHP $PHP_VERSION" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,php" \ + name="fedora/$NAME-$PHP_SHORT_VER" \ + com.redhat.component="$NAME" \ + version="$VERSION" \ + usage="s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=/$PHP_VERSION/test/test-app quay.io/fedora/$NAME-$PHP_SHORT_VER sample-server" \ + maintainer="SoftwareCollections.org " + +# Install Apache httpd and PHP +ARG INSTALL_PKGS="php php-fpm php-mysqlnd php-bcmath \ + php-gd php-intl php-ldap php-mbstring php-pdo \ + php-process php-soap php-opcache php-xml \ + php-gmp php-pecl-apcu mod_ssl hostname" + +RUN yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS --nogpgcheck && \ + rpm -V $INSTALL_PKGS && \ + php -v | grep -qe "v$PHP_VERSION\." 
&& echo "Found VERSION $PHP_VERSION" && \ + yum -y clean all --enablerepo='*' + +ENV PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ \ + APP_DATA=${APP_ROOT}/src \ + PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear \ + PHP_SYSCONF_PATH=/etc \ + PHP_HTTPD_CONF_FILE=php.conf \ + PHP_FPM_CONF_D_PATH=/etc/php-fpm.d \ + PHP_FPM_CONF_FILE=www.conf \ + PHP_FPM_RUN_DIR=/run/php-fpm \ + PHP_MAIN_FPM_CONF_FILE=/etc/php-fpm.conf \ + PHP_FPM_LOG_PATH=/var/log/php-fpm \ + HTTPD_CONFIGURATION_PATH=${APP_ROOT}/etc/conf.d \ + HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \ + HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \ + HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d \ + HTTPD_VAR_RUN=/var/run/httpd \ + HTTPD_DATA_PATH=/var/www \ + HTTPD_DATA_ORIG_PATH=/var/www \ + HTTPD_VAR_PATH=/var + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. +COPY ./root/ / + +# Reset permissions of filesystem to default values +RUN /usr/libexec/container-setup && rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/8.1/Dockerfile.rhel8 b/8.1/Dockerfile.rhel8 new file mode 100644 index 000000000..8032fdddb --- /dev/null +++ b/8.1/Dockerfile.rhel8 @@ -0,0 +1,82 @@ +FROM ubi8/s2i-base:1 + +# This image provides an Apache+PHP environment for running PHP +# applications. + +EXPOSE 8080 +EXPOSE 8443 + +# Description +# This image provides an Apache 2.4 + PHP 8.0 environment for running PHP applications. +# Exposed ports: +# * 8080 - alternative port for http + +ENV PHP_VERSION=8.1 \ + PHP_VER_SHORT=81 \ + NAME=php + +ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ + DESCRIPTION="PHP $PHP_VERSION available as container is a base platform for \ +building and running various PHP $PHP_VERSION applications and frameworks. \ +PHP is an HTML-embedded scripting language. 
PHP attempts to make it easy for developers \ +to write dynamically generated web pages. PHP also offers built-in database integration \ +for several commercial and non-commercial database management systems, so writing \ +a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding \ +is probably as a replacement for CGI scripts." + +LABEL summary="${SUMMARY}" \ + description="${DESCRIPTION}" \ + io.k8s.description="${DESCRIPTION}" \ + io.k8s.display-name="Apache 2.4 with PHP ${PHP_VERSION}" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,${NAME},${NAME}${PHP_VER_SHORT},${NAME}-${PHP_VER_SHORT}" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + name="ubi8/${NAME}-${PHP_VER_SHORT}" \ + com.redhat.component="${NAME}-${PHP_VER_SHORT}-container" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" \ + help="For more information visit https://github.com/sclorg/s2i-${NAME}-container" \ + usage="s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=${PHP_VERSION}/test/test-app ubi8/${NAME}-${PHP_VER_SHORT} sample-server" \ + maintainer="SoftwareCollections.org " + +# Install Apache httpd and PHP +ARG INSTALL_PKGS="php php-mysqlnd php-pgsql php-bcmath \ + php-gd php-intl php-ldap php-mbstring php-pdo \ + php-process php-soap php-opcache php-xml \ + php-gmp php-pecl-apcu php-pecl-zip mod_ssl hostname" + +RUN yum -y module enable php:$PHP_VERSION && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + yum reinstall -y tzdata && \ + rpm -V $INSTALL_PKGS && \ + php -v | grep -qe "v$PHP_VERSION\." 
&& echo "Found VERSION $PHP_VERSION" && \ + yum -y clean all --enablerepo='*' + +ENV PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ \ + APP_DATA=${APP_ROOT}/src \ + PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear \ + PHP_SYSCONF_PATH=/etc \ + PHP_HTTPD_CONF_FILE=php.conf \ + HTTPD_CONFIGURATION_PATH=${APP_ROOT}/etc/conf.d \ + HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \ + HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \ + HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d \ + HTTPD_VAR_RUN=/var/run/httpd \ + HTTPD_DATA_PATH=/var/www \ + HTTPD_DATA_ORIG_PATH=/var/www \ + HTTPD_VAR_PATH=/var + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. +COPY ./root/ / + +# Reset permissions of filesystem to default values +RUN /usr/libexec/container-setup && rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/8.1/Dockerfile.rhel9 b/8.1/Dockerfile.rhel9 new file mode 100644 index 000000000..6d64d6886 --- /dev/null +++ b/8.1/Dockerfile.rhel9 @@ -0,0 +1,91 @@ +FROM ubi9/s2i-base:1 + +# This image provides an Apache+PHP environment for running PHP +# applications. + +EXPOSE 8080 +EXPOSE 8443 + +# Description +# This image provides an Apache 2.4 + PHP 8.0 environment for running PHP applications. +# Exposed ports: +# * 8080 - alternative port for http + +ENV PHP_VERSION=8.1 \ + PHP_VER_SHORT=81 \ + NAME=php + +ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ + DESCRIPTION="PHP $PHP_VERSION available as container is a base platform for \ +building and running various PHP $PHP_VERSION applications and frameworks. \ +PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers \ +to write dynamically generated web pages. 
PHP also offers built-in database integration \ +for several commercial and non-commercial database management systems, so writing \ +a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding \ +is probably as a replacement for CGI scripts." + +LABEL summary="${SUMMARY}" \ + description="${DESCRIPTION}" \ + io.k8s.description="${DESCRIPTION}" \ + io.k8s.display-name="Apache 2.4 with PHP ${PHP_VERSION}" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,${NAME},${NAME}${PHP_VER_SHORT},${NAME}-${PHP_VER_SHORT}" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + name="ubi9/${NAME}-${PHP_VER_SHORT}" \ + com.redhat.component="${NAME}-${PHP_VER_SHORT}-container" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" \ + help="For more information visit https://github.com/sclorg/s2i-${NAME}-container" \ + usage="s2i build https://github.com/sclorg/s2i-php-container.git --context-dir=${PHP_VERSION}/test/test-app ubi9/${NAME}-${PHP_VER_SHORT} sample-server" \ + maintainer="SoftwareCollections.org " + +# Install Apache httpd and PHP +ARG INSTALL_PKGS="php php-fpm php-mysqlnd php-pgsql php-bcmath \ + php-gd php-intl php-ldap php-mbstring php-pdo \ + php-process php-soap php-opcache php-xml \ + php-gmp php-pecl-apcu php-pecl-zip mod_ssl hostname" + +RUN yum module -y enable php:$PHP_VERSION && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + yum reinstall -y tzdata && \ + rpm -V $INSTALL_PKGS && \ + php -v | grep -qe "v$PHP_VERSION\." 
&& echo "Found VERSION $PHP_VERSION" && \ + yum -y clean all --enablerepo='*' + +ENV PHP_CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/php/ \ + APP_DATA=${APP_ROOT}/src \ + PHP_DEFAULT_INCLUDE_PATH=/usr/share/pear \ + PHP_SYSCONF_PATH=/etc \ + PHP_HTTPD_CONF_FILE=php.conf \ + PHP_FPM_CONF_D_PATH=/etc/php-fpm.d \ + PHP_FPM_CONF_FILE=www.conf \ + PHP_FPM_RUN_DIR=/run/php-fpm \ + PHP_MAIN_FPM_CONF_FILE=/etc/php-fpm.conf \ + PHP_FPM_LOG_PATH=/var/log/php-fpm \ + HTTPD_CONFIGURATION_PATH=${APP_ROOT}/etc/conf.d \ + HTTPD_MAIN_CONF_PATH=/etc/httpd/conf \ + HTTPD_MAIN_CONF_D_PATH=/etc/httpd/conf.d \ + HTTPD_MODULES_CONF_D_PATH=/etc/httpd/conf.modules.d \ + HTTPD_VAR_RUN=/var/run/httpd \ + HTTPD_DATA_PATH=/var/www \ + HTTPD_DATA_ORIG_PATH=/var/www \ + HTTPD_VAR_PATH=/var + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. +COPY ./root/ / + +# Reset permissions of filesystem to default values +RUN /usr/libexec/container-setup && rpm-file-permissions + +# RPM uses a wrong file in the config +# Related: https://bugzilla.redhat.com/show_bug.cgi?id=2092356 +RUN sed -i "s/mod_php7.c/mod_php.c/" /etc/httpd/conf.d/php.conf + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/8.1/README.md b/8.1/README.md new file mode 100644 index 000000000..6b4a6345a --- /dev/null +++ b/8.1/README.md @@ -0,0 +1,330 @@ +PHP 8.1 container image +======================= + +This container image includes PHP 8.1 as a [S2I](https://github.com/openshift/source-to-image) base image for your PHP 8.1 applications. +Users can choose between RHEL and CentOS Stream based builder images. 
+The RHEL UBI images are available in the [Red Hat Container Catalog](https://access.redhat.com/containers/), +the CentOS Stream images are available on [Quay.io](https://quay.io/organization/sclorg), +and the Fedora images are available in [Quay.io](https://quay.io/organization/fedora). +The resulting image can be run using [podman](https://github.com/containers/libpod). + +Note: while the examples in this README are calling `podman`, you can replace any such calls by `docker` with the same arguments + +Description +----------- + +PHP 8.1 available as container is a base platform for +building and running various PHP 8.1 applications and frameworks. +PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers +to write dynamically generated web pages. PHP also offers built-in database integration +for several commercial and non-commercial database management systems, so writing +a database-enabled webpage with PHP is fairly simple. The most common use of PHP coding +is probably as a replacement for CGI scripts. + +This container image includes an npm utility, so users can use it to install JavaScript +modules for their web applications. There is no guarantee for any specific npm or nodejs +version, that is included in the image; those versions can be changed anytime and +the nodejs itself is included just to make the npm work. + +Usage in OpenShift +------------------ +In this example, we will assume that you are using the `ubi8/php-81` image, available via `php:8.1` imagestream tag in Openshift. 
+ +To build a simple [cakephp-sample-app](https://github.com/sclorg/cakephp-ex.git) application in Openshift: + +``` +oc new-app php:8.1~https://github.com/sclorg/cakephp-ex.git +``` + +To access the application: +``` +$ oc get pods +$ oc exec -- curl 127.0.0.1:8080 +``` + +**Accessing the application:** +``` +$ curl 127.0.0.1:8080 +``` + +Source-to-Image framework and scripts +------------------------------------- +This image supports the [Source-to-Image](https://docs.openshift.com/container-platform/3.11/creating_images/s2i.html) +(S2I) strategy in OpenShift. The Source-to-Image is an OpenShift framework +which makes it easy to write images that take application source code as +an input, use a builder image like this PHP container image, and produce +a new image that runs the assembled application as an output. + +To support the Source-to-Image framework, important scripts are included in the builder image: + +* The `/usr/libexec/s2i/assemble` script inside the image is run to produce a new image with the application artifacts. The script takes sources of a given application and places them into appropriate directories inside the image. It utilizes some common patterns in PHP application development (see the **Environment variables** section below). +* The `/usr/libexec/s2i/run` script is set as the default command in the resulting container image (the new image with the application artifacts). It runs `httpd` with PHP support enabled. + +Building an application using a Dockerfile +------------------------------------------ +Compared to the Source-to-Image strategy, using a Dockerfile is a more +flexible way to build a PHP container image with an application. +Use a Dockerfile when Source-to-Image is not sufficiently flexible for you or +when you build the image outside of the OpenShift environment. + +To use the PHP image in a Dockerfile, follow these steps: + +#### 1. 
Pull a base builder image to build on + +``` +podman pull ubi8/php-81 +``` + +An UBI image `ubi8/php-81` is used in this example. This image is usable and freely redistributable under the terms of the UBI End User License Agreement (EULA). See more about UBI at [UBI FAQ](https://developers.redhat.com/articles/ubi-faq). + +#### 2. Pull an application code + +An example application available at https://github.com/sclorg/cakephp-ex.git is used here. Feel free to clone the repository for further experiments. + +``` +git clone https://github.com/sclorg/cakephp-ex.git app-src +``` + +#### 3. Prepare an application inside a container + +This step usually consists of at least these parts: + +* putting the application source into the container +* installing the dependencies +* setting the default command in the resulting image + +For all these three parts, users can either setup all manually and use commands `./composer.phar` or other commands explicitly in the Dockerfile ([3.1.](#31-to-use-your-own-setup-create-a-dockerfile-with-this-content)), or users can use the Source-to-Image scripts inside the image ([3.2.](#32-to-use-the-source-to-image-scripts-and-build-an-image-using-a-dockerfile-create-a-dockerfile-with-this-content); see more about these scripts in the section "Source-to-Image framework and scripts" above), that already know how to set-up and run some common PHP applications. + +##### 3.1. To use your own setup, create a Dockerfile with this content: +``` +FROM ubi8/php-81 + +# Add application sources +ADD app-src . + +# Install the dependencies +RUN TEMPFILE=$(mktemp) && \ + curl -o "$TEMPFILE" "https://getcomposer.org/installer" && \ + php <"$TEMPFILE" && \ + ./composer.phar install --no-interaction --no-ansi --optimize-autoloader + +# Run script uses standard ways to configure the PHP application +# and execs httpd -D FOREGROUND at the end +# See more in /s2i/bin/run in this repository. 
+# Shortly what the run script does: The httpd daemon and php need to be
+# configured, so this script prepares the configuration based on the container
+# parameters (e.g. available memory) and puts the configuration files into
+# the appropriate places.
+# This can obviously be done differently, and in that case, the final CMD
+# should be set to "CMD httpd -D FOREGROUND" instead.
+CMD /usr/libexec/s2i/run
+
+```
+
+##### 3.2. To use the Source-to-Image scripts and build an image using a Dockerfile, create a Dockerfile with this content:
+```
+FROM ubi8/php-81
+
+# Add application sources to a directory that the assemble script expects them
+# and set permissions so that the container runs without root access
+USER 0
+ADD app-src /tmp/src
+RUN chown -R 1001:0 /tmp/src
+USER 1001
+
+# Install the dependencies
+RUN /usr/libexec/s2i/assemble
+
+# Set the default command for the resulting image
+CMD /usr/libexec/s2i/run
+```
+
+#### 4. Build a new image from a Dockerfile prepared in the previous step
+
+```
+podman build -t cakephp-app .
+```
+
+#### 5. Run the resulting image with the final application
+
+```
+podman run -d cakephp-app
+```
+
+Environment variables for Source-to-Image
+-----------------------------------------
+
+To set these environment variables, you can place them as a key value pair into a `.s2i/environment`
+file inside your source code repository.
+ +The following environment variables set their equivalent property value in the php.ini file: +* **ERROR_REPORTING** + * Informs PHP of which errors, warnings and notices you would like it to take action for + * Default: E_ALL & ~E_NOTICE +* **DISPLAY_ERRORS** + * Controls whether or not and where PHP will output errors, notices and warnings + * Default: ON +* **DISPLAY_STARTUP_ERRORS** + * Cause display errors which occur during PHP's startup sequence to be handled separately from display errors + * Default: OFF +* **TRACK_ERRORS** + * Store the last error/warning message in $php_errormsg (boolean) + * Default: OFF +* **HTML_ERRORS** + * Link errors to documentation related to the error + * Default: ON +* **INCLUDE_PATH** + * Path for PHP source files + * Default: .:/opt/app-root/src:/opt/rh/rh-php81/root/usr/share/pear (EL7) + * Default: .:/opt/app-root/src:/usr/share/pear (EL8, Fedora) +* **PHP_MEMORY_LIMIT** + * Memory Limit + * Default: 128M +* **SESSION_NAME** + * Name of the session + * Default: PHPSESSID +* **SESSION_HANDLER** + * Method for saving sessions + * Default: files +* **SESSION_PATH** + * Location for session data files + * Default: /tmp/sessions +* **SESSION_COOKIE_DOMAIN** + * The domain for which the cookie is valid. + * Default: +* **SESSION_COOKIE_HTTPONLY** + * Whether or not to add the httpOnly flag to the cookie + * Default: 0 +* **SESSION_COOKIE_SECURE** + * Specifies whether cookies should only be sent over secure connections. + * Default: Off +* **SHORT_OPEN_TAG** + * Determines whether or not PHP will recognize code between tags + * Default: OFF +* **DOCUMENTROOT** + * Path that defines the DocumentRoot for your application (ie. 
/public)
+  * Default: /
+
+The following environment variables set their equivalent property value in the opcache.ini file:
+* **OPCACHE_MEMORY_CONSUMPTION**
+  * The OPcache shared memory storage size in megabytes
+  * Default: 128
+* **OPCACHE_REVALIDATE_FREQ**
+  * How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.
+  * Default: 2
+* **OPCACHE_MAX_FILES**
+  * The maximum number of keys (scripts) in the OPcache hash table. Only numbers between 200 and 1000000 are allowed.
+  * Default: 4000
+
+You can also override the entire directory used to load the PHP configuration by setting:
+* **PHPRC**
+  * Sets the path to the php.ini file
+* **PHP_INI_SCAN_DIR**
+  * Path to scan for additional ini configuration files
+
+You can override the Apache [MPM prefork](https://httpd.apache.org/docs/2.4/mod/mpm_common.html)
+settings to increase the performance of the PHP application. In case you set
+some Cgroup limits, the image will attempt to automatically set the
+optimal values. You can override this at any time by specifying the values
+yourself:
+
+* **HTTPD_START_SERVERS**
+  * The [StartServers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#startservers)
+  directive sets the number of child server processes created on startup.
+  * Default: 8
+* **HTTPD_MAX_REQUEST_WORKERS**
+  * The [MaxRequestWorkers](https://httpd.apache.org/docs/2.4/mod/mpm_common.html#maxrequestworkers)
+  directive sets the limit on the number of simultaneous requests that will be served.
+  * `MaxRequestWorkers` was called `MaxClients` before version httpd 2.3.13.
+  * Default: 256 (this is automatically tuned by setting Cgroup limits for the container using this formula:
+  `TOTAL_MEMORY / 15MB`. The 15MB is average size of a single httpd process.
+* **HTTPD_MAX_REQUESTS_PER_CHILD** + * The [MaxRequestsPerChild](http://httpd.apache.org/docs/current/mod/mpm_common.html#maxconnectionsperchild) + directive sets the limit on the number of connections that an individual child server process will handle. + After `MaxRequestsPerChild` connections, the child process will die. If `MaxRequestsPerChild` is 0, then the process will never expire. + * Setting `MaxRequestsPerChild` to a non-zero value limits the amount of memory that a process can consume by (accidental) memory leakage. + * `MaxRequestsPerChild` is called `MaxConnectionsPerChild` in Apache HTTP 2.3.9 and later. + * Default: 4000 +* **HTTPD_MAX_KEEPALIVE_REQUESTS** + * The [MaxKeepAliveRequests](http://httpd.apache.org/docs/current/mod/core.html#maxkeepaliverequests) + directive limits the number of requests allowed per connection when `KeepAlive` is on. If it is set to 0, unlimited requests will be allowed. + * Default: 100 + + You can use a custom composer repository mirror URL to download packages instead of the default 'packagist.org': + + * **COMPOSER_MIRROR** + * Adds a custom composer repository mirror URL to composer configuration. Note: This only affects packages listed in composer.json. + * **COMPOSER_INSTALLER** + * Overrides the default URL for downloading Composer of https://getcomposer.org/installer. Useful in disconnected environments. + * **COMPOSER_VERSION** + * Overrides the default composer version to install (1, 2, preview, snapshot or version="x.y.z") + * **COMPOSER_ARGS** + * Adds extra arguments to the `composer install` command line (for example `--no-dev`). + + +Source repository layout +------------------------ + +You do not need to change anything in your existing PHP project's repository. +However, if these files exist they will affect the behavior of the build process: + +* **composer.json** + + List of dependencies to be installed with `composer`. The format is documented + [here](https://getcomposer.org/doc/04-schema.md). 
+
+
+* **.htaccess**
+
+  In case the **DocumentRoot** of the application is nested within the source directory `/opt/app-root/src`,
+  users can provide their own Apache **.htaccess** file. This allows the overriding of Apache's behavior and
+  specifies how application requests should be handled. The **.htaccess** file needs to be located at the root
+  of the application source.
+
+Hot deploy
+----------
+
+In order to immediately pick up changes made in your application source code, you need to run your built image with the `OPCACHE_REVALIDATE_FREQ=0` environment variable passed to [Podman](https://github.com/containers/libpod) `-e` run flag:
+
+```
+$ podman run -e OPCACHE_REVALIDATE_FREQ=0 -p 8080:8080 php-app
+```
+
+To change your source code in a running container, use Podman's [exec](https://github.com/containers/libpod) command:
+```
+podman exec -it <CONTAINER_ID> /bin/bash
+```
+
+After you [Podman exec](https://github.com/containers/libpod) into the running container, your current directory is set
+to `/opt/app-root/src`, where the source code is located.
+
+
+Extending image
+---------------
+Not only content, but also startup scripts and configuration of the image can
+be extended using [source-to-image](https://github.com/openshift/source-to-image).
+
+The structure of the application can look like this:
+
+| Folder name       | Description                |
+|-------------------|----------------------------|
+| `./httpd-cfg`     | Can contain additional Apache configuration files (`*.conf`)|
+| `./httpd-ssl`     | Can contain own SSL certificate (in `certs/` subdirectory) and key (in `private/` subdirectory)|
+| `./php-pre-start` | Can contain shell scripts (`*.sh`) that are sourced before `httpd` is started|
+| `./php-post-assemble`| Can contain shell scripts (`*.sh`) that are sourced at the end of `assemble` script|
+| `./`              | Application source code    |
+
+
+See also
+--------
+Dockerfile and other sources are available on https://github.com/sclorg/s2i-php-container.
+In that repository you can also find other versions of PHP environment Dockerfiles.
+Dockerfile for CentOS is called `Dockerfile`, Dockerfile for RHEL7 is called `Dockerfile.rhel7`,
+for RHEL8 it's `Dockerfile.rhel8`, for RHEL9 it's `Dockerfile.rhel9` and the Fedora Dockerfile is called Dockerfile.fedora.
+
+Security Implications
+---------------------
+
+-p 8080:8080
+
+    Opens container port 8080 and maps it to the same port on the Host.
diff --git a/8.1/root/opt/app-root/etc/php.d/10-opcache.ini.template b/8.1/root/opt/app-root/etc/php.d/10-opcache.ini.template
new file mode 100644
index 000000000..347e4d921
--- /dev/null
+++ b/8.1/root/opt/app-root/etc/php.d/10-opcache.ini.template
@@ -0,0 +1,114 @@
+; Enable Zend OPcache extension module
+zend_extension=opcache.so
+
+; Determines if Zend OPCache is enabled
+opcache.enable=1
+
+; Determines if Zend OPCache is enabled for the CLI version of PHP
+;opcache.enable_cli=0
+
+; The OPcache shared memory storage size.
+opcache.memory_consumption=${OPCACHE_MEMORY_CONSUMPTION}
+
+; The amount of memory for interned strings in Mbytes.
+opcache.interned_strings_buffer=8
+
+; The maximum number of keys (scripts) in the OPcache hash table.
+; Only numbers between 200 and 1000000 are allowed.
+opcache.max_accelerated_files=${OPCACHE_MAX_FILES}
+
+; The maximum percentage of "wasted" memory until a restart is scheduled.
+;opcache.max_wasted_percentage=5
+
+; When this directive is enabled, the OPcache appends the current working
+; directory to the script key, thus eliminating possible collisions between
+; files with the same name (basename). Disabling the directive improves
+; performance, but may break existing applications.
+;opcache.use_cwd=1
+
+; When disabled, you must reset the OPcache manually or restart the
+; webserver for changes to the filesystem to take effect.
+;opcache.validate_timestamps=1
+
+; How often (in seconds) to check file timestamps for changes to the shared
+; memory storage allocation.
("1" means validate once per second, but only +; once per request. "0" means always validate) +opcache.revalidate_freq=${OPCACHE_REVALIDATE_FREQ} + +; Enables or disables file search in include_path optimization +;opcache.revalidate_path=0 + +; If disabled, all PHPDoc comments are dropped from the code to reduce the +; size of the optimized code. +;opcache.save_comments=1 + +; If enabled, a fast shutdown sequence is used for the accelerated code +; Depending on the used Memory Manager this may cause some incompatibilities. +;opcache.fast_shutdown=0 + +; Allow file existence override (file_exists, etc.) performance feature. +;opcache.enable_file_override=0 + +; A bitmask, where each bit enables or disables the appropriate OPcache +; passes +;opcache.optimization_level=0xffffffff + +;opcache.inherited_hack=1 +;opcache.dups_fix=0 + +; The location of the OPcache blacklist file (wildcards allowed). +; Each OPcache blacklist file is a text file that holds the names of files +; that should not be accelerated. +opcache.blacklist_filename=${PHP_SYSCONF_PATH}/php.d/opcache*.blacklist + +; Allows exclusion of large files from being cached. By default all files +; are cached. +opcache.max_file_size=1M + +; Check the cache checksum each N requests. +; The default value of "0" means that the checks are disabled. +;opcache.consistency_checks=0 + +; How long to wait (in seconds) for a scheduled restart to begin if the cache +; is not being accessed. +;opcache.force_restart_timeout=180 + +; OPcache error_log file name. Empty string assumes "stderr". +;opcache.error_log= + +; All OPcache errors go to the Web server log. +; By default, only fatal errors (level 0) or errors (level 1) are logged. +; You can also enable warnings (level 2), info messages (level 3) or +; debug messages (level 4). +;opcache.log_verbosity_level=1 + +; Preferred Shared Memory back-end. Leave empty and let the system decide. 
+;opcache.preferred_memory_model= + +; Protect the shared memory from unexpected writing during script execution. +; Useful for internal debugging only. +;opcache.protect_memory=0 + +; Allows calling OPcache API functions only from PHP scripts which path is +; started from specified string. The default "" means no restriction +;opcache.restrict_api= + +; Enables and sets the second level cache directory. +; It should improve performance when SHM memory is full, at server restart or +; SHM reset. The default "" disables file based caching. +; RPM note : file cache directory must be owned by process owner +; for mod_php, see /etc/httpd/conf.d/php.conf +; for php-fpm, see /etc/php-fpm.d/*conf +;opcache.file_cache= + +; Enables or disables opcode caching in shared memory. +;opcache.file_cache_only=0 + +; Enables or disables checksum validation when script loaded from file cache. +;opcache.file_cache_consistency_checks=1 + + +; Enables or disables copying of PHP code (text segment) into HUGE PAGES. +; This should improve performance, but requires appropriate OS configuration. +;opcache.huge_code_pages=0 + diff --git a/8.1/root/opt/app-root/etc/php.ini.template b/8.1/root/opt/app-root/etc/php.ini.template new file mode 100644 index 000000000..efaab65de --- /dev/null +++ b/8.1/root/opt/app-root/etc/php.ini.template @@ -0,0 +1,1626 @@ +[PHP] + +;;;;;;;;;;;;;;;;;;; +; About php.ini ; +;;;;;;;;;;;;;;;;;;; +; PHP's initialization file, generally called php.ini, is responsible for +; configuring many of the aspects of PHP's behavior. + +; PHP attempts to find and load this configuration from a number of locations. +; The following is a summary of its search order: +; 1. SAPI module specific location. +; 2. The PHPRC environment variable. (As of PHP 5.2.0) +; 3. A number of predefined registry keys on Windows (As of PHP 5.2.0) +; 4. Current working directory (except CLI) +; 5. The web server's directory (for SAPI modules), or directory of PHP +; (otherwise in Windows) +; 6. 
The directory from the --with-config-file-path compile time option, or the +; Windows directory (C:\windows or C:\winnt) +; See the PHP docs for more specific information. +; http://php.net/configuration.file + +; The syntax of the file is extremely simple. Whitespace and lines +; beginning with a semicolon are silently ignored (as you probably guessed). +; Section headers (e.g. [Foo]) are also silently ignored, even though +; they might mean something in the future. + +; Directives following the section heading [PATH=/www/mysite] only +; apply to PHP files in the /www/mysite directory. Directives +; following the section heading [HOST=www.example.com] only apply to +; PHP files served from www.example.com. Directives set in these +; special sections cannot be overridden by user-defined INI files or +; at runtime. Currently, [PATH=] and [HOST=] sections only work under +; CGI/FastCGI. +; http://php.net/ini.sections + +; Directives are specified using the following syntax: +; directive = value +; Directive names are *case sensitive* - foo=bar is different from FOO=bar. +; Directives are variables used to configure PHP or PHP extensions. +; There is no name validation. If PHP can't find an expected +; directive because it is not set or is mistyped, a default value will be used. + +; The value can be a string, a number, a PHP constant (e.g. E_ALL or M_PI), one +; of the INI constants (On, Off, True, False, Yes, No and None) or an expression +; (e.g. E_ALL & ~E_NOTICE), a quoted string ("bar"), or a reference to a +; previously set variable or directive (e.g. ${foo}) + +; Expressions in the INI file are limited to bitwise operators and parentheses: +; | bitwise OR +; ^ bitwise XOR +; & bitwise AND +; ~ bitwise NOT +; ! boolean NOT + +; Boolean flags can be turned on using the values 1, On, True or Yes. +; They can be turned off using the values 0, Off, False or No. 
+ +; An empty string can be denoted by simply not writing anything after the equal +; sign, or by using the None keyword: + +; foo = ; sets foo to an empty string +; foo = None ; sets foo to an empty string +; foo = "None" ; sets foo to the string 'None' + +; If you use constants in your value, and these constants belong to a +; dynamically loaded extension (either a PHP extension or a Zend extension), +; you may only use these constants *after* the line that loads the extension. + +;;;;;;;;;;;;;;;;;;; +; About this file ; +;;;;;;;;;;;;;;;;;;; +; PHP comes packaged with two INI files. One that is recommended to be used +; in production environments and one that is recommended to be used in +; development environments. + +; php.ini-production contains settings which hold security, performance and +; best practices at its core. But please be aware, these settings may break +; compatibility with older or less security conscience applications. We +; recommending using the production ini in production and testing environments. + +; php.ini-development is very similar to its production variant, except it is +; much more verbose when it comes to errors. We recommend using the +; development version only in development environments, as errors shown to +; application users can inadvertently leak otherwise secure information. + +; This is php.ini-production INI file. + +;;;;;;;;;;;;;;;;;;; +; Quick Reference ; +;;;;;;;;;;;;;;;;;;; +; The following are all the settings which are different in either the production +; or development versions of the INIs with respect to PHP's default behavior. +; Please see the actual settings later in the document for more details as to why +; we recommend these changes in PHP's behavior. 
+ +; display_errors +; Default Value: On +; Development Value: On +; Production Value: Off + +; display_startup_errors +; Default Value: Off +; Development Value: On +; Production Value: Off + +; error_reporting +; Default Value: E_ALL & ~E_NOTICE & ~E_STRICT & ~E_DEPRECATED +; Development Value: E_ALL +; Production Value: E_ALL & ~E_DEPRECATED & ~E_STRICT + +; html_errors +; Default Value: On +; Development Value: On +; Production value: On + +; log_errors +; Default Value: Off +; Development Value: On +; Production Value: On + +; max_input_time +; Default Value: -1 (Unlimited) +; Development Value: 60 (60 seconds) +; Production Value: 60 (60 seconds) + +; output_buffering +; Default Value: Off +; Development Value: 4096 +; Production Value: 4096 + +; register_argc_argv +; Default Value: On +; Development Value: Off +; Production Value: Off + +; request_order +; Default Value: None +; Development Value: "GP" +; Production Value: "GP" + +; session.gc_divisor +; Default Value: 100 +; Development Value: 1000 +; Production Value: 1000 + +; session.hash_bits_per_character +; Default Value: 4 +; Development Value: 5 +; Production Value: 5 + +; short_open_tag +; Default Value: On +; Development Value: Off +; Production Value: Off + +; track_errors +; Default Value: Off +; Development Value: On +; Production Value: Off + +; url_rewriter.tags +; Default Value: "a=href,area=href,frame=src,form=,fieldset=" +; Development Value: "a=href,area=href,frame=src,input=src,form=fakeentry" +; Production Value: "a=href,area=href,frame=src,input=src,form=fakeentry" + +; variables_order +; Default Value: "EGPCS" +; Development Value: "GPCS" +; Production Value: "GPCS" + +;;;;;;;;;;;;;;;;;;;; +; php.ini Options ; +;;;;;;;;;;;;;;;;;;;; +; Name for user-defined php.ini (.htaccess) files. Default is ".user.ini" +;user_ini.filename = ".user.ini" + +; To disable this feature set this option to empty value +;user_ini.filename = + +; TTL for user-defined php.ini files (time-to-live) in seconds. 
Default is 300 seconds (5 minutes)
+;user_ini.cache_ttl = 300
+
+;;;;;;;;;;;;;;;;;;;;
+; Language Options ;
+;;;;;;;;;;;;;;;;;;;;
+
+; Enable the PHP scripting language engine under Apache.
+; http://php.net/engine
+engine = On
+
+; This directive determines whether or not PHP will recognize code between
+; <?php and ?> tags as PHP source which should be processed as such. It is
+; generally recommended that <?php ?> should be used and that this feature
+; should be disabled, as enabling it may result in issues when generating XML
+; documents, however this remains supported for backward compatibility reasons.
+; Note that this directive does not control the <?= shorthand tag, which can be
+; used regardless of this directive.
+; http://php.net/short-open-tag
+short_open_tag = ${SHORT_OPEN_TAG}
+
+; Colors for Syntax Highlighting mode. Anything that's acceptable in
+; <span style="color: ???????"> would work.
+; http://php.net/syntax-highlighting
+;highlight.string = #DD0000
+;highlight.comment = #FF9900
+;highlight.keyword = #007700
+;highlight.default = #0000BB
+;highlight.html = #000000
+
+; If enabled, the request will be allowed to complete even if the user aborts
+; the request. Consider enabling it if executing long requests, which may end up
+; being interrupted by the user or a browser timing out. PHP's default behavior
+; is to disable this feature.
+; http://php.net/ignore-user-abort
+;ignore_user_abort = On
+
+; Determines the size of the realpath cache to be used by PHP. This value should
+; be increased on systems where PHP opens many files to reflect the quantity of
+; the file operations performed.
+; http://php.net/realpath-cache-size
+;realpath_cache_size = 4096k
+
+; Duration of time, in seconds for which to cache realpath information for a given
+; file or directory. For systems with rarely changing files, consider increasing this
+; value.
+; http://php.net/realpath-cache-ttl
+;realpath_cache_ttl = 120
+
+; Enables or disables the circular reference collector.
+; http://php.net/zend.enable-gc
+zend.enable_gc = On
+
+; If enabled, scripts may be written in encodings that are incompatible with
+; the scanner. CP936, Big5, CP949 and Shift_JIS are the examples of such
+; encodings.
To use this feature, mbstring extension must be enabled. +; Default: Off +;zend.multibyte = Off + +; Allows to set the default encoding for the scripts. This value will be used +; unless "declare(encoding=...)" directive appears at the top of the script. +; Only affects if zend.multibyte is set. +; Default: "" +;zend.script_encoding = + +;;;;;;;;;;;;;;;;; +; Miscellaneous ; +;;;;;;;;;;;;;;;;; + +; Decides whether PHP may expose the fact that it is installed on the server +; (e.g. by adding its signature to the Web server header). It is no security +; threat in any way, but it makes it possible to determine whether you use PHP +; on your server or not. +; http://php.net/expose-php +expose_php = Off + +;;;;;;;;;;;;;;;;;;; +; Resource Limits ; +;;;;;;;;;;;;;;;;;;; + +; Maximum execution time of each script, in seconds +; http://php.net/max-execution-time +; Note: This directive is hardcoded to 0 for the CLI SAPI +max_execution_time = 300 + +; Maximum amount of time each script may spend parsing request data. It's a good +; idea to limit this time on productions servers in order to eliminate unexpectedly +; long running scripts. +; Note: This directive is hardcoded to -1 for the CLI SAPI +; Default Value: -1 (Unlimited) +; Development Value: 60 (60 seconds) +; Production Value: 60 (60 seconds) +; http://php.net/max-input-time +max_input_time = 60 + +; Maximum input variable nesting level +; http://php.net/max-input-nesting-level +;max_input_nesting_level = 64 + +; How many GET/POST/COOKIE input variables may be accepted +; max_input_vars = 1000 + +; Maximum amount of memory a script may consume (128MB) +; http://php.net/memory-limit +memory_limit = ${PHP_MEMORY_LIMIT} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Error handling and logging ; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; This directive informs PHP of which errors, warnings and notices you would like +; it to take action for. 
The recommended way of setting values for this +; directive is through the use of the error level constants and bitwise +; operators. The error level constants are below here for convenience as well as +; some common settings and their meanings. +; By default, PHP is set to take action on all errors, notices and warnings EXCEPT +; those related to E_NOTICE and E_STRICT, which together cover best practices and +; recommended coding standards in PHP. For performance reasons, this is the +; recommend error reporting setting. Your production server shouldn't be wasting +; resources complaining about best practices and coding standards. That's what +; development servers and development settings are for. +; Note: The php.ini-development file has this setting as E_ALL. This +; means it pretty much reports everything which is exactly what you want during +; development and early testing. +; +; Error Level Constants: +; E_ALL - All errors and warnings (includes E_STRICT as of PHP 5.4.0) +; E_ERROR - fatal run-time errors +; E_RECOVERABLE_ERROR - almost fatal run-time errors +; E_WARNING - run-time warnings (non-fatal errors) +; E_PARSE - compile-time parse errors +; E_NOTICE - run-time notices (these are warnings which often result +; from a bug in your code, but it's possible that it was +; intentional (e.g., using an uninitialized variable and +; relying on the fact it is automatically initialized to an +; empty string) +; E_STRICT - run-time notices, enable to have PHP suggest changes +; to your code which will ensure the best interoperability +; and forward compatibility of your code +; E_CORE_ERROR - fatal errors that occur during PHP's initial startup +; E_CORE_WARNING - warnings (non-fatal errors) that occur during PHP's +; initial startup +; E_COMPILE_ERROR - fatal compile-time errors +; E_COMPILE_WARNING - compile-time warnings (non-fatal errors) +; E_USER_ERROR - user-generated error message +; E_USER_WARNING - user-generated warning message +; E_USER_NOTICE - 
user-generated notice message +; E_DEPRECATED - warn about code that will not work in future versions +; of PHP +; E_USER_DEPRECATED - user-generated deprecation warnings +; +; Common Values: +; E_ALL (Show all errors, warnings and notices including coding standards.) +; E_ALL & ~E_NOTICE (Show all errors, except for notices) +; E_ALL & ~E_NOTICE & ~E_STRICT (Show all errors, except for notices and coding standards warnings.) +; E_COMPILE_ERROR|E_RECOVERABLE_ERROR|E_ERROR|E_CORE_ERROR (Show only errors) +; Default Value: E_ALL & ~E_NOTICE & ~E_STRICT & ~E_DEPRECATED +; Development Value: E_ALL +; Production Value: E_ALL & ~E_DEPRECATED & ~E_STRICT +; http://php.net/error-reporting +error_reporting = ${ERROR_REPORTING} + +; This directive controls whether or not and where PHP will output errors, +; notices and warnings too. Error output is very useful during development, but +; it could be very dangerous in production environments. Depending on the code +; which is triggering the error, sensitive information could potentially leak +; out of your application such as database usernames and passwords or worse. +; For production environments, we recommend logging errors rather than +; sending them to STDOUT. +; Possible Values: +; Off = Do not display any errors +; stderr = Display errors to STDERR (affects only CGI/CLI binaries!) +; On or stdout = Display errors to STDOUT +; Default Value: On +; Development Value: On +; Production Value: Off +; http://php.net/display-errors +display_errors = ${DISPLAY_ERRORS} + +; The display of errors which occur during PHP's startup sequence are handled +; separately from display_errors. PHP's default behavior is to suppress those +; errors from clients. Turning the display of startup errors on can be useful in +; debugging configuration problems. We strongly recommend you +; set this to 'off' for production servers. 
+; Default Value: Off +; Development Value: On +; Production Value: Off +; http://php.net/display-startup-errors +display_startup_errors = ${DISPLAY_STARTUP_ERRORS} + +; Besides displaying errors, PHP can also log errors to locations such as a +; server-specific log, STDERR, or a location specified by the error_log +; directive found below. While errors should not be displayed on productions +; servers they should still be monitored and logging is a great way to do that. +; Default Value: Off +; Development Value: On +; Production Value: On +; http://php.net/log-errors +log_errors = On + +; Set maximum length of log_errors. In error_log information about the source is +; added. The default is 1024 and 0 allows to not apply any maximum length at all. +; http://php.net/log-errors-max-len +log_errors_max_len = 1024 + +; Do not log repeated messages. Repeated errors must occur in same file on same +; line unless ignore_repeated_source is set true. +; http://php.net/ignore-repeated-errors +ignore_repeated_errors = Off + +; Ignore source of message when ignoring repeated messages. When this setting +; is On you will not log errors with repeated messages from different files or +; source lines. +; http://php.net/ignore-repeated-source +ignore_repeated_source = Off + +; If this parameter is set to Off, then memory leaks will not be shown (on +; stdout or in the log). This has only effect in a debug compile, and if +; error reporting includes E_WARNING in the allowed list +; http://php.net/report-memleaks +report_memleaks = On + +; This setting is on by default. +;report_zend_debug = 0 + +; Store the last error/warning message in $php_errormsg (boolean). Setting this value +; to On can assist in debugging and is appropriate for development servers. It should +; however be disabled on production servers. +; This directive is DEPRECATED. 
+; Default Value: Off +; Development Value: Off +; Production Value: Off +; http://php.net/track-errors +track_errors = ${TRACK_ERRORS} + +; Turn off normal error reporting and emit XML-RPC error XML +; http://php.net/xmlrpc-errors +;xmlrpc_errors = 0 + +; An XML-RPC faultCode +;xmlrpc_error_number = 0 + +; When PHP displays or logs an error, it has the capability of formatting the +; error message as HTML for easier reading. This directive controls whether +; the error message is formatted as HTML or not. +; Note: This directive is hardcoded to Off for the CLI SAPI +; Default Value: On +; Development Value: On +; Production value: On +; http://php.net/html-errors +html_errors = ${HTML_ERRORS} + +; If html_errors is set to On *and* docref_root is not empty, then PHP +; produces clickable error messages that direct to a page describing the error +; or function causing the error in detail. +; You can download a copy of the PHP manual from http://php.net/docs +; and change docref_root to the base URL of your local copy including the +; leading '/'. You must also specify the file extension being used including +; the dot. PHP's default behavior is to leave these settings empty, in which +; case no links to documentation are generated. +; Note: Never use this feature for production boxes. +; http://php.net/docref-root +; Examples +;docref_root = "/phpmanual/" + +; http://php.net/docref-ext +;docref_ext = .html + +; String to output before an error message. PHP's default behavior is to leave +; this setting blank. +; http://php.net/error-prepend-string +; Example: +;error_prepend_string = "" + +; String to output after an error message. PHP's default behavior is to leave +; this setting blank. +; http://php.net/error-append-string +; Example: +;error_append_string = "" + +; Log errors to specified file. PHP's default behavior is to leave this value +; empty. +; http://php.net/error-log +; Example: +;error_log = php_errors.log +; Log errors to syslog. 
+;error_log = syslog + +;windows.show_crt_warning +; Default value: 0 +; Development value: 0 +; Production value: 0 + +;;;;;;;;;;;;;;;;; +; Data Handling ; +;;;;;;;;;;;;;;;;; + +; The separator used in PHP generated URLs to separate arguments. +; PHP's default setting is "&". +; http://php.net/arg-separator.output +; Example: +;arg_separator.output = "&" + +; List of separator(s) used by PHP to parse input URLs into variables. +; PHP's default setting is "&". +; NOTE: Every character in this directive is considered as separator! +; http://php.net/arg-separator.input +; Example: +;arg_separator.input = ";&" + +; This directive determines which super global arrays are registered when PHP +; starts up. G,P,C,E & S are abbreviations for the following respective super +; globals: GET, POST, COOKIE, ENV and SERVER. There is a performance penalty +; paid for the registration of these arrays and because ENV is not as commonly +; used as the others, ENV is not recommended on productions servers. You +; can still get access to the environment variables through getenv() should you +; need to. +; Default Value: "EGPCS" +; Development Value: "GPCS" +; Production Value: "GPCS"; +; http://php.net/variables-order +variables_order = "EGPCS" + +; This directive determines which super global data (G,P & C) should be +; registered into the super global array REQUEST. If so, it also determines +; the order in which that data is registered. The values for this directive +; are specified in the same manner as the variables_order directive, +; EXCEPT one. Leaving this value empty will cause PHP to use the value set +; in the variables_order directive. It does not mean it will leave the super +; globals array REQUEST empty. +; Default Value: None +; Development Value: "GP" +; Production Value: "GP" +; http://php.net/request-order +request_order = "GP" + +; This directive determines whether PHP registers $argv & $argc each time it +; runs. 
$argv contains an array of all the arguments passed to PHP when a script +; is invoked. $argc contains an integer representing the number of arguments +; that were passed when the script was invoked. These arrays are extremely +; useful when running scripts from the command line. When this directive is +; enabled, registering these variables consumes CPU cycles and memory each time +; a script is executed. For performance reasons, this feature should be disabled +; on production servers. +; Note: This directive is hardcoded to On for the CLI SAPI +; Default Value: On +; Development Value: Off +; Production Value: Off +; http://php.net/register-argc-argv +register_argc_argv = Off + +; When enabled, the ENV, REQUEST and SERVER variables are created when they're +; first used (Just In Time) instead of when the script starts. If these +; variables are not used within a script, having this directive on will result +; in a performance gain. The PHP directive register_argc_argv must be disabled +; for this directive to have any effect. +; http://php.net/auto-globals-jit +auto_globals_jit = On + +; Whether PHP will read the POST data. +; This option is enabled by default. +; Most likely, you won't want to disable this option globally. It causes $_POST +; and $_FILES to always be empty; the only way you will be able to read the +; POST data will be through the php://input stream wrapper. This can be useful +; to proxy requests or to process the POST data in a memory efficient fashion. +; http://php.net/enable-post-data-reading +;enable_post_data_reading = Off + +; Maximum size of POST data that PHP will accept. +; Its value may be 0 to disable the limit. It is ignored if POST data reading +; is disabled through enable_post_data_reading. +; http://php.net/post-max-size +post_max_size = 200M + +; Automatically add files before PHP document. +; http://php.net/auto-prepend-file +auto_prepend_file = + +; Automatically add files after PHP document. 
+; http://php.net/auto-append-file +auto_append_file = + +; By default, PHP will output a character encoding using +; the Content-type: header. To disable sending of the charset, simply +; set it to be empty. +; +; PHP's built-in default is text/html +; http://php.net/default-mimetype +default_mimetype = "text/html" + +; PHP's default character set is set to UTF-8. +; http://php.net/default-charset +default_charset = "UTF-8" + +; PHP internal character encoding is set to empty. +; If empty, default_charset is used. +; http://php.net/internal-encoding +;internal_encoding = + +; PHP input character encoding is set to empty. +; If empty, default_charset is used. +; http://php.net/input-encoding +;input_encoding = + +; PHP output character encoding is set to empty. +; If empty, default_charset is used. +; mbstring or iconv output handler is used. +; See also output_buffer. +; http://php.net/output-encoding +;output_encoding = + +;;;;;;;;;;;;;;;;;;;;;;;;; +; Paths and Directories ; +;;;;;;;;;;;;;;;;;;;;;;;;; + +; UNIX: "/path1:/path2" +;include_path = ".:/php/includes" +; +; Windows: "\path1;\path2" +;include_path = ".;c:\php\includes" +; +; PHP's default setting for include_path is ".;/path/to/php/pear" +; http://php.net/include-path +include_path = ${INCLUDE_PATH} + +; The root of the PHP pages, used only if nonempty. +; if PHP was not compiled with FORCE_REDIRECT, you SHOULD set doc_root +; if you are running php as a CGI under any web server (other than IIS) +; see documentation for security issues. The alternate is to use the +; cgi.force_redirect configuration below +; http://php.net/doc-root +doc_root = + +; The directory under which PHP opens the script using /~username used only +; if nonempty. +; http://php.net/user-dir +user_dir = + +; Directory in which the loadable extensions (modules) reside. +; http://php.net/extension-dir +; extension_dir = "./" +; On windows: +; extension_dir = "ext" + +; Directory where the temporary files should be placed. 
+; Defaults to the system default (see sys_get_temp_dir) +; sys_temp_dir = "/tmp" + +; Whether or not to enable the dl() function. The dl() function does NOT work +; properly in multithreaded servers, such as IIS or Zeus, and is automatically +; disabled on them. +; http://php.net/enable-dl +enable_dl = Off + +; cgi.force_redirect is necessary to provide security running PHP as a CGI under +; most web servers. Left undefined, PHP turns this on by default. You can +; turn it off here AT YOUR OWN RISK +; **You CAN safely turn this off for IIS, in fact, you MUST.** +; http://php.net/cgi.force-redirect +;cgi.force_redirect = 1 + +; if cgi.nph is enabled it will force cgi to always sent Status: 200 with +; every request. PHP's default behavior is to disable this feature. +;cgi.nph = 1 + +; if cgi.force_redirect is turned on, and you are not running under Apache or Netscape +; (iPlanet) web servers, you MAY need to set an environment variable name that PHP +; will look for to know it is OK to continue execution. Setting this variable MAY +; cause security issues, KNOW WHAT YOU ARE DOING FIRST. +; http://php.net/cgi.redirect-status-env +;cgi.redirect_status_env = + +; cgi.fix_pathinfo provides *real* PATH_INFO/PATH_TRANSLATED support for CGI. PHP's +; previous behaviour was to set PATH_TRANSLATED to SCRIPT_FILENAME, and to not grok +; what PATH_INFO is. For more information on PATH_INFO, see the cgi specs. Setting +; this to 1 will cause PHP CGI to fix its paths to conform to the spec. A setting +; of zero causes PHP to behave as before. Default is 1. You should fix your scripts +; to use SCRIPT_FILENAME rather than PATH_TRANSLATED. +; http://php.net/cgi.fix-pathinfo +;cgi.fix_pathinfo=1 + +; FastCGI under IIS (on WINNT based OS) supports the ability to impersonate +; security tokens of the calling client. This allows IIS to define the +; security context that the request runs under. 
mod_fastcgi under Apache +; does not currently support this feature (03/17/2002) +; Set to 1 if running under IIS. Default is zero. +; http://php.net/fastcgi.impersonate +;fastcgi.impersonate = 1 + +; Disable logging through FastCGI connection. PHP's default behavior is to enable +; this feature. +;fastcgi.logging = 0 + +; cgi.rfc2616_headers configuration option tells PHP what type of headers to +; use when sending HTTP response code. If set to 0, PHP sends Status: header that +; is supported by Apache. When this option is set to 1, PHP will send +; RFC2616 compliant header. +; Default is zero. +; http://php.net/cgi.rfc2616-headers +;cgi.rfc2616_headers = 0 + +;;;;;;;;;;;;;;;; +; File Uploads ; +;;;;;;;;;;;;;;;; + +; Whether to allow HTTP file uploads. +; http://php.net/file-uploads +file_uploads = On + +; Temporary directory for HTTP uploaded files (will use system default if not +; specified). +; http://php.net/upload-tmp-dir +upload_tmp_dir = /tmp + +; Maximum allowed size for uploaded files. +; http://php.net/upload-max-filesize +upload_max_filesize = 200M + +; Maximum number of files that can be uploaded via a single request +max_file_uploads = 20 + +;;;;;;;;;;;;;;;;;; +; Fopen wrappers ; +;;;;;;;;;;;;;;;;;; + +; Whether to allow the treatment of URLs (like http:// or ftp://) as files. +; http://php.net/allow-url-fopen +allow_url_fopen = On + +; Whether to allow include/require to open URLs (like http:// or ftp://) as files. +; http://php.net/allow-url-include +allow_url_include = Off + +; Define the anonymous ftp password (your email address). PHP's default setting +; for this is empty. +; http://php.net/from +;from="john@doe.com" + +; Define the User-Agent string. PHP's default setting for this is empty. 
+; http://php.net/user-agent +;user_agent="PHP" + +; Default timeout for socket based streams (seconds) +; http://php.net/default-socket-timeout +default_socket_timeout = 60 + +; If your scripts have to deal with files from Macintosh systems, +; or you are running on a Mac and need to deal with files from +; unix or win32 systems, setting this flag will cause PHP to +; automatically detect the EOL character in those files so that +; fgets() and file() will work regardless of the source of the file. +; http://php.net/auto-detect-line-endings +;auto_detect_line_endings = Off + +;;;;;;;;;;;;;;;;;;;;;; +; Dynamic Extensions ; +;;;;;;;;;;;;;;;;;;;;;; + +; If you wish to have an extension loaded automatically, use the following +; syntax: +; +; extension=modulename +; +; For example: +; +; extension=mysqli +; +; When the extension library to load is not located in the default extension +; directory, You may specify an absolute path to the library file: +; +; extension=/path/to/extension/mysqli.so +; +; Note : The syntax used in previous PHP versions ('extension=.so' and +; 'extension='php_.dll') is supported for legacy reasons and may be +; deprecated in a future PHP major version. So, when it is possible, please +; move to the new ('extension=) syntax. + +;;;; +; Note: packaged extension modules are now loaded via the .ini files +; found in the directory /etc/php.d; these are loaded by default. +;;;; + +;;;;;;;;;;;;;;;;;;; +; Module Settings ; +;;;;;;;;;;;;;;;;;;; + +[CLI Server] +; Whether the CLI web server uses ANSI color coding in its terminal output. 
+cli_server.color = On + +[Date] +; Defines the default timezone used by the date functions +; http://php.net/date.timezone +date.timezone = UTC + +; http://php.net/date.default-latitude +;date.default_latitude = 31.7667 + +; http://php.net/date.default-longitude +;date.default_longitude = 35.2333 + +; http://php.net/date.sunrise-zenith +;date.sunrise_zenith = 90.583333 + +; http://php.net/date.sunset-zenith +;date.sunset_zenith = 90.583333 + +[filter] +; http://php.net/filter.default +;filter.default = unsafe_raw + +; http://php.net/filter.default-flags +;filter.default_flags = + +[iconv] +; Use of this INI entry is deprecated, use global input_encoding instead. +; If empty, default_charset or input_encoding or iconv.input_encoding is used. +; The precedence is: default_charset < input_encoding < iconv.input_encoding +;iconv.input_encoding = + +; Use of this INI entry is deprecated, use global internal_encoding instead. +; If empty, default_charset or internal_encoding or iconv.internal_encoding is used. +; The precedence is: default_charset < internal_encoding < iconv.internal_encoding +;iconv.internal_encoding = + +; Use of this INI entry is deprecated, use global output_encoding instead. +; If empty, default_charset or output_encoding or iconv.output_encoding is used. +; The precedence is: default_charset < output_encoding < iconv.output_encoding +; To use an output encoding conversion, iconv's output handler must be set +; otherwise output encoding conversion cannot be performed. +;iconv.output_encoding = + +[intl] +;intl.default_locale = +; This directive allows you to produce PHP errors when some error +; happens within intl functions. The value is the level of the error produced. +; Default is 0, which does not produce any errors. +;intl.error_level = E_WARNING + +[sqlite] +; http://php.net/sqlite.assoc-case +;sqlite.assoc_case = 0 + +[sqlite3] +;sqlite3.extension_dir = + +[Pcre] +;PCRE library backtracking limit. 
+; http://php.net/pcre.backtrack-limit +;pcre.backtrack_limit=100000 + +;PCRE library recursion limit. +;Please note that if you set this value to a high number you may consume all +;the available process stack and eventually crash PHP (due to reaching the +;stack size limit imposed by the Operating System). +; http://php.net/pcre.recursion-limit +;pcre.recursion_limit=100000 + +;Enables or disables JIT compilation of patterns. This requires the PCRE +;library to be compiled with JIT support. +;pcre.jit=1 + +[Pdo] +; Whether to pool ODBC connections. Can be one of "strict", "relaxed" or "off" +; http://php.net/pdo-odbc.connection-pooling +;pdo_odbc.connection_pooling=strict + +;pdo_odbc.db2_instance_name + +[Pdo_mysql] +; If mysqlnd is used: Number of cache slots for the internal result set cache +; http://php.net/pdo_mysql.cache_size +pdo_mysql.cache_size = 2000 + +; Default socket name for local MySQL connects. If empty, uses the built-in +; MySQL defaults. +; http://php.net/pdo_mysql.default-socket +pdo_mysql.default_socket= + +[Phar] +; http://php.net/phar.readonly +;phar.readonly = On + +; http://php.net/phar.require-hash +;phar.require_hash = On + +;phar.cache_list = + +[mail function] +; For Unix only. You may supply arguments as well (default: "sendmail -t -i"). +; http://php.net/sendmail-path +sendmail_path = /usr/sbin/sendmail -t -i + +; Force the addition of the specified parameters to be passed as extra parameters +; to the sendmail binary. These parameters will always replace the value of +; the 5th parameter to mail(). +;mail.force_extra_parameters = + +; Add X-PHP-Originating-Script: that will include uid of the script followed by the filename +mail.add_x_header = On + +; The path to a log file that will log all mail() calls. Log entries include +; the full path of the script, line number, To address and headers. 
+;mail.log = +; Log mail to syslog; +;mail.log = syslog + +[ODBC] +; http://php.net/odbc.default-db +;odbc.default_db = Not yet implemented + +; http://php.net/odbc.default-user +;odbc.default_user = Not yet implemented + +; http://php.net/odbc.default-pw +;odbc.default_pw = Not yet implemented + +; Controls the ODBC cursor model. +; Default: SQL_CURSOR_STATIC (default). +;odbc.default_cursortype + +; Allow or prevent persistent links. +; http://php.net/odbc.allow-persistent +odbc.allow_persistent = On + +; Check that a connection is still valid before reuse. +; http://php.net/odbc.check-persistent +odbc.check_persistent = On + +; Maximum number of persistent links. -1 means no limit. +; http://php.net/odbc.max-persistent +odbc.max_persistent = -1 + +; Maximum number of links (persistent + non-persistent). -1 means no limit. +; http://php.net/odbc.max-links +odbc.max_links = -1 + +; Handling of LONG fields. Returns number of bytes to variables. 0 means +; passthru. +; http://php.net/odbc.defaultlrl +odbc.defaultlrl = 4096 + +; Handling of binary data. 0 means passthru, 1 return as is, 2 convert to char. +; See the documentation on odbc_binmode and odbc_longreadlen for an explanation +; of odbc.defaultlrl and odbc.defaultbinmode +; http://php.net/odbc.defaultbinmode +odbc.defaultbinmode = 1 + +;birdstep.max_links = -1 + +[Interbase] +; Allow or prevent persistent links. +ibase.allow_persistent = 1 + +; Maximum number of persistent links. -1 means no limit. +ibase.max_persistent = -1 + +; Maximum number of links (persistent + non-persistent). -1 means no limit. +ibase.max_links = -1 + +; Default database name for ibase_connect(). +;ibase.default_db = + +; Default username for ibase_connect(). +;ibase.default_user = + +; Default password for ibase_connect(). +;ibase.default_password = + +; Default charset for ibase_connect(). +;ibase.default_charset = + +; Default timestamp format. +ibase.timestampformat = "%Y-%m-%d %H:%M:%S" + +; Default date format. 
+ibase.dateformat = "%Y-%m-%d" + +; Default time format. +ibase.timeformat = "%H:%M:%S" + +[MySQLi] + +; Maximum number of persistent links. -1 means no limit. +; http://php.net/mysqli.max-persistent +mysqli.max_persistent = -1 + +; Allow accessing, from PHP's perspective, local files with LOAD DATA statements +; http://php.net/mysqli.allow_local_infile +;mysqli.allow_local_infile = On + +; Allow or prevent persistent links. +; http://php.net/mysqli.allow-persistent +mysqli.allow_persistent = On + +; Maximum number of links. -1 means no limit. +; http://php.net/mysqli.max-links +mysqli.max_links = -1 + +; If mysqlnd is used: Number of cache slots for the internal result set cache +; http://php.net/mysqli.cache_size +mysqli.cache_size = 2000 + +; Default port number for mysqli_connect(). If unset, mysqli_connect() will use +; the $MYSQL_TCP_PORT or the mysql-tcp entry in /etc/services or the +; compile-time value defined MYSQL_PORT (in that order). Win32 will only look +; at MYSQL_PORT. +; http://php.net/mysqli.default-port +mysqli.default_port = 3306 + +; Default socket name for local MySQL connects. If empty, uses the built-in +; MySQL defaults. +; http://php.net/mysqli.default-socket +mysqli.default_socket = + +; Default host for mysql_connect() (doesn't apply in safe mode). +; http://php.net/mysqli.default-host +mysqli.default_host = + +; Default user for mysql_connect() (doesn't apply in safe mode). +; http://php.net/mysqli.default-user +mysqli.default_user = + +; Default password for mysqli_connect() (doesn't apply in safe mode). +; Note that this is generally a *bad* idea to store passwords in this file. +; *Any* user with PHP access can run 'echo get_cfg_var("mysqli.default_pw") +; and reveal this password! And of course, any users with read access to this +; file will be able to reveal the password as well. 
+; http://php.net/mysqli.default-pw +mysqli.default_pw = + +; Allow or prevent reconnect +mysqli.reconnect = Off + +[mysqlnd] +; Enable / Disable collection of general statistics by mysqlnd which can be +; used to tune and monitor MySQL operations. +; http://php.net/mysqlnd.collect_statistics +mysqlnd.collect_statistics = On + +; Enable / Disable collection of memory usage statistics by mysqlnd which can be +; used to tune and monitor MySQL operations. +; http://php.net/mysqlnd.collect_memory_statistics +mysqlnd.collect_memory_statistics = Off + +; Size of a pre-allocated buffer used when sending commands to MySQL in bytes. +; http://php.net/mysqlnd.net_cmd_buffer_size +;mysqlnd.net_cmd_buffer_size = 2048 + +; Size of a pre-allocated buffer used for reading data sent by the server in +; bytes. +; http://php.net/mysqlnd.net_read_buffer_size +;mysqlnd.net_read_buffer_size = 32768 + +[PostgreSQL] +; Allow or prevent persistent links. +; http://php.net/pgsql.allow-persistent +pgsql.allow_persistent = On + +; Detect broken persistent links always with pg_pconnect(). +; Auto reset feature requires a little overheads. +; http://php.net/pgsql.auto-reset-persistent +pgsql.auto_reset_persistent = Off + +; Maximum number of persistent links. -1 means no limit. +; http://php.net/pgsql.max-persistent +pgsql.max_persistent = -1 + +; Maximum number of links (persistent+non persistent). -1 means no limit. +; http://php.net/pgsql.max-links +pgsql.max_links = -1 + +; Ignore PostgreSQL backends Notice message or not. +; Notice message logging require a little overheads. +; http://php.net/pgsql.ignore-notice +pgsql.ignore_notice = 0 + +; Log PostgreSQL backends Notice message or not. +; Unless pgsql.ignore_notice=0, module cannot log notice message. +; http://php.net/pgsql.log-notice +pgsql.log_notice = 0 + +[bcmath] +; Number of decimal digits for all bcmath functions. 
+; http://php.net/bcmath.scale +bcmath.scale = 0 + +[browscap] +; http://php.net/browscap +;browscap = extra/browscap.ini + +[Session] +; Handler used to store/retrieve data. +; http://php.net/session.save-handler +session.save_handler = ${SESSION_HANDLER} + +; Argument passed to save_handler. In the case of files, this is the path +; where data files are stored. Note: Windows users have to change this +; variable in order to use PHP's session functions. +; +; The path can be defined as: +; +; session.save_path = "N;/path" +; +; where N is an integer. Instead of storing all the session files in +; /path, what this will do is use subdirectories N-levels deep, and +; store the session data in those directories. This is useful if +; your OS has problems with many files in one directory, and is +; a more efficient layout for servers that handle many sessions. +; +; NOTE 1: PHP will not create this directory structure automatically. +; You can use the script in the ext/session dir for that purpose. +; NOTE 2: See the section on garbage collection below if you choose to +; use subdirectories for session storage +; +; The file storage module creates files using mode 600 by default. +; You can change that by using +; +; session.save_path = "N;MODE;/path" +; +; where MODE is the octal representation of the mode. Note that this +; does not overwrite the process's umask. +; http://php.net/session.save-path + +; RPM note : session directory must be owned by process owner +; for mod_php, see /etc/httpd/conf.d/php.conf +; for php-fpm, see /etc/php-fpm.d/*conf +session.save_path = ${SESSION_PATH} + +; Whether to use strict session mode. +; Strict session mode does not accept uninitialized session ID and regenerate +; session ID if browser sends uninitialized session ID. Strict mode protects +; applications from session fixation via session adoption vulnerability. It is +; disabled by default for maximum compatibility, but enabling it is encouraged. 
+; https://wiki.php.net/rfc/strict_sessions +session.use_strict_mode = 0 + +; Whether to use cookies. +; http://php.net/session.use-cookies +session.use_cookies = 1 + +; http://php.net/session.cookie-secure +session.cookie_secure = ${SESSION_COOKIE_SECURE} + +; This option forces PHP to fetch and use a cookie for storing and maintaining +; the session id. We encourage this operation as it's very helpful in combating +; session hijacking when not specifying and managing your own session id. It is +; not the be-all and end-all of session hijacking defense, but it's a good start. +; http://php.net/session.use-only-cookies +session.use_only_cookies = 1 + +; Name of the session (used as cookie name). +; http://php.net/session.name +session.name = ${SESSION_NAME} + +; Initialize session on request startup. +; http://php.net/session.auto-start +session.auto_start = 0 + +; Lifetime in seconds of cookie or, if 0, until browser is restarted. +; http://php.net/session.cookie-lifetime +session.cookie_lifetime = 0 + +; The path for which the cookie is valid. +; http://php.net/session.cookie-path +session.cookie_path = / + +; The domain for which the cookie is valid. +; http://php.net/session.cookie-domain +session.cookie_domain = ${SESSION_COOKIE_DOMAIN} + +; Whether or not to add the httpOnly flag to the cookie, which makes it inaccessible to browser scripting languages such as JavaScript. +; http://php.net/session.cookie-httponly +session.cookie_httponly = ${SESSION_COOKIE_HTTPONLY} + +; Handler used to serialize data. php is the standard serializer of PHP. +; http://php.net/session.serialize-handler +session.serialize_handler = php + +; Defines the probability that the 'garbage collection' process is started +; on every session initialization. The probability is calculated by using +; gc_probability/gc_divisor. Where session.gc_probability is the numerator +; and gc_divisor is the denominator in the equation. 
Setting this value to 1 +; when the session.gc_divisor value is 100 will give you approximately a 1% chance +; the gc will run on any given request. +; Default Value: 1 +; Development Value: 1 +; Production Value: 1 +; http://php.net/session.gc-probability +session.gc_probability = 1 + +; Defines the probability that the 'garbage collection' process is started on every +; session initialization. The probability is calculated by using the following equation: +; gc_probability/gc_divisor. Where session.gc_probability is the numerator and +; session.gc_divisor is the denominator in the equation. Setting this value to 1 +; when the session.gc_divisor value is 100 will give you approximately a 1% chance +; the gc will run on any given request. Increasing this value to 1000 will give you +; a 0.1% chance the gc will run on any given request. For high volume production servers, +; this is a more efficient approach. +; Default Value: 100 +; Development Value: 1000 +; Production Value: 1000 +; http://php.net/session.gc-divisor +session.gc_divisor = 1000 + +; After this number of seconds, stored data will be seen as 'garbage' and +; cleaned up by the garbage collection process. +; http://php.net/session.gc-maxlifetime +session.gc_maxlifetime = 1440 + +; NOTE: If you are using the subdirectory option for storing session files +; (see session.save_path above), then garbage collection does *not* +; happen automatically. You will need to do your own garbage +; collection through a shell script, cron entry, or some other method. +; For example, the following script would be the equivalent of +; setting session.gc_maxlifetime to 1440 (1440 seconds = 24 minutes): +; find /path/to/sessions -cmin +24 -type f | xargs rm + +; Check HTTP Referer to invalidate externally stored URLs containing ids. +; HTTP_REFERER has to contain this substring for the session to be +; considered as valid. 
+; http://php.net/session.referer-check +session.referer_check = + +; How many bytes to read from the file. +; http://php.net/session.entropy-length +;session.entropy_length = 32 + +; Specified here to create the session id. +; http://php.net/session.entropy-file +; Defaults to /dev/urandom +; On systems that don't have /dev/urandom but do have /dev/arandom, this will default to /dev/arandom +; If neither are found at compile time, the default is no entropy file. +; On windows, setting the entropy_length setting will activate the +; Windows random source (using the CryptoAPI) +;session.entropy_file = /dev/urandom + +; Set to {nocache,private,public,} to determine HTTP caching aspects +; or leave this empty to avoid sending anti-caching headers. +; http://php.net/session.cache-limiter +session.cache_limiter = nocache + +; Document expires after n minutes. +; http://php.net/session.cache-expire +session.cache_expire = 180 + +; trans sid support is disabled by default. +; Use of trans sid may risk your users' security. +; Use this option with caution. +; - User may send URL contains active session ID +; to other person via. email/irc/etc. +; - URL that contains active session ID may be stored +; in publicly accessible computer. +; - User may access your site with the same session ID +; always using URL stored in browser's history or bookmarks. +; http://php.net/session.use-trans-sid +session.use_trans_sid = 0 + +; Select a hash function for use in generating session ids. +; Possible Values +; 0 (MD5 128 bits) +; 1 (SHA-1 160 bits) +; This option may also be set to the name of any hash function supported by +; the hash extension. A list of available hashes is returned by the hash_algos() +; function. +; http://php.net/session.hash-function +session.hash_function = 0 + +; Define how many bits are stored in each character when converting +; the binary hash data to something readable. 
+; Possible values: +; 4 (4 bits: 0-9, a-f) +; 5 (5 bits: 0-9, a-v) +; 6 (6 bits: 0-9, a-z, A-Z, "-", ",") +; Default Value: 4 +; Development Value: 5 +; Production Value: 5 +; http://php.net/session.hash-bits-per-character +session.hash_bits_per_character = 5 + +; The URL rewriter will look for URLs in a defined set of HTML tags. +; form/fieldset are special; if you include them here, the rewriter will +; add a hidden field with the info which is otherwise appended +; to URLs. If you want XHTML conformity, remove the form entry. +; Note that all valid entries require a "=", even if no value follows. +; Default Value: "a=href,area=href,frame=src,form=,fieldset=" +; Development Value: "a=href,area=href,frame=src,input=src,form=fakeentry" +; Production Value: "a=href,area=href,frame=src,input=src,form=fakeentry" +; http://php.net/url-rewriter.tags +url_rewriter.tags = "a=href,area=href,frame=src,input=src,form=fakeentry" + +; Enable upload progress tracking in $_SESSION +; Default Value: On +; Development Value: On +; Production Value: On +; http://php.net/session.upload-progress.enabled +;session.upload_progress.enabled = On + +; Cleanup the progress information as soon as all POST data has been read +; (i.e. upload completed). 
+; Default Value: On +; Development Value: On +; Production Value: On +; http://php.net/session.upload-progress.cleanup +;session.upload_progress.cleanup = On + +; A prefix used for the upload progress key in $_SESSION +; Default Value: "upload_progress_" +; Development Value: "upload_progress_" +; Production Value: "upload_progress_" +; http://php.net/session.upload-progress.prefix +;session.upload_progress.prefix = "upload_progress_" + +; The index name (concatenated with the prefix) in $_SESSION +; containing the upload progress information +; Default Value: "PHP_SESSION_UPLOAD_PROGRESS" +; Development Value: "PHP_SESSION_UPLOAD_PROGRESS" +; Production Value: "PHP_SESSION_UPLOAD_PROGRESS" +; http://php.net/session.upload-progress.name +;session.upload_progress.name = "PHP_SESSION_UPLOAD_PROGRESS" + +; How frequently the upload progress should be updated. +; Given either in percentages (per-file), or in bytes +; Default Value: "1%" +; Development Value: "1%" +; Production Value: "1%" +; http://php.net/session.upload-progress.freq +;session.upload_progress.freq = "1%" + +; The minimum delay between updates, in seconds +; Default Value: 1 +; Development Value: 1 +; Production Value: 1 +; http://php.net/session.upload-progress.min-freq +;session.upload_progress.min_freq = "1" + +[Assertion] +; Switch whether to compile assertions at all (to have no overhead at run-time) +; -1: Do not compile at all +; 0: Jump over assertion at run-time +; 1: Execute assertions +; Changing from or to a negative value is only possible in php.ini! (For turning assertions on and off at run-time, see assert.active, when zend.assertions = 1) +; Default Value: 1 +; Development Value: 1 +; Production Value: -1 +; http://php.net/zend.assertions +zend.assertions = -1 + +; Assert(expr); active by default. 
+; http://php.net/assert.active +;assert.active = On + +; Throw an AssertionError on failed assertions +; http://php.net/assert.exception +;assert.exception = On + +; Issue a PHP warning for each failed assertion. (Overridden by assert.exception if active) +; http://php.net/assert.warning +;assert.warning = On + +; Don't bail out by default. +; http://php.net/assert.bail +;assert.bail = Off + +; User-function to be called if an assertion fails. +; http://php.net/assert.callback +;assert.callback = 0 + +; Eval the expression with current error_reporting(). Set to true if you want +; error_reporting(0) around the eval(). +; http://php.net/assert.quiet-eval +;assert.quiet_eval = 0 + +[mbstring] +; language for internal character representation. +; This affects mb_send_mail() and mbstring.detect_order. +; http://php.net/mbstring.language +;mbstring.language = Japanese + +; Use of this INI entry is deprecated, use global internal_encoding instead. +; internal/script encoding. +; Some encoding cannot work as internal encoding. (e.g. SJIS, BIG5, ISO-2022-*) +; If empty, default_charset or internal_encoding or iconv.internal_encoding is used. +; The precedence is: default_charset < internal_encoding < iconv.internal_encoding +;mbstring.internal_encoding = + +; Use of this INI entry is deprecated, use global input_encoding instead. +; http input encoding. +; mbstring.encoding_translation = On is needed to use this setting. +; If empty, default_charset or input_encoding or mbstring.http_input is used. +; The precedence is: default_charset < input_encoding < mbstring.http_input +; http://php.net/mbstring.http-input +;mbstring.http_input = + +; Use of this INI entry is deprecated, use global output_encoding instead. +; http output encoding. +; mb_output_handler must be registered as output buffer to function. +; If empty, default_charset or output_encoding or mbstring.http_output is used. 
+; The precedence is: default_charset < output_encoding < mbstring.http_output +; To use an output encoding conversion, mbstring's output handler must be set +; otherwise output encoding conversion cannot be performed. +; http://php.net/mbstring.http-output +;mbstring.http_output = + +; enable automatic encoding translation according to +; mbstring.internal_encoding setting. Input chars are +; converted to internal encoding by setting this to On. +; Note: Do _not_ use automatic encoding translation for +; portable libs/applications. +; http://php.net/mbstring.encoding-translation +;mbstring.encoding_translation = Off + +; automatic encoding detection order. +; "auto" detect order is changed according to mbstring.language +; http://php.net/mbstring.detect-order +;mbstring.detect_order = auto + +; substitute_character used when character cannot be converted +; one from another +; http://php.net/mbstring.substitute-character +;mbstring.substitute_character = none + +; overload(replace) single byte functions by mbstring functions. +; mail(), ereg(), etc are overloaded by mb_send_mail(), mb_ereg(), +; etc. Possible values are 0,1,2,4 or combination of them. +; For example, 7 for overload everything. +; 0: No overload +; 1: Overload mail() function +; 2: Overload str*() functions +; 4: Overload ereg*() functions +; http://php.net/mbstring.func-overload +;mbstring.func_overload = 0 + +; enable strict encoding detection. +; Default: Off +;mbstring.strict_detection = On + +; This directive specifies the regex pattern of content types for which mb_output_handler() +; is activated. +; Default: mbstring.http_output_conv_mimetype=^(text/|application/xhtml\+xml) +;mbstring.http_output_conv_mimetype= + +[gd] +; Tell the jpeg decode to ignore warnings and try to create +; a gd image. 
The warning will then be displayed as notices +; disabled by default +; http://php.net/gd.jpeg-ignore-warning +;gd.jpeg_ignore_warning = 0 + +[exif] +; Exif UNICODE user comments are handled as UCS-2BE/UCS-2LE and JIS as JIS. +; With mbstring support this will automatically be converted into the encoding +; given by corresponding encode setting. When empty mbstring.internal_encoding +; is used. For the decode settings you can distinguish between motorola and +; intel byte order. A decode setting cannot be empty. +; http://php.net/exif.encode-unicode +;exif.encode_unicode = ISO-8859-15 + +; http://php.net/exif.decode-unicode-motorola +;exif.decode_unicode_motorola = UCS-2BE + +; http://php.net/exif.decode-unicode-intel +;exif.decode_unicode_intel = UCS-2LE + +; http://php.net/exif.encode-jis +;exif.encode_jis = + +; http://php.net/exif.decode-jis-motorola +;exif.decode_jis_motorola = JIS + +; http://php.net/exif.decode-jis-intel +;exif.decode_jis_intel = JIS + +[Tidy] +; The path to a default tidy configuration file to use when using tidy +; http://php.net/tidy.default-config +;tidy.default_config = /usr/local/lib/php/default.tcfg + +; Should tidy clean and repair output automatically? +; WARNING: Do not use this option if you are generating non-html content +; such as dynamic images +; http://php.net/tidy.clean-output +tidy.clean_output = Off + +[soap] +; Enables or disables WSDL caching feature. +; http://php.net/soap.wsdl-cache-enabled +soap.wsdl_cache_enabled=1 + +; Sets the directory name where SOAP extension will put cache files. +; http://php.net/soap.wsdl-cache-dir + +; RPM note : cache directory must be owned by process owner +; for mod_php, see /etc/httpd/conf.d/php.conf +; for php-fpm, see /etc/php-fpm.d/*conf +soap.wsdl_cache_dir="/tmp" + +; (time to live) Sets the number of second while cached file will be used +; instead of original one. +; http://php.net/soap.wsdl-cache-ttl +soap.wsdl_cache_ttl=86400 + +; Sets the size of the cache limit. (Max. 
number of WSDL files to cache) +soap.wsdl_cache_limit = 5 + +[sysvshm] +; A default size of the shared memory segment +;sysvshm.init_mem = 10000 + +[ldap] +; Sets the maximum number of open links or -1 for unlimited. +ldap.max_links = -1 + +[dba] +;dba.default_handler= + +[curl] +; A default value for the CURLOPT_CAINFO option. This is required to be an +; absolute path. +;curl.cainfo = + +[openssl] +; The location of a Certificate Authority (CA) file on the local filesystem +; to use when verifying the identity of SSL/TLS peers. Most users should +; not specify a value for this directive as PHP will attempt to use the +; OS-managed cert stores in its absence. If specified, this value may still +; be overridden on a per-stream basis via the "cafile" SSL stream context +; option. +;openssl.cafile= + +; If openssl.cafile is not specified or if the CA file is not found, the +; directory pointed to by openssl.capath is searched for a suitable +; certificate. This value must be a correctly hashed certificate directory. +; Most users should not specify a value for this directive as PHP will +; attempt to use the OS-managed cert stores in its absence. If specified, +; this value may still be overridden on a per-stream basis via the "capath" +; SSL stream context option. +;openssl.capath= + +; Local Variables: +; tab-width: 4 +; End: diff --git a/8.1/root/usr/libexec/container-setup b/8.1/root/usr/libexec/container-setup new file mode 100755 index 000000000..4cc645f39 --- /dev/null +++ b/8.1/root/usr/libexec/container-setup @@ -0,0 +1,70 @@ +#!/bin/bash + +set -e + +# In order to drop the root user, we have to make some directories world +# writeable as OpenShift default security model is to run the container under +# random UID. 
+ +source ${PHP_CONTAINER_SCRIPTS_PATH}/common.sh + +# compatibility symlinks so we hide SCL paths +if [ -v SCL_ENABLED ] ; then + # /opt/rh/httpd24/root/etc/httpd will be symlink to /etc/httpd + mv /opt/rh/httpd24/root/etc/httpd /etc/httpd + ln -s /etc/httpd /opt/rh/httpd24/root/etc/httpd + + # /opt/rh/httpd24/root/var/run/httpd will be symlink to /var/run/httpd + mv /opt/rh/httpd24/root/var/run/httpd /var/run/httpd + ln -s /var/run/httpd /opt/rh/httpd24/root/var/run/httpd + + # /opt/rh/httpd24/root/var/www will be symlink to /var/www + rm -rf /var/www + mv ${HTTPD_DATA_ORIG_PATH} /var/www + ln -s /var/www ${HTTPD_DATA_ORIG_PATH} +else + rm -f /opt/app-root/etc/scl_enable +fi + +if head "/etc/redhat-release" | grep -q -e "^Red Hat Enterprise Linux release 8" -e "^Red Hat Enterprise Linux release 9" -e "Fedora" -e "^CentOS Stream release 9"; then + /usr/libexec/httpd-ssl-gencerts +fi + +mkdir -p ${HTTPD_CONFIGURATION_PATH} +chmod -R a+rwx ${HTTPD_MAIN_CONF_PATH} +chmod -R a+rwx ${HTTPD_MAIN_CONF_D_PATH} +chmod -R ug+r /etc/pki/tls/certs/localhost.crt +chmod -R ug+r /etc/pki/tls/private/localhost.key +chown -R 1001:0 /etc/pki/tls/certs/localhost.crt +chown -R 1001:0 /etc/pki/tls/private/localhost.key +mkdir -p ${APP_ROOT}/etc +chmod -R a+rwx ${APP_ROOT}/etc +chmod -R a+rwx ${HTTPD_VAR_RUN} +chown -R 1001:0 ${APP_ROOT} +mkdir -p /tmp/sessions +chmod -R a+rwx /tmp/sessions +chown -R 1001:0 /tmp/sessions +chown -R 1001:0 ${HTTPD_DATA_PATH} +if [ "$PLATFORM" == "el7" ]; then + # Only run for el7 as this essentially becomes "chmod -R /etc" outside of SCL-based images + chmod -R a+rwx "${PHP_SYSCONF_PATH}" +else + chmod a+rwx "${PHP_SYSCONF_PATH}/php.ini" + chmod -R a+rwx "${PHP_SYSCONF_PATH}/php.d" + chmod -R a+rwx "${PHP_SYSCONF_PATH}/php-fpm.d" +fi + +if [ "x$PLATFORM" == "xel9" ] || [ "x$PLATFORM" == "xfedora" ]; then + if [ -v PHP_FPM_RUN_DIR ]; then + mkdir -p ${PHP_FPM_RUN_DIR} + chmod -R a+rwx ${PHP_FPM_RUN_DIR} + chown -R 1001:0 ${PHP_FPM_RUN_DIR} + mkdir -p 
${PHP_FPM_LOG_PATH} + chmod -R a+rwx ${PHP_FPM_LOG_PATH} + chown -R 1001:0 ${PHP_FPM_LOG_PATH} + fi +fi + +mkdir -p ${PHP_CONTAINER_SCRIPTS_PATH}/pre-init + +config_general diff --git a/8.1/root/usr/share/container-scripts/php/common.sh b/8.1/root/usr/share/container-scripts/php/common.sh new file mode 100644 index 000000000..2145dfcca --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/common.sh @@ -0,0 +1,166 @@ +if [ "x$PLATFORM" == "xel7" ]; then + HTTPCONF_LINENO=151 +elif head "/etc/redhat-release" | grep -q "^Fedora"; then + HTTPCONF_LINENO=156 +elif [ "x$PLATFORM" == "xel9" ]; then + HTTPCONF_LINENO=156 +else + HTTPCONF_LINENO=154 +fi + +config_httpd_conf() { + sed -i "s/^Listen 80/Listen 0.0.0.0:8080/" ${HTTPD_MAIN_CONF_PATH}/httpd.conf + sed -i "s/^User apache/User default/" ${HTTPD_MAIN_CONF_PATH}/httpd.conf + sed -i "s/^Group apache/Group root/" ${HTTPD_MAIN_CONF_PATH}/httpd.conf + sed -i "s%^DocumentRoot \"${HTTPD_DATA_ORIG_PATH}/html\"%#DocumentRoot \"${APP_DATA}\"%" ${HTTPD_MAIN_CONF_PATH}/httpd.conf + sed -i "s%^ "${HTTPD_MODULES_CONF_D_PATH}/00-mpm.conf" + else + # overwrite default rhel-8 mpm mode + echo "LoadModule mpm_prefork_module modules/mod_mpm_prefork.so" > "${HTTPD_MODULES_CONF_D_PATH}/00-mpm.conf" + fi +} + +config_general() { + config_httpd_conf + config_ssl_conf + config_modules_conf + if [ -d "/run/php-fpm" ]; then + sed -i -E "/php_value\[session.save_path\]/d" ${PHP_FPM_CONF_D_PATH}/${PHP_FPM_CONF_FILE} + sed -e '/catch_workers_output/d' -e '/error_log/d' -i ${PHP_FPM_CONF_D_PATH}/${PHP_FPM_CONF_FILE} + sed -e 's/^(clear_env)\s+.*/clear_env = no/' -i ${PHP_FPM_CONF_D_PATH}/${PHP_FPM_CONF_FILE} + else + sed -i '/php_value session.save_/d' ${HTTPD_MAIN_CONF_D_PATH}/${PHP_HTTPD_CONF_FILE} + fi + head -n${HTTPCONF_LINENO} ${HTTPD_MAIN_CONF_PATH}/httpd.conf | tail -n1 | grep "AllowOverride All" || exit 1 + echo "IncludeOptional ${APP_ROOT}/etc/conf.d/*.conf" >> ${HTTPD_MAIN_CONF_PATH}/httpd.conf +} + +function log_info { + echo 
"---> `date +%T` $@" +} + +function log_and_run { + log_info "Running $@" + "$@" +} + +function log_volume_info { + CONTAINER_DEBUG=${CONTAINER_DEBUG:-} + if [[ "${CONTAINER_DEBUG,,}" != "true" ]]; then + return + fi + + log_info "Volume info for $@:" + set +e + log_and_run mount + while [ $# -gt 0 ]; do + log_and_run ls -alZ $1 + shift + done + set -e +} + +# get_matched_files finds file for image extending +function get_matched_files() { + local custom_dir default_dir + custom_dir="$1" + default_dir="$2" + files_matched="$3" + find "$default_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n" + [ -d "$custom_dir" ] && find "$custom_dir" -maxdepth 1 -type f -name "$files_matched" -printf "%f\n" +} + +# process_extending_files process extending files in $1 and $2 directories +# - source all *.sh files +# (if there are files with same name source only file from $1) +function process_extending_files() { + local custom_dir default_dir + custom_dir=$1 + default_dir=$2 + + while read filename ; do + echo "=> sourcing $filename ..." + # Custom file is prefered + if [ -f $custom_dir/$filename ]; then + source $custom_dir/$filename + elif [ -f $default_dir/$filename ]; then + source $default_dir/$filename + fi + done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.sh' | sort -u)" +} + +# process extending config files in $1 and $2 directories +# - expand variables in *.conf and copy the files into /opt/app-root/etc/httpd.d directory +# (if there are files with same name source only file from $1) +function process_extending_config_files() { + local custom_dir default_dir + custom_dir=$1 + default_dir=$2 + + while read filename ; do + echo "=> sourcing $filename ..." 
+ # Custom file is prefered + if [ -f $custom_dir/$filename ]; then + envsubst < $custom_dir/$filename > ${HTTPD_CONFIGURATION_PATH}/$filename + elif [ -f $default_dir/$filename ]; then + envsubst < $default_dir/$filename > ${HTTPD_CONFIGURATION_PATH}/$filename + fi + done <<<"$(get_matched_files "$custom_dir" "$default_dir" '*.conf' | sort -u)" +} + +# Copy config files from application to the location where httpd expects them +# Param sets the directory where to look for files +# This function was taken from httpd container +process_config_files() { + local dir=${1:-.} + if [ -d ${dir}/httpd-cfg ]; then + echo "---> Copying httpd configuration files..." + if [ "$(ls -A ${dir}/httpd-cfg/*.conf)" ]; then + cp -v ${dir}/httpd-cfg/*.conf "${HTTPD_CONFIGURATION_PATH}"/ + rm -rf ${dir}/httpd-cfg + fi + fi +} + +# Copy SSL files provided in application source +# This function was taken from httpd container +process_ssl_certs() { + local dir=${1:-.} + if [ -d ${dir}/httpd-ssl/private ] && [ -d ${dir}/httpd-ssl/certs ]; then + echo "---> Looking for SSL certs for httpd..." + cp -r ${dir}/httpd-ssl ${APP_ROOT} + local ssl_cert="$(ls -A ${APP_ROOT}/httpd-ssl/certs/*.pem | head -n 1)" + local ssl_private="$(ls -A ${APP_ROOT}/httpd-ssl/private/*.pem | head -n 1)" + if [ -f "${ssl_cert}" ] ; then + # do sed for SSLCertificateFile and SSLCertificateKeyFile + echo "---> Setting SSL cert file for httpd..." + sed -i -e "s|^SSLCertificateFile .*$|SSLCertificateFile ${ssl_cert}|" ${HTTPD_MAIN_CONF_D_PATH}/ssl.conf + if [ -f "${ssl_private}" ]; then + echo "---> Setting SSL key file for httpd..." + sed -i -e "s|^SSLCertificateKeyFile .*$|SSLCertificateKeyFile ${ssl_private}|" ${HTTPD_MAIN_CONF_D_PATH}/ssl.conf + else + echo "---> Removing SSL key file settings for httpd..." 
+ sed -i '/^SSLCertificateKeyFile .*/d' ${HTTPD_MAIN_CONF_D_PATH}/ssl.conf + fi + fi + rm -rf ${dir}/httpd-ssl + fi +} + diff --git a/8.1/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf b/8.1/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf new file mode 100644 index 000000000..bd3600f09 --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/httpd-cnf/00-documentroot.conf @@ -0,0 +1 @@ +DocumentRoot "/opt/app-root/src${DOCUMENTROOT}" diff --git a/8.1/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf b/8.1/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf new file mode 100644 index 000000000..bfe131c10 --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/httpd-cnf/50-mpm-tuning.conf @@ -0,0 +1,12 @@ + + # This value should mirror what is set in MinSpareServers. + StartServers ${HTTPD_START_SERVERS} + MinSpareServers ${HTTPD_START_SERVERS} + MaxSpareServers ${HTTPD_MAX_SPARE_SERVERS} + # The MaxRequestWorkers directive sets the limit on the number of simultaneous requests that will be served. + # The default value, when no Cgroup limits are set is 256. + MaxRequestWorkers ${HTTPD_MAX_REQUEST_WORKERS} + ServerLimit ${HTTPD_MAX_REQUEST_WORKERS} + MaxRequestsPerChild ${HTTPD_MAX_REQUESTS_PER_CHILD} + MaxKeepAliveRequests ${HTTPD_MAX_KEEPALIVE_REQUESTS} + diff --git a/8.1/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh b/8.1/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh new file mode 100644 index 000000000..cd6a356af --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/post-assemble/20-copy-config.sh @@ -0,0 +1,6 @@ +# additional arbitrary httpd configuration provided by user using s2i + +log_info 'Processing additional arbitrary httpd configuration provided by s2i ...' 
+ +process_extending_config_files ${APP_DATA}/httpd-cfg/ ${PHP_CONTAINER_SCRIPTS_PATH}/httpd-cnf/ + diff --git a/8.1/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh b/8.1/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh new file mode 100644 index 000000000..a003826a2 --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/post-assemble/40-ssl-certs.sh @@ -0,0 +1,4 @@ +source ${PHP_CONTAINER_SCRIPTS_PATH}/common.sh + +# Copy SSL files provided in application source +process_ssl_certs diff --git a/8.1/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh b/8.1/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh new file mode 100644 index 000000000..cd6a356af --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/pre-start/20-copy-config.sh @@ -0,0 +1,6 @@ +# additional arbitrary httpd configuration provided by user using s2i + +log_info 'Processing additional arbitrary httpd configuration provided by s2i ...' + +process_extending_config_files ${APP_DATA}/httpd-cfg/ ${PHP_CONTAINER_SCRIPTS_PATH}/httpd-cnf/ + diff --git a/8.1/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh b/8.1/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh new file mode 100644 index 000000000..541bbce40 --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/pre-start/40-ssl-certs.sh @@ -0,0 +1,4 @@ +source ${PHP_CONTAINER_SCRIPTS_PATH}/common.sh + +# Copy SSL files provided in application source +process_ssl_certs ${APP_ROOT}/src diff --git a/8.1/root/usr/share/container-scripts/php/pre-start/60-fpm.sh b/8.1/root/usr/share/container-scripts/php/pre-start/60-fpm.sh new file mode 100644 index 000000000..f263fd5fc --- /dev/null +++ b/8.1/root/usr/share/container-scripts/php/pre-start/60-fpm.sh @@ -0,0 +1,6 @@ +if [ -d "/run/php-fpm" ]; then + log_info "Starting FPM - pre-start" + /usr/sbin/php-fpm --daemonize +else + log_info "Using mod_php" +fi diff --git a/8.1/s2i/bin/assemble 
b/8.1/s2i/bin/assemble new file mode 100755 index 000000000..26c41b382 --- /dev/null +++ b/8.1/s2i/bin/assemble @@ -0,0 +1,73 @@ +#!/bin/bash + +set -e + +source ${PHP_CONTAINER_SCRIPTS_PATH}/common.sh + +shopt -s dotglob +echo "---> Installing application source..." +rm -fR /tmp/src/.git +mv /tmp/src/* ./ + +# Fix source directory permissions +fix-permissions ./ +fix-permissions ${HTTPD_CONFIGURATION_PATH} + +# Change the npm registry mirror if provided +if [ -n "$NPM_MIRROR" ]; then + npm config set registry $NPM_MIRROR +fi + +if [ -f composer.json ]; then + echo "Found 'composer.json', installing dependencies using composer.phar... " + + # Install Composer + TEMPFILE=$(mktemp) + RETRIES=6 + for ((i=0; i<$RETRIES; i++)); do + + if [ -z "$COMPOSER_INSTALLER" ]; then + export COMPOSER_INSTALLER="https://getcomposer.org/installer" + fi + + echo "Downloading $COMPOSER_INSTALLER, attempt $((i+1))/$RETRIES" + curl -o $TEMPFILE $COMPOSER_INSTALLER && break + sleep 10 + done + if [[ $i == $RETRIES ]]; then + echo "Download failed, giving up." + exit 1 + fi + + if [ -z $COMPOSER_VERSION ]; then + echo "By default, latest composer will be used. If you want to use v1 please use the environment variable COMPOSER_VERSION=1" + php <$TEMPFILE + else + echo "You set the COMPOSER_VERSION" + php <$TEMPFILE -- --$COMPOSER_VERSION + fi + + if [ "$(ls -a /tmp/artifacts/ 2>/dev/null)" ]; then + echo "Restoring build artifacts" + mv /tmp/artifacts/* $HOME/ + fi + + # Change the repo mirror if provided + if [ -n "$COMPOSER_MIRROR" ]; then + ./composer.phar config -g repositories.packagist composer $COMPOSER_MIRROR + fi + + # Install App dependencies using Composer + ./composer.phar install --no-interaction --no-ansi --optimize-autoloader $COMPOSER_ARGS + + if [ ! 
-f composer.lock ]; then + echo -e "\nConsider adding a 'composer.lock' file into your source repository.\n" + fi +fi + +# post-assemble files +process_extending_files ./php-post-assemble/ ${PHP_CONTAINER_SCRIPTS_PATH}/post-assemble/ + +# Fix source directory permissions +fix-permissions ./ +fix-permissions ${HTTPD_CONFIGURATION_PATH} diff --git a/8.1/s2i/bin/run b/8.1/s2i/bin/run new file mode 100755 index 000000000..b4d973831 --- /dev/null +++ b/8.1/s2i/bin/run @@ -0,0 +1,75 @@ +#!/bin/bash + +source ${PHP_CONTAINER_SCRIPTS_PATH}/common.sh + +export_vars=$(cgroup-limits); export $export_vars +export DOCUMENTROOT=${DOCUMENTROOT:-/} + +# Default php.ini configuration values, all taken +# from php defaults. +export ERROR_REPORTING=${ERROR_REPORTING:-E_ALL & ~E_NOTICE} +export DISPLAY_ERRORS=${DISPLAY_ERRORS:-ON} +export DISPLAY_STARTUP_ERRORS=${DISPLAY_STARTUP_ERRORS:-OFF} +export TRACK_ERRORS=${TRACK_ERRORS:-OFF} +export HTML_ERRORS=${HTML_ERRORS:-ON} +export INCLUDE_PATH=${INCLUDE_PATH:-.:/opt/app-root/src:${PHP_DEFAULT_INCLUDE_PATH}} +export PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT:-128M} +export SESSION_NAME=${SESSION_NAME:-PHPSESSID} +export SESSION_HANDLER=${SESSION_HANDLER:-files} +export SESSION_PATH=${SESSION_PATH:-/tmp/sessions} +export SESSION_COOKIE_DOMAIN=${SESSION_COOKIE_DOMAIN:-} +export SESSION_COOKIE_HTTPONLY=${SESSION_COOKIE_HTTPONLY:-} +export SESSION_COOKIE_SECURE=${SESSION_COOKIE_SECURE:-0} +export SHORT_OPEN_TAG=${SHORT_OPEN_TAG:-OFF} + +# TODO should be dynamically calculated based on container memory limit/16 +export OPCACHE_MEMORY_CONSUMPTION=${OPCACHE_MEMORY_CONSUMPTION:-128} + +export OPCACHE_REVALIDATE_FREQ=${OPCACHE_REVALIDATE_FREQ:-2} +export OPCACHE_MAX_FILES=${OPCACHE_MAX_FILES:-4000} + +export PHPRC=${PHPRC:-${PHP_SYSCONF_PATH}/php.ini} +export PHP_INI_SCAN_DIR=${PHP_INI_SCAN_DIR:-${PHP_SYSCONF_PATH}/php.d} + +envsubst < /opt/app-root/etc/php.ini.template > ${PHP_SYSCONF_PATH}/php.ini +envsubst < 
/opt/app-root/etc/php.d/10-opcache.ini.template > ${PHP_SYSCONF_PATH}/php.d/10-opcache.ini + +export HTTPD_START_SERVERS=${HTTPD_START_SERVERS:-8} +export HTTPD_MAX_SPARE_SERVERS=$((HTTPD_START_SERVERS+10)) +export HTTPD_MAX_REQUESTS_PER_CHILD=${HTTPD_MAX_REQUESTS_PER_CHILD:-4000} +export HTTPD_MAX_KEEPALIVE_REQUESTS=${HTTPD_MAX_KEEPALIVE_REQUESTS:-100} + +if [ -n "${NO_MEMORY_LIMIT:-}" -o -z "${MEMORY_LIMIT_IN_BYTES:-}" ]; then + # + export HTTPD_MAX_REQUEST_WORKERS=${HTTPD_MAX_REQUEST_WORKERS:-256} +else + # A simple calculation for MaxRequestWorkers would be: Total Memory / Size Per Apache process. + # The total memory is determined from the Cgroups and the average size for the + # Apache process is estimated to 15MB. + max_clients_computed=$((MEMORY_LIMIT_IN_BYTES/1024/1024/15)) + # The MaxClients should never be lower than StartServers, which is set to 5. + # In case the container has memory limit set to <64M we pin the MaxClients to 4. + [[ $max_clients_computed -le 4 ]] && max_clients_computed=4 + export HTTPD_MAX_REQUEST_WORKERS=${HTTPD_MAX_REQUEST_WORKERS:-$max_clients_computed} + echo "-> Cgroups memory limit is set, using HTTPD_MAX_REQUEST_WORKERS=${HTTPD_MAX_REQUEST_WORKERS}" +fi + + + +if [ "x$PLATFORM" == "xel9" ] || [ "x$PLATFORM" == "xfedora" ]; then + if [ -n "${PHP_FPM_RUN_DIR:-}" ]; then + /bin/ln -s /dev/stderr ${PHP_FPM_LOG_PATH}/error.log + mkdir -p ${PHP_FPM_RUN_DIR} + chmod -R a+rwx ${PHP_FPM_RUN_DIR} + chown -R 1001:0 ${PHP_FPM_RUN_DIR} + mkdir -p ${PHP_FPM_LOG_PATH} + chmod -R a+rwx ${PHP_FPM_LOG_PATH} + chown -R 1001:0 ${PHP_FPM_LOG_PATH} + fi + +fi + +# pre-start files +process_extending_files ${APP_DATA}/php-pre-start/ ${PHP_CONTAINER_SCRIPTS_PATH}/pre-start/ + +exec httpd -D FOREGROUND diff --git a/8.1/s2i/bin/save-artifacts b/8.1/s2i/bin/save-artifacts new file mode 100755 index 000000000..7e4773a95 --- /dev/null +++ b/8.1/s2i/bin/save-artifacts @@ -0,0 +1,4 @@ +#!/bin/sh +pushd ${HOME} >/dev/null +tar cf - vendor +popd >/dev/null 
diff --git a/8.1/s2i/bin/usage b/8.1/s2i/bin/usage new file mode 100755 index 000000000..b3fa8e1df --- /dev/null +++ b/8.1/s2i/bin/usage @@ -0,0 +1,21 @@ +#!/bin/sh + +DISTRO=`cat /etc/*-release | grep ^ID= | grep -Po '".*?"' | tr -d '"'` +NAMESPACE=centos +[[ $DISTRO =~ rhel* ]] && NAMESPACE=rhscl + +cat < -- curl 127.0.0.1:8080 + +Alternatively, to run the image directly using podman or docker, or how to use it as a parent image in a Dockerfile, see documentation at +https://github.com/sclorg/s2i-php-container/blob/master/${PHP_VERSION}/README.md +EOF diff --git a/8.1/test/examples/from-dockerfile/.gitignore b/8.1/test/examples/from-dockerfile/.gitignore new file mode 100644 index 000000000..563e35396 --- /dev/null +++ b/8.1/test/examples/from-dockerfile/.gitignore @@ -0,0 +1 @@ +app-src diff --git a/8.1/test/examples/from-dockerfile/Dockerfile b/8.1/test/examples/from-dockerfile/Dockerfile new file mode 100644 index 000000000..f9abc360c --- /dev/null +++ b/8.1/test/examples/from-dockerfile/Dockerfile @@ -0,0 +1,24 @@ +FROM ubi8/php-73 + +USER 0 +# Add application sources +ADD app-src . +RUN chown -R 1001:0 . +USER 1001 + +# Install the dependencies +RUN TEMPFILE=$(mktemp) && \ + curl -o "$TEMPFILE" "https://getcomposer.org/installer" && \ + php <"$TEMPFILE" && \ + ./composer.phar install --no-interaction --no-ansi --optimize-autoloader + +# Run script uses standard ways to configure the PHP application +# and execs httpd -D FOREGROUND at the end +# See more in /s2i/bin/run in this repository. +# Shortly what the run script does: The httpd daemon and php needs to be +# configured, so this script prepares the configuration based on the container +# parameters (e.g. available memory) and puts the configuration files into +# the approriate places. +# This can obviously be done differently, and in that case, the final CMD +# should be set to "CMD httpd -D FOREGROUND" instead. 
+CMD /usr/libexec/s2i/run diff --git a/8.1/test/examples/from-dockerfile/Dockerfile.s2i b/8.1/test/examples/from-dockerfile/Dockerfile.s2i new file mode 100644 index 000000000..f507c7afc --- /dev/null +++ b/8.1/test/examples/from-dockerfile/Dockerfile.s2i @@ -0,0 +1,25 @@ +FROM registry.access.redhat.com/ubi8/php-73 + +# This image supports the Source-to-Image +# (see more at https://docs.openshift.com/container-platform/3.11/creating_images/s2i.html). +# In order to support the Source-to-Image framework, there are some interesting +# scripts inside the builder image, that can be run in a Dockerfile directly as well: +# * The `/usr/libexec/s2i/assemble` script inside the image is run in order +# to produce a new image with the application artifacts. +# The script takes sources of a given application and places them into +# appropriate directories inside the image. +# * The `/usr/libexec/s2i/run` script executes the application and is set as +# a default command in the resulting container image. + +# Add application sources to a directory that the assemble script expects them +# and set permissions so that the container runs without root access +USER 0 +ADD app-src /tmp/src +RUN chown -R 1001:0 /tmp/src +USER 1001 + +# Let the assemble script to install the dependencies +RUN /usr/libexec/s2i/assemble + +# Run script uses standard ways to run the application +CMD /usr/libexec/s2i/run diff --git a/8.1/test/examples/from-dockerfile/README.md b/8.1/test/examples/from-dockerfile/README.md new file mode 100644 index 000000000..def2b7c3a --- /dev/null +++ b/8.1/test/examples/from-dockerfile/README.md @@ -0,0 +1,22 @@ +Dockerfile examples +=================== + +This directory contains example Dockerfiles that demonstrate how to use the image with a Dockerfile and `podman build`. + +For demonstration, we use an application code available at https://github.com/sclorg/cakephp-ex.git. 
+ +Pull the source to the local machine first: +``` +git clone https://github.com/sclorg/cakephp-ex.git app-src +``` + +Then, build a new image from a Dockerfile in this directory: +``` +podman build -f Dockerfile -t cakephp-app . +``` + +And run the resulting image with the final application: +``` +podman run -ti --rm cakephp-app +``` + diff --git a/8.1/test/examples/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem b/8.1/test/examples/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem new file mode 100644 index 000000000..8495b8847 --- /dev/null +++ b/8.1/test/examples/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIJAI4x7HuBG49oMA0GCSqGSIb3DQEBCwUAMEIxCzAJBgNV +BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg +Q29tcGFueSBMdGQwHhcNMTcxMjAzMjMzMzU3WhcNMTgwMTAyMjMzMzU3WjBCMQsw +CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh +dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +vH4Vdq0a3UWUQd8Z6s2csxhxjAOyUx0rszGL0m3uTjQido6JRBdjN2dXiZc3LFoq +YeOKR3CeHsn7UdrlzaboHFDfjAaextse0740mB1g14H1bAS0POuTPeKa+3wGfzCb +sTSXnfSrICl3n2D/3KSO93WwmS90kBD6HmKt5nfkLpJnROM/4bHmuoV0Ry8CDjzj +mka7pQU4yzyMKLU3sHpncZU6g7o4Vezic9ksVzIAbdPCSbF7ktVz/hisyCuzyKN6 +s2327jq593vBgGOsNU5PDPDjKW74Q0Bv/FxPK4nx+o4IkcRW1QEb+yAx8XOM7CDZ +ViKvI/A0b+Y4Y3rIQ465+wIDAQABo1MwUTAdBgNVHQ4EFgQUAY1i6ZNbqO1+46aw +pldCyPaWoYswHwYDVR0jBBgwFoAUAY1i6ZNbqO1+46awpldCyPaWoYswDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEADhGjnYGq9JvQcygMYEQiIdyS +t06Nu7NUkWz52GJp7WFognWyG+0jAomBR0GSUchfubvVZ7cHIaVKLhiGOqg+HIol +7tNRfvE6x/Idk674g6OTRAWxO/wOlgnRMpRy6XhHOtb4HcPcpWFZJS8MC8+HRWIs +kzMErXe0/obnKn9O04kcEREfmB7kfcD4ooqk5gwbdQk1W6a44LcN6AB5qYPjOzgF +Qnb2aLQW9XhgNhiMsYqDzCZsy0az0rz7NgkVOnKrGJ8x3kVX13GR2joVVHOazms9 +Gd90z+mLMDTbqCRGIPMLvEp4HtAmBxbgsj/zHyinajIqV96B3Cr3zTdW29lHJg== +-----END CERTIFICATE----- diff --git a/8.1/test/examples/self-signed-ssl/httpd-ssl/private/server-key.pem 
b/8.1/test/examples/self-signed-ssl/httpd-ssl/private/server-key.pem new file mode 100644 index 000000000..ff2ac89c4 --- /dev/null +++ b/8.1/test/examples/self-signed-ssl/httpd-ssl/private/server-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC8fhV2rRrdRZRB +3xnqzZyzGHGMA7JTHSuzMYvSbe5ONCJ2jolEF2M3Z1eJlzcsWiph44pHcJ4eyftR +2uXNpugcUN+MBp7G2x7TvjSYHWDXgfVsBLQ865M94pr7fAZ/MJuxNJed9KsgKXef +YP/cpI73dbCZL3SQEPoeYq3md+QukmdE4z/hsea6hXRHLwIOPOOaRrulBTjLPIwo +tTewemdxlTqDujhV7OJz2SxXMgBt08JJsXuS1XP+GKzIK7PIo3qzbfbuOrn3e8GA +Y6w1Tk8M8OMpbvhDQG/8XE8rifH6jgiRxFbVARv7IDHxc4zsINlWIq8j8DRv5jhj +eshDjrn7AgMBAAECggEARZxeutxE/pCypv0IqkFS7IVLccTvt2gfemcC1yzIBFOW +oqgTI3Vrq8tbdbHFq3iFDG+m4qlBi+dWDC3GDoPkVoi7dg//1TqZEOO+sqqu2Afj +pge6tIDfeMxWJifwkkpWRURB9hCknhUSW2bMNyUCs3rgREJVTtsmM9CHnoSKXXQL +aOeYXalFVpx3ceK+xdp0VGfpsqEabBKs0yy3EDiQy2huoWce3EVFLVrwx/IkhcsZ +JlI5LPpoiTglSs1g9i88JHS2slBtKtb1lWl/yXHhK1g7s34c6f9jP8snuFE5ddMn +0L4GDA9teaPGvB533eb2RIFy2kUYgpr5c03G6rpoOQKBgQDpY6BFJkPGENnC5Bdb +fJCuN2nyRdC1qvv6ESFaQYb0s6QjKDqpb0dUSYN3+zNgtiAysbQLeU/d9mmt4UR8 +ohjRkOySU0eQ/YNFokjw6g6GPoiMHJJ9cP75NA94uIMIUTY7uHEWWZwXI5UphdPC +p5/3MaF1VlYQys9a5wtiEaDSfQKBgQDOwPV0zQjUabkVQ4yV0amP8xybvHH8ghG0 +RMStHg96RfDmg35JQaw22A2xiVROCoZgLqiE1DFSl/3gBF/vfqBh/uzdxwNerJC6 +ROdCxyS4rys5d/02P4aNOa73sD+ZKyEZRTF1v3bmOGKidRFF5oxIpuHjFWlJFKx1 +O/b3AI0v1wKBgQC/L4N84emm+OrKAfs4UIRckrxRYOulxhmAMkQ2IXOiRP5yZmQX +pDa0TzxJLxhZYxhhLr0koQ3R8CeF7wEhb9AQ7D0/aMU5etLsWhKSd8nKIrPMwyMl +a0kTb5g09kEwsQZSSbcp7eI1+koYp65eyN37q0ZuTnlWbC0MdDQY9APgKQKBgQCb +HqaKNXLUe2XDkGSf2ygOumXSanZS7vt9dsLg59bQ9DyjljBfogglNcBAXTqFOtxK +uXbyAYnn3+U399BKjYSjQXJRioj6tRn4xs2DiooAjlwtx9qQouS+fHLLns54iqVQ +oltTbo00eUV3gcGt4iWKNLrxdxUBIaOqaY0HEMDdDQKBgQCRvcHDF7JSPuBiO3Tw +PSOUD4q6dD/dhI+X2ZKg83w94SZXXms6eMSbedUkLoJ8TDunmdRUUWb6rgP/pJwr +zKRTskItF15i9IWCwC6jBrSfx5n2JcSoBALyc0aR9heF0GQjWwqURd+PC/msomrW +z9SCl8mpQVFtBlui7PcnDLTFAg== +-----END PRIVATE KEY----- diff --git 
a/8.1/test/examples/self-signed-ssl/index.html b/8.1/test/examples/self-signed-ssl/index.html new file mode 100644 index 000000000..82ff698e7 --- /dev/null +++ b/8.1/test/examples/self-signed-ssl/index.html @@ -0,0 +1 @@ +SSL test works diff --git a/8.1/test/imagestreams/php-centos.json b/8.1/test/imagestreams/php-centos.json new file mode 100644 index 000000000..df464ffdb --- /dev/null +++ b/8.1/test/imagestreams/php-centos.json @@ -0,0 +1,133 @@ +{ + "kind": "ImageStream", + "apiVersion": "image.openshift.io/v1", + "metadata": { + "name": "php", + "annotations": { + "openshift.io/display-name": "PHP" + } + }, + "spec": { + "tags": [ + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "PHP (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP applications on UBI. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major version updates.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "8.0-ubi8" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "8.0-ubi9", + "annotations": { + "openshift.io/display-name": "PHP 8.0 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 8.0 applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:8.0,php", + "version": "8.0", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi9/php-80:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "8.0-ubi8", + "annotations": { + "openshift.io/display-name": "PHP 8.0 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 8.0 applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:8.0,php", + "version": "8.0", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/php-80:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "7.4-ubi8", + "annotations": { + "openshift.io/display-name": "PHP 7.4 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.4 applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.4/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.4,php", + "version": "7.4", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/php-74:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "7.3-ubi7", + "annotations": { + "openshift.io/display-name": "PHP 7.3 (UBI 7)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.3 applications on UBI 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.3/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.3,php", + "version": "7.3", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi7/php-73:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "7.3", + "annotations": { + "openshift.io/display-name": "PHP 7.3", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.3 applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.3/README.md.", + "iconClass": "icon-php", + "tags": "builder,php,hidden", + "supports":"php:7.3,php", + "version": "7.3", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "quay.io/centos7/php-73-centos7:latest" + }, + "referencePolicy": { + "type": "Local" + } + } + ] + } +} diff --git a/8.1/test/imagestreams/php-rhel-aarch64.json b/8.1/test/imagestreams/php-rhel-aarch64.json new file mode 100644 index 000000000..5aa6089c3 --- /dev/null +++ b/8.1/test/imagestreams/php-rhel-aarch64.json @@ -0,0 +1,93 @@ +{ + "kind": "ImageStream", + "apiVersion": "image.openshift.io/v1", + "metadata": { + "name": "php", + "annotations": { + "openshift.io/display-name": "PHP" + } + }, + "spec": { + "tags": [ + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "PHP (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP applications on UBI. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major version updates.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "8.0-ubi8" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "8.0-ubi9", + "annotations": { + "openshift.io/display-name": "PHP 8.0 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 8.0 applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:8.0,php", + "version": "8.0", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/php-80:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "8.0-ubi8", + "annotations": { + "openshift.io/display-name": "PHP 8.0 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 8.0 applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:8.0,php", + "version": "8.0", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/php-80:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "7.4-ubi8", + "annotations": { + "openshift.io/display-name": "PHP 7.4 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.4 applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.4/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.4,php", + "version": "7.4", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/php-74:latest" + }, + "referencePolicy": { + "type": "Local" + } + } + ] + } +} diff --git a/8.1/test/imagestreams/php-rhel.json b/8.1/test/imagestreams/php-rhel.json new file mode 100644 index 000000000..c484e7027 --- /dev/null +++ b/8.1/test/imagestreams/php-rhel.json @@ -0,0 +1,133 @@ +{ + "kind": "ImageStream", + "apiVersion": "image.openshift.io/v1", + "metadata": { + "name": "php", + "annotations": { + "openshift.io/display-name": "PHP" + } + }, + "spec": { + "tags": [ + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "PHP (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP applications on UBI. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major version updates.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "8.0-ubi8" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "8.0-ubi9", + "annotations": { + "openshift.io/display-name": "PHP 8.0 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 8.0 applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:8.0,php", + "version": "8.0", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/php-80:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "8.0-ubi8", + "annotations": { + "openshift.io/display-name": "PHP 8.0 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 8.0 applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/8.0/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:8.0,php", + "version": "8.0", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/php-80:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "7.4-ubi8", + "annotations": { + "openshift.io/display-name": "PHP 7.4 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.4 applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.4/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.4,php", + "version": "7.4", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/php-74:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "7.3-ubi7", + "annotations": { + "openshift.io/display-name": "PHP 7.3 (UBI 7)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.3 applications on UBI 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.3/README.md.", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:7.3,php", + "version": "7.3", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi7/php-73:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "7.3", + "annotations": { + "openshift.io/display-name": "PHP 7.3", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run PHP 7.3 applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.3/README.md.", + "iconClass": "icon-php", + "tags": "builder,php,hidden", + "supports":"php:7.3,php", + "version": "7.3", + "sampleRepo": "https://github.com/sclorg/cakephp-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/rhscl/php-73-rhel7:latest" + }, + "referencePolicy": { + "type": "Local" + } + } + ] + } +} diff --git a/8.1/test/run b/8.1/test/run new file mode 100755 index 000000000..9a8bda67a --- /dev/null +++ b/8.1/test/run @@ -0,0 +1,237 @@ +#!/bin/bash +# +# The 'run' performs a simple test that verifies that S2I image. +# The main focus here is to excersise the S2I scripts. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# +test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' +test -n "${VERSION-}" || false 'make sure $VERSION is defined' + +TEST_LIST="\ +test_s2i_usage +test_docker_run_usage +test_application +test_application_user +test_ssl +test_ssl_own_cert +ct_npm_works +test_build_from_dockerfile +" + +# TODO: Make command compatible for Mac users +test_dir="$(readlink -f $(dirname "${BASH_SOURCE[0]}"))" +image_dir=$(readlink -f ${test_dir}/..) +source "${test_dir}/test-lib.sh" + +# TODO: This should be part of the image metadata +test_port=8080 +test_port_ssl=8443 + +info() { + echo -e "\n\e[1m[INFO] $@...\e[0m\n" +} + +image_exists() { + docker inspect $1 &>/dev/null +} + +container_exists() { + image_exists $(cat $cid_file) +} + +container_ip() { + docker inspect --format="{{ .NetworkSettings.IPAddress }}" $(cat $cid_file) +} + +run_s2i_build() { + ct_s2i_build_as_df file://${test_dir}/test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp ${s2i_args} $(ct_build_s2i_npm_variables) +} + +prepare() { + if ! 
image_exists ${IMAGE_NAME}; then + echo "ERROR: The image ${IMAGE_NAME} must exist before this script is executed." + return 1 + fi + # TODO: S2I build require the application is a valid 'GIT' repository, we + # should remove this restriction in the future when a file:// is used. + info "Build the test application image" + pushd ${test_dir}/test-app >/dev/null + git init + git config user.email "build@localhost" && git config user.name "builder" + git add -A && git commit -m "Sample commit" + popd >/dev/null +} + +run_test_application() { + local run_args=${CONTAINER_ARGS:-} + docker run -d --user=100001 ${run_args} --cidfile=${cid_file} ${IMAGE_NAME}-testapp +} + +wait_for_cid() { + local max_attempts=10 + local sleep_time=1 + local attempt=1 + local result=1 + info "Waiting for application container to start" + while [ $attempt -le $max_attempts ]; do + [ -f $cid_file ] && [ -s $cid_file ] && result=0 && break + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done + return $result +} + +test_s2i_usage() { + info "Testing 's2i usage'" + ct_s2i_usage ${IMAGE_NAME} ${s2i_args} &>/dev/null +} + +test_docker_run_usage() { + info "Testing 'docker run' usage" + docker run --rm ${IMAGE_NAME} &>/dev/null +} + +test_scl_usage() { + local run_cmd="$1" + local expected="$2" + + info "Testing the image SCL enable" + out=$(docker run --rm ${IMAGE_NAME} /bin/bash -c "${run_cmd}") + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[/bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi + out=$(docker exec $(cat ${cid_file}) /bin/bash -c "${run_cmd}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi + out=$(docker exec $(cat ${cid_file}) /bin/sh -ic "${run_cmd}" 2>&1) + if ! 
echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/sh -ic "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi +} + +test_session() { + local check_port=$1 ; shift + local check_protocol=${1:-http} ; + info "Testing PHP session" + response=$(curl -s -k ${check_protocol}://$(container_ip):${check_port}/session_test.php) + if [ "${response}" != "Passed" ]; then + echo "ERROR starting PHP session. Test app returned: '${response}'" + return 1 + fi +} + +test_connection() { + local check_port=$1 ; shift + local check_protocol=${1:-http} + info "Testing the HTTP connection (${check_protocol}://$(container_ip):${check_port})" + local max_attempts=10 + local sleep_time=1 + local attempt=1 + local result=1 + while [ $attempt -le $max_attempts ]; do + response_code=$(curl -s -w %{http_code} -o /dev/null -k ${check_protocol}://$(container_ip):${check_port}/) + status=$? + if [ $status -eq 0 ]; then + if [ $response_code -eq 200 ]; then + result=0 + break + fi + fi + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done + return $result +} + + +test_config_writeable() { + local run_cmd="[ -w \${PHP_SYSCONF_PATH}/php.ini ] && [ -w \${PHP_SYSCONF_PATH}/php.d ]" + + info "Checking if configuration is writeable" + docker run --rm "${IMAGE_NAME}" /bin/bash -c "${run_cmd}" +} + +test_application() { + set -x + cid_file=$CID_FILE_DIR/$(mktemp -u -p . --suffix .cid) + # Verify that the HTTP connection can be established to test application container + run_test_application & + + # Wait for the container to write it's CID file + wait_for_cid + ct_check_testcase_result $? + + test_scl_usage "php --version" "$VERSION" + ct_check_testcase_result $? + + test_session ${test_port} + ct_check_testcase_result $? + + test_connection ${test_port} + ct_check_testcase_result $? + + test_connection ${test_port_ssl} https + ct_check_testcase_result $? + + test_config_writeable + ct_check_testcase_result $? 
+} + +test_application_user() { + # Test application with random uid + CONTAINER_ARGS="--user 12345" test_application +} + +test_ssl() { + local cert_dir=/tmp + local cert_base=mycert + ct_gen_self_signed_cert_pem ${cert_dir} ${cert_base} + local private_key=${cert_dir}/${cert_base}-cert-selfsigned.pem + local cert_file=${cert_dir}/${cert_base}-key.pem + +} + +test_ssl_own_cert() { + local cid_file=$CID_FILE_DIR/$(mktemp -u -p . --suffix .cid) + ct_s2i_build_as_df file://${test_dir}/self-signed-ssl ${IMAGE_NAME} ${IMAGE_NAME}-test-self-signed-ssl ${s2i_args} $(ct_build_s2i_npm_variables) + docker run -d --user=100001 ${run_args} --cidfile=${cid_file} ${IMAGE_NAME}-test-self-signed-ssl + test_connection ${test_port_ssl} https + ct_check_testcase_result $? + + echo | openssl s_client -showcerts -servername $(container_ip) -connect $(container_ip):${test_port_ssl} 2>/dev/null | openssl x509 -inform pem -noout -text >./servercert + openssl x509 -in ${test_dir}/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem -inform pem -noout -text >./configcert + diff ./configcert ./servercert >cert.diff + ct_check_testcase_result $? +} + +test_build_from_dockerfile() { + info "Check building using a Dockerfile" + ct_test_app_dockerfile ${test_dir}/examples/from-dockerfile/Dockerfile \ + 'https://github.com/sclorg/cakephp-ex.git' \ + 'Welcome to your CakePHP application on OpenShift' \ + app-src + ct_check_testcase_result $? + ct_test_app_dockerfile ${test_dir}/examples/from-dockerfile/Dockerfile.s2i \ + 'https://github.com/sclorg/cakephp-ex.git' \ + 'Welcome to your CakePHP application on OpenShift' \ + app-src + ct_check_testcase_result $? +} + +ct_init + +# Since we built the candidate image locally, we don't want S2I attempt to pull +# it from Docker hub +s2i_args="--pull-policy=never" + +prepare +run_s2i_build +ct_check_testcase_result $? 
+ +TEST_SET=${TESTS:-$TEST_LIST} ct_run_tests_from_testset "php_tests" diff --git a/8.1/test/run-openshift b/8.1/test/run-openshift new file mode 100755 index 000000000..9c4b2f012 --- /dev/null +++ b/8.1/test/run-openshift @@ -0,0 +1,102 @@ +#!/bin/bash +# +# Test the PHP image in OpenShift. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source ${THISDIR}/test-lib.sh +source ${THISDIR}/test-lib-openshift.sh +source ${THISDIR}/test-lib-php.sh + +# change the branch to a different value if a new change in the example +# app needs to be tested +BRANCH_TO_TEST=master + +set -eo nounset + +trap ct_os_cleanup EXIT SIGINT + +ct_os_check_compulsory_vars + +ct_os_enable_print_logs + +istag="php:$VERSION" + +function test_file_upload() { + local image_name=$1 + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace%%:*}-testing" + local app="https://github.com/openshift-qe/openshift-php-upload-demo" + local ip="" + + echo "Running file upload test for: $image_name" + ct_os_new_project + ct_os_upload_image "v3" "$image_name" "$istag" + + ct_os_deploy_s2i_image "$istag" "$app" --name "${service_name}" + ct_os_wait_pod_ready "$service_name" 60 + + # Wait until the app is prepared to receive files + ip=$(ct_os_get_service_ip "$service_name") + curl "$ip:8080" 2>/dev/null | grep -q "OpenShift File Upload Demonstration" + + # Upload a file into the pod using the php app + curl -F fto=@README.md "$ip:8080/upload.php" &>/dev/null + ct_os_run_in_pod "$(ct_os_get_pod_name "$service_name")" ls uploaded/README.md >/dev/null + + ct_os_delete_project +} + +test_latest_imagestreams() { + local result=1 + # Switch to root directory of a container + pushd "${THISDIR}/../.." >/dev/null || return 1 + ct_check_latest_imagestreams + result=$? 
+ popd >/dev/null || return 1 + return $result +} + +ct_os_cluster_up + +# test local app +ct_os_test_s2i_app ${IMAGE_NAME} "https://github.com/sclorg/s2i-php-container.git" ${VERSION}/test/test-app "Test PHP passed" + +ct_os_test_s2i_app ${IMAGE_NAME} "https://github.com/sclorg/cakephp-ex.git#${BRANCH_TO_TEST}" . 'Welcome to your CakePHP application on OpenShift' + +ct_os_test_template_app ${IMAGE_NAME} \ + https://raw.githubusercontent.com/sclorg/cakephp-ex/${BRANCH_TO_TEST}/openshift/templates/cakephp.json \ + php \ + 'Welcome to your CakePHP application on OpenShift' \ + 8080 http 200 "-p SOURCE_REPOSITORY_REF=${BRANCH_TO_TEST} -p SOURCE_REPOSITORY_URL=https://github.com/sclorg/cakephp-ex.git -p PHP_VERSION=${VERSION} -p NAME=php-testing" + +# test image update with s2i +old_image=$(ct_get_public_image_name "$OS" "$BASE_IMAGE_NAME" "$VERSION") + +if ct_check_image_availability "$old_image"; then + ct_os_test_image_update "$IMAGE_NAME" "${old_image}" "$istag" \ + 'ct_test_response "http://:8080" "200" "Test PHP passed"' \ + "$istag~https://github.com/sclorg/s2i-php-container.git" \ + --context-dir="$VERSION/test/test-app" +else + echo "Image $old_image not found in public repositories, skipping update test." +fi + +test_file_upload "$IMAGE_NAME" + +# Check the imagestream +test_php_imagestream + +# CentOS-7 and RHEL-7 does not have image streams for 7.4 yet. +# test_latest_imagestreams + +OS_TESTSUITE_RESULT=0 + +ct_os_cluster_down + +# vim: set tabstop=2:shiftwidth=2:expandtab: + diff --git a/8.1/test/run-openshift-remote-cluster b/8.1/test/run-openshift-remote-cluster new file mode 100755 index 000000000..4bb73e581 --- /dev/null +++ b/8.1/test/run-openshift-remote-cluster @@ -0,0 +1,42 @@ +#!/bin/bash +# +# Test the PHP S2I image in OpenShift (remote cluster) +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. 
+# VERSION specifies the major version of the PHP runtime in format of X.Y +# OS specifies RHEL version (e.g. OS=rhel7) +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source "${THISDIR}/test-lib-php.sh" +source "${THISDIR}/test-lib-openshift.sh" +source "${THISDIR}/test-lib-remote-openshift.sh" + +TEST_LIST="\ +test_php_integration +test_php_imagestream +" + +trap ct_os_cleanup EXIT SIGINT + +ct_os_check_compulsory_vars || exit $OC_ERR + +ct_os_set_ocp4 || exit $OC_ERR + +oc version + +ct_os_check_login || exit $OC_ERR + +ct_os_tag_image_for_cvp "php" + +set -u + +# For testing on OpenShift 4 we use OpenShift internal registry +export CT_OCP4_TEST=true + +TEST_SUMMARY='' +TEST_SET=${TESTS:-$TEST_LIST} ct_run_tests_from_testset "openshift-remote-cluster" +# vim: set tabstop=2:shiftwidth=2:expandtab: + diff --git a/8.1/test/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem b/8.1/test/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem new file mode 100644 index 000000000..8495b8847 --- /dev/null +++ b/8.1/test/self-signed-ssl/httpd-ssl/certs/server-cert-selfsigned.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIJAI4x7HuBG49oMA0GCSqGSIb3DQEBCwUAMEIxCzAJBgNV +BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg +Q29tcGFueSBMdGQwHhcNMTcxMjAzMjMzMzU3WhcNMTgwMTAyMjMzMzU3WjBCMQsw +CQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZh +dWx0IENvbXBhbnkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +vH4Vdq0a3UWUQd8Z6s2csxhxjAOyUx0rszGL0m3uTjQido6JRBdjN2dXiZc3LFoq +YeOKR3CeHsn7UdrlzaboHFDfjAaextse0740mB1g14H1bAS0POuTPeKa+3wGfzCb +sTSXnfSrICl3n2D/3KSO93WwmS90kBD6HmKt5nfkLpJnROM/4bHmuoV0Ry8CDjzj +mka7pQU4yzyMKLU3sHpncZU6g7o4Vezic9ksVzIAbdPCSbF7ktVz/hisyCuzyKN6 +s2327jq593vBgGOsNU5PDPDjKW74Q0Bv/FxPK4nx+o4IkcRW1QEb+yAx8XOM7CDZ +ViKvI/A0b+Y4Y3rIQ465+wIDAQABo1MwUTAdBgNVHQ4EFgQUAY1i6ZNbqO1+46aw +pldCyPaWoYswHwYDVR0jBBgwFoAUAY1i6ZNbqO1+46awpldCyPaWoYswDwYDVR0T 
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEADhGjnYGq9JvQcygMYEQiIdyS +t06Nu7NUkWz52GJp7WFognWyG+0jAomBR0GSUchfubvVZ7cHIaVKLhiGOqg+HIol +7tNRfvE6x/Idk674g6OTRAWxO/wOlgnRMpRy6XhHOtb4HcPcpWFZJS8MC8+HRWIs +kzMErXe0/obnKn9O04kcEREfmB7kfcD4ooqk5gwbdQk1W6a44LcN6AB5qYPjOzgF +Qnb2aLQW9XhgNhiMsYqDzCZsy0az0rz7NgkVOnKrGJ8x3kVX13GR2joVVHOazms9 +Gd90z+mLMDTbqCRGIPMLvEp4HtAmBxbgsj/zHyinajIqV96B3Cr3zTdW29lHJg== +-----END CERTIFICATE----- diff --git a/8.1/test/self-signed-ssl/httpd-ssl/private/server-key.pem b/8.1/test/self-signed-ssl/httpd-ssl/private/server-key.pem new file mode 100644 index 000000000..ff2ac89c4 --- /dev/null +++ b/8.1/test/self-signed-ssl/httpd-ssl/private/server-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC8fhV2rRrdRZRB +3xnqzZyzGHGMA7JTHSuzMYvSbe5ONCJ2jolEF2M3Z1eJlzcsWiph44pHcJ4eyftR +2uXNpugcUN+MBp7G2x7TvjSYHWDXgfVsBLQ865M94pr7fAZ/MJuxNJed9KsgKXef +YP/cpI73dbCZL3SQEPoeYq3md+QukmdE4z/hsea6hXRHLwIOPOOaRrulBTjLPIwo +tTewemdxlTqDujhV7OJz2SxXMgBt08JJsXuS1XP+GKzIK7PIo3qzbfbuOrn3e8GA +Y6w1Tk8M8OMpbvhDQG/8XE8rifH6jgiRxFbVARv7IDHxc4zsINlWIq8j8DRv5jhj +eshDjrn7AgMBAAECggEARZxeutxE/pCypv0IqkFS7IVLccTvt2gfemcC1yzIBFOW +oqgTI3Vrq8tbdbHFq3iFDG+m4qlBi+dWDC3GDoPkVoi7dg//1TqZEOO+sqqu2Afj +pge6tIDfeMxWJifwkkpWRURB9hCknhUSW2bMNyUCs3rgREJVTtsmM9CHnoSKXXQL +aOeYXalFVpx3ceK+xdp0VGfpsqEabBKs0yy3EDiQy2huoWce3EVFLVrwx/IkhcsZ +JlI5LPpoiTglSs1g9i88JHS2slBtKtb1lWl/yXHhK1g7s34c6f9jP8snuFE5ddMn +0L4GDA9teaPGvB533eb2RIFy2kUYgpr5c03G6rpoOQKBgQDpY6BFJkPGENnC5Bdb +fJCuN2nyRdC1qvv6ESFaQYb0s6QjKDqpb0dUSYN3+zNgtiAysbQLeU/d9mmt4UR8 +ohjRkOySU0eQ/YNFokjw6g6GPoiMHJJ9cP75NA94uIMIUTY7uHEWWZwXI5UphdPC +p5/3MaF1VlYQys9a5wtiEaDSfQKBgQDOwPV0zQjUabkVQ4yV0amP8xybvHH8ghG0 +RMStHg96RfDmg35JQaw22A2xiVROCoZgLqiE1DFSl/3gBF/vfqBh/uzdxwNerJC6 +ROdCxyS4rys5d/02P4aNOa73sD+ZKyEZRTF1v3bmOGKidRFF5oxIpuHjFWlJFKx1 +O/b3AI0v1wKBgQC/L4N84emm+OrKAfs4UIRckrxRYOulxhmAMkQ2IXOiRP5yZmQX +pDa0TzxJLxhZYxhhLr0koQ3R8CeF7wEhb9AQ7D0/aMU5etLsWhKSd8nKIrPMwyMl 
+a0kTb5g09kEwsQZSSbcp7eI1+koYp65eyN37q0ZuTnlWbC0MdDQY9APgKQKBgQCb +HqaKNXLUe2XDkGSf2ygOumXSanZS7vt9dsLg59bQ9DyjljBfogglNcBAXTqFOtxK +uXbyAYnn3+U399BKjYSjQXJRioj6tRn4xs2DiooAjlwtx9qQouS+fHLLns54iqVQ +oltTbo00eUV3gcGt4iWKNLrxdxUBIaOqaY0HEMDdDQKBgQCRvcHDF7JSPuBiO3Tw +PSOUD4q6dD/dhI+X2ZKg83w94SZXXms6eMSbedUkLoJ8TDunmdRUUWb6rgP/pJwr +zKRTskItF15i9IWCwC6jBrSfx5n2JcSoBALyc0aR9heF0GQjWwqURd+PC/msomrW +z9SCl8mpQVFtBlui7PcnDLTFAg== +-----END PRIVATE KEY----- diff --git a/8.1/test/self-signed-ssl/index.html b/8.1/test/self-signed-ssl/index.html new file mode 100644 index 000000000..82ff698e7 --- /dev/null +++ b/8.1/test/self-signed-ssl/index.html @@ -0,0 +1 @@ +SSL test works diff --git a/8.1/test/test-app/composer.json b/8.1/test/test-app/composer.json new file mode 100644 index 000000000..92bd28ed6 --- /dev/null +++ b/8.1/test/test-app/composer.json @@ -0,0 +1,6 @@ +{ + "require": { + "monolog/monolog": "1.0.*" + } +} + diff --git a/8.1/test/test-app/index.php b/8.1/test/test-app/index.php new file mode 100644 index 000000000..e57f08286 --- /dev/null +++ b/8.1/test/test-app/index.php @@ -0,0 +1,13 @@ + + + Test PHP passed + + +

PHP is working

+

+ +

+ + diff --git a/8.1/test/test-app/session_test.php b/8.1/test/test-app/session_test.php new file mode 100644 index 000000000..51165bc77 --- /dev/null +++ b/8.1/test/test-app/session_test.php @@ -0,0 +1,4 @@ + diff --git a/8.1/test/test-lib-openshift.sh b/8.1/test/test-lib-openshift.sh new file mode 100644 index 000000000..c0554bd26 --- /dev/null +++ b/8.1/test/test-lib-openshift.sh @@ -0,0 +1,1441 @@ +# shellcheck disable=SC2148 +if [ -z "${sourced_test_lib_openshift:-}" ]; then + sourced_test_lib_openshift=1 +else + return 0 +fi + +# shellcheck shell=bash +# some functions are used from test-lib.sh, that is usually in the same dir +# shellcheck source=/dev/null +source "$(dirname "${BASH_SOURCE[0]}")"/test-lib.sh + +# Set of functions for testing docker images in OpenShift using 'oc' command + +# A variable containing the overall test result +# TESTSUITE_RESULT=0 +# And the following trap must be set, in the beginning of the test script: +# trap ct_os_cleanup EXIT SIGINT +TESTSUITE_RESULT=0 +OS_CLUSTER_STARTED_BY_TEST=0 + +function ct_os_cleanup() { + local exit_code=$? + echo "${TEST_SUMMARY:-}" + if [ $TESTSUITE_RESULT -ne 0 ] || [ $exit_code -ne 0 ]; then + # shellcheck disable=SC2153 + echo "OpenShift tests for ${IMAGE_NAME} failed." + exit 1 + else + # shellcheck disable=SC2153 + echo "OpenShift tests for ${IMAGE_NAME} succeeded." + exit 0 + fi +} + +# ct_os_check_compulsory_vars +# --------------------------- +# Check the compulsory variables: +# * IMAGE_NAME specifies a name of the candidate image used for testing. +# * VERSION specifies the major version of the MariaDB in format of X.Y +# * OS specifies RHEL version (e.g. 
OS=rhel7) +function ct_os_check_compulsory_vars() { + # shellcheck disable=SC2016 + test -n "${IMAGE_NAME-}" || ( echo 'make sure $IMAGE_NAME is defined' >&2 ; exit 1) + # shellcheck disable=SC2016 + test -n "${VERSION-}" || ( echo 'make sure $VERSION is defined' >&2 ; exit 1) + # shellcheck disable=SC2016 + test -n "${OS-}" || ( echo 'make sure $OS is defined' >&2 ; exit 1) +} + +# ct_os_get_status +# -------------------- +# Returns status of all objects to make debugging easier. +function ct_os_get_status() { + oc get all + oc status + oc status --suggest +} + +# ct_os_print_logs +# -------------------- +# Returns status of all objects and logs from all pods. +function ct_os_print_logs() { + ct_os_get_status + while read -r pod_name; do + echo "INFO: printing logs for pod ${pod_name}" + oc logs "${pod_name}" + done < <(oc get pods --no-headers=true -o custom-columns=NAME:.metadata.name) +} + +# ct_os_enable_print_logs +# -------------------- +# Enables automatic printing of pod logs on ERR. +function ct_os_enable_print_logs() { + set -E + trap ct_os_print_logs ERR +} + +# ct_get_public_ip +# -------------------- +# Returns best guess for the IP that the node is accessible from other computers. +# This is a bit funny heuristic, simply goes through all IPv4 addresses that +# hostname -I returns and de-prioritizes IP addresses commonly used for local +# addressing. The rest of addresses are taken as public with higher probability. +function ct_get_public_ip() { + local hostnames + local public_ip='' + local found_ip + hostnames=$(hostname -I) + for guess_exp in '127\.0\.0\.1' '192\.168\.[0-9\.]*' '172\.[0-9\.]*' \ + '10\.[0-9\.]*' '[0-9\.]*' ; do + found_ip=$(echo "${hostnames}" | grep -oe "${guess_exp}") + if [ -n "${found_ip}" ] ; then + # shellcheck disable=SC2001 + hostnames=$(echo "${hostnames}" | sed -e "s/${found_ip}//") + public_ip="${found_ip}" + fi + done + if [ -z "${public_ip}" ] ; then + echo "ERROR: public IP could not be guessed." 
>&2 + return 1 + fi + echo "${public_ip}" +} + +# ct_os_run_in_pod POD_NAME CMD +# -------------------- +# Runs [cmd] in the pod specified by prefix [pod_prefix]. +# Arguments: pod_name - full name of the pod +# Arguments: cmd - command to be run in the pod +function ct_os_run_in_pod() { + local pod_name="$1" ; shift + + oc exec "$pod_name" -- "$@" +} + +# ct_os_get_service_ip SERVICE_NAME +# -------------------- +# Returns IP of the service specified by [service_name]. +# Arguments: service_name - name of the service +function ct_os_get_service_ip() { + local service_name="${1}" ; shift + local ocp_docker_address="172\.30\.[0-9\.]*" + if [ "${CVP:-0}" -eq "1" ]; then + # shellcheck disable=SC2034 + ocp_docker_address="172\.27\.[0-9\.]*" + fi + # shellcheck disable=SC2016 + oc get "svc/${service_name}" -o yaml | grep clusterIP | \ + cut -d':' -f2 | grep -oe "$ocp_docker_address" +} + + +# ct_os_get_all_pods_status +# -------------------- +# Returns status of all pods. +function ct_os_get_all_pods_status() { + oc get pods -o custom-columns=Ready:status.containerStatuses[0].ready,NAME:.metadata.name +} + +# ct_os_get_all_pods_name +# -------------------- +# Returns the full name of all pods. +function ct_os_get_all_pods_name() { + oc get pods --no-headers -o custom-columns=NAME:.metadata.name +} + +# ct_os_get_pod_status POD_PREFIX +# -------------------- +# Returns status of the pod specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_status() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_status | grep -e "${pod_prefix}" | grep -Ev "(build|deploy)$" \ + | awk '{print $1}' | head -n 1 +} + +# ct_os_get_build_pod_status POD_PREFIX +# -------------------- +# Returns status of the build pod specified by prefix [pod_prefix]. 
+# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_build_pod_status() { + local pod_prefix="${1}" ; shift + local query="custom-columns=NAME:.metadata.name,Ready:status.phase" + oc get pods -o "$query" | grep -e "${pod_prefix}" | grep -E "\-build\s" \ + | sort -u | awk '{print $2}' | tail -n 1 +} + +# ct_os_get_buildconfig_pod_name POD_PREFIX +# ---------------------------- +# Returns status of the buildconfig pod specified by prefix [pod_prefix]. +# Argument: pod_prefix - prefix +function ct_os_get_buildconfig_pod_name() { + local pod_prefix="${1}" ; shift + local query="custom-columns=NAME:.metadata.name" + oc get bc -o "$query" | grep -e "${pod_prefix}" | sort -u | tail -n 1 +} + +# ct_os_get_pod_name POD_PREFIX +# -------------------- +# Returns the full name of pods specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_name() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_name | grep -e "^${pod_prefix}" | grep -Ev "(build|deploy)$" +} + +# ct_os_get_pod_ip POD_NAME +# -------------------- +# Returns the ip of the pod specified by [pod_name]. +# Arguments: pod_name - full name of the pod +function ct_os_get_pod_ip() { + local pod_name="${1}" + oc get pod "$pod_name" --no-headers -o custom-columns=IP:status.podIP +} + +# ct_os_get_sti_build_logs +# ----------------- +# Return logs from sti_build +# Arguments: pod_name +function ct_os_get_sti_build_logs() { + local pod_prefix="${1}" + oc status --suggest + pod_name=$(ct_os_get_buildconfig_pod_name "${pod_prefix}") + # Print logs but do not failed. Just for traces + if [ x"${pod_name}" != "x" ]; then + oc logs "bc/$pod_name" || return 0 + else + echo "Build config bc/$pod_name does not exist for some reason." + echo "Import probably failed." + fi +} + +# ct_os_check_pod_readiness POD_PREFIX STATUS +# -------------------- +# Checks whether the pod is ready. 
+# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: status - expected status (true, false) +function ct_os_check_pod_readiness() { + local pod_prefix="${1}" ; shift + local status="${1}" ; shift + test "$(ct_os_get_pod_status "${pod_prefix}")" == "${status}" +} + +# ct_os_wait_pod_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the pod becoming ready. +# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: timeout - how many seconds to wait +function ct_os_wait_pod_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + # If there is a build pod - wait for it to finish first + sleep 3 + if ct_os_get_all_pods_name | grep -E "${pod_prefix}.*-build"; then + SECONDS=0 + echo -n "Waiting for ${pod_prefix} build pod to finish ..." + while ! [ "$(ct_os_get_build_pod_status "${pod_prefix}")" == "Succeeded" ] ; do + echo -n "." + if [ "${SECONDS}" -gt "${timeout}0" ]; then + echo " FAIL" + ct_os_print_logs || : + ct_os_get_sti_build_logs "${pod_prefix}" || : + return 1 + fi + sleep 3 + done + echo " DONE" + fi + SECONDS=0 + echo -n "Waiting for ${pod_prefix} pod becoming ready ..." + while ! ct_os_check_pod_readiness "${pod_prefix}" "true" ; do + echo -n "." + if [ "${SECONDS}" -gt "${timeout}" ]; then + echo " FAIL"; + ct_os_print_logs || : + ct_os_get_sti_build_logs "${pod_prefix}" || : + return 1 + fi + sleep 3 + done + echo " DONE" +} + +# ct_os_wait_rc_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the rc having desired number of replicas ready. +# Arguments: pod_prefix - prefix of the replication controller +# Arguments: timeout - how many seconds to wait +function ct_os_wait_rc_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + SECONDS=0 + echo -n "Waiting for ${pod_prefix} having desired numbers of replicas ..." + while ! 
test "$( (oc get --no-headers statefulsets; oc get --no-headers rc) 2>/dev/null \ + | grep "^${pod_prefix}" | awk '$2==$3 {print "ready"}')" == "ready" ; do + echo -n "." + if [ "${SECONDS}" -gt "${timeout}" ]; then + echo " FAIL"; + ct_os_print_logs || : + ct_os_get_sti_build_logs "${pod_prefix}" || : + return 1 + fi + sleep 3 + done + echo " DONE" +} + +# ct_os_deploy_pure_image IMAGE [ENV_PARAMS, ...] +# -------------------- +# Runs [image] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_pure_image() { + local image="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}" "$@" || : + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_s2i_image IMAGE APP [ENV_PARAMS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: app - url or local path to git repo with the application sources. +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_s2i_image() { + local image="${1}" ; shift + local app="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}~${app}" --strategy=source "$@" || : + + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_template_image TEMPLATE [ENV_PARAMS, ...] +# -------------------- +# Runs template in the openshift and optionally gives env_params to use +# specific values in the template. 
+# Arguments: template - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the template. +# Example usage: ct_os_deploy_template_image mariadb-ephemeral-template.yaml \ +# DATABASE_SERVICE_NAME=mysql-57-centos7 \ +# DATABASE_IMAGE=mysql-57-centos7 \ +# MYSQL_USER=testu \ +# MYSQL_PASSWORD=testp \ +# MYSQL_DATABASE=testdb +function ct_os_deploy_template_image() { + local template="${1}" ; shift + oc process -f "${template}" "$@" | oc create -f - + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# _ct_os_get_uniq_project_name +# -------------------- +# Returns a unique name of the OpenShift project. +function _ct_os_get_uniq_project_name() { + local r + while true ; do + r=${RANDOM} + mkdir /var/tmp/sclorg-test-${r} &>/dev/null && echo sclorg-test-${r} && break + done +} + +# ct_os_new_project [PROJECT] +# -------------------- +# Creates a new project in the openshift using 'oc' command. +# Arguments: project - project name, uses a new random name if omitted +# Expects 'oc' command that is properly logged in to the OpenShift cluster. +# Not using mktemp, because we cannot use uppercase characters. +# The OPENSHIFT_CLUSTER_PULLSECRET_PATH environment variable can be set +# to contain a path to a k8s secret definition which will be used +# to authenticate to image registries. +# shellcheck disable=SC2120 +function ct_os_new_project() { + if [ "${CVP:-0}" -eq "1" ]; then + echo "Testing in CVP environment. No need to create OpenShift project. This is done by CVP pipeline" + return + fi + if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then + echo "Creating project skipped." 
+ return + fi + local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || : + oc new-project "${project_name}" + # let openshift cluster to sync to avoid some race condition errors + sleep 3 + if test -n "${OPENSHIFT_CLUSTER_PULLSECRET_PATH:-}" -a -e "${OPENSHIFT_CLUSTER_PULLSECRET_PATH:-}"; then + oc create -f "$OPENSHIFT_CLUSTER_PULLSECRET_PATH" + # add registry pullsecret to the serviceaccount if provided + secret_name=$(grep '^\s*name:' "$OPENSHIFT_CLUSTER_PULLSECRET_PATH" | awk '{ print $2 }') + oc secrets link --for=pull default "$secret_name" + fi +} + +# ct_os_delete_project [PROJECT] +# -------------------- +# Deletes the specified project in the openshift +# Arguments: project - project name, uses the current project if omitted +# shellcheck disable=SC2120 +function ct_os_delete_project() { + if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] || [ "${CVP:-0}" -eq "1" ]; then + echo "Deleting project skipped, cleaning objects only." + # when not having enough privileges (remote cluster), it might fail and + # it is not a big problem, so ignore failure in this case + ct_delete_all_objects || : + return + fi + local project_name="${1:-$(oc project -q)}" ; shift || : + if oc delete project "${project_name}" ; then + echo "Project ${project_name} was deleted properly" + else + echo "Project ${project_name} was not delete properly. But it does not block CI." + fi + +} + +# ct_delete_all_objects +# ----------------- +# Deletes all objects within the project. +# Handy when we have one project and want to run more tests. +function ct_delete_all_objects() { + local objects="bc builds dc is isimage istag po pvc rc routes svc" + if [ "${CVP:-0}" -eq "1" ]; then + echo "Testing in CVP environment. No need to delete isimage and istag in OpenShift project. 
This is done by CVP pipeline" + objects="bc builds dc po pvc rc routes" + fi + for x in $objects; do + echo "oc gets info about $x" + oc get "$x" + echo "oc deletes $x with --all --force --grace-period=0" + oc delete "$x" --all --force --grace-period=0 + done + # for some objects it takes longer to be really deleted, so a dummy sleep + # to avoid some races when other test can see not-yet-deleted objects and can fail + sleep 10 +} + +# ct_os_docker_login_v3 +# -------------------- +# Logs in into docker daemon +# Uses global REGISRTY_ADDRESS environment variable for arbitrary registry address. +# Does not do anything if REGISTRY_ADDRESS is set. +function ct_os_docker_login_v3() { + [ -n "${REGISTRY_ADDRESS:-}" ] && echo "REGISTRY_ADDRESS set, not trying to docker login." && return 0 + # docker login fails with "404 page not found" error sometimes, just try it more times + # shellcheck disable=SC2034 + for i in $(seq 12) ; do + # shellcheck disable=SC2015 + docker login -u developer -p "$(oc whoami -t)" "${REGISRTY_ADDRESS:-172.30.1.1:5000}" && return 0 || : + sleep 5 + done + return 1 +} + +# ct_os_docker_login_v4 +# -------------------- +# Logs in into docker daemon +# Uses global REGISRTY_ADDRESS environment variable for arbitrary registry address. +# Does not do anything if REGISTRY_ADDRESS is set. +function ct_os_docker_login_v4() { + OCP4_REGISTER=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}') + echo "OCP4 loging address is $OCP4_REGISTER." + if [ -z "${OCP4_REGISTER}" ]; then + echo "!!!OpenShift 4 registry address not found. This is an error. Check OpenShift 4 cluster!!!" + return 1 + fi + + if docker login -u kubeadmin -p "$(oc whoami -t)" "${OCP4_REGISTER}"; then + echo "Login to $OCP4_REGISTER was successfully." + return 0 + fi + return 1 +} + +# ct_os_upload_image IMAGE [IMAGESTREAM] +# -------------------- +# Uploads image from local registry to the OpenShift internal registry. 
+# Arguments: image - image name to upload +# Arguments: imagestream - name and tag to use for the internal registry. +# In the format of name:tag ($image_name:latest by default) +# Uses global REGISRTY_ADDRESS environment variable for arbitrary registry address. +function ct_os_upload_image() { + local os_version="${1}" ; shift + local input_name="${1}" ; shift + local image_name=${1} + local output_name + local source_name + + if [ "${os_version}" != "v3" ] && [ "${os_version}" != "v4" ]; then + echo "You have to specify OpenShift version to upload an image." + echo "Either 'v3' or 'v4' is allowed" + return 1 + fi + + source_name="${input_name}" + if [ "${os_version}" == "v3" ]; then + output_name="${REGISRTY_ADDRESS:-172.30.1.1:5000}/$(oc project -q)/$image_name" + + if ! ct_os_docker_login_v3; then + return 1 + fi + + fi + if [ "${os_version}" == "v4" ]; then + # Variable OCP4_REGISTER is set in function ct_os_docker_login_v4 + if ! ct_os_docker_login_v4; then + return 1 + fi + output_name="$OCP4_REGISTER/$namespace/$image_name" + fi + docker tag "${source_name}" "${output_name}" + docker push "${output_name}" +} + +# ct_os_is_tag_exists IS_NAME TAG +# -------------------- +# Checks whether the specified tag exists for an image stream +# Arguments: is_name - name of the image stream +# Arguments: tag - name of the tag (usually version) +function ct_os_is_tag_exists() { + local is_name=$1 ; shift + local tag=$1 ; shift + oc get is "${is_name}" -n openshift -o=jsonpath='{.spec.tags[*].name}' | grep -qw "${tag}" +} + +# ct_os_template_exists T_NAME +# -------------------- +# Checks whether the specified template exists for an image stream +# Arguments: t_name - template name of the image stream +function ct_os_template_exists() { + local t_name=$1 ; shift + oc get templates -n openshift | grep -q "^${t_name}\s" +} + +# ct_os_install_in_centos +# -------------------- +# Installs os cluster in CentOS +function ct_os_install_in_centos() { + yum install -y 
centos-release-openshift-origin + yum install -y wget git net-tools bind-utils iptables-services bridge-utils\ + bash-completion origin-clients docker origin-clients +} + + +# ct_os_cluster_up [DIR, IS_PUBLIC, CLUSTER_VERSION] +# -------------------- +# Runs the local OpenShift cluster using 'oc cluster up' and logs in as developer. +# Arguments: dir - directory to keep configuration data in, random if omitted +# Arguments: is_public - sets either private or public hostname for web-UI, +# use "true" for allow remote access to the web-UI, +# "false" is default +# Arguments: cluster_version - version of the OpenShift cluster to use, empty +# means default version of `oc`; example value: 3.7; +# also can be specified outside by OC_CLUSTER_VERSION +function ct_os_cluster_up() { + ct_os_cluster_running && echo "Cluster already running. Nothing is done." && return 0 + ct_os_logged_in && echo "Already logged in to a cluster. Nothing is done." && return 0 + + mkdir -p /var/tmp/openshift + local dir="${1:-$(mktemp -d /var/tmp/openshift/os-data-XXXXXX)}" ; shift || : + local is_public="${1:-'false'}" ; shift || : + local default_cluster_version=${OC_CLUSTER_VERSION:-} + local cluster_version=${1:-${default_cluster_version}} ; shift || : + if ! 
grep -qe '--insecure-registry.*172\.30\.0\.0' /etc/sysconfig/docker ; then + sed -i "s|OPTIONS='|OPTIONS='--insecure-registry 172.30.0.0/16 |" /etc/sysconfig/docker + fi + + systemctl stop firewalld || : + setenforce 0 + iptables -F + + systemctl restart docker + local cluster_ip="127.0.0.1" + [ "${is_public}" == "true" ] && cluster_ip=$(ct_get_public_ip) + + if [ -n "${cluster_version}" ] ; then + # if $cluster_version is not set, we simply use oc that is available + ct_os_set_path_oc "${cluster_version}" + fi + + mkdir -p "${dir}"/{config,data,pv} + case $(oc version| head -n 1) in + "oc v3.1"?.*) + oc cluster up --base-dir="${dir}/data" --public-hostname="${cluster_ip}" + ;; + "oc v3."*) + oc cluster up --host-data-dir="${dir}/data" --host-config-dir="${dir}/config" \ + --host-pv-dir="${dir}/pv" --use-existing-config --public-hostname="${cluster_ip}" + ;; + *) + echo "ERROR: Unexpected oc version." >&2 + return 1 + ;; + esac + oc version + oc login -u system:admin + oc project default + ct_os_wait_rc_ready docker-registry 180 + ct_os_wait_rc_ready router 30 + oc login -u developer -p developer + OS_CLUSTER_STARTED_BY_TEST=1 + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_cluster_down +# -------------------- +# Shuts down the local OpenShift cluster using 'oc cluster down' +function ct_os_cluster_down() { + if [ ${OS_CLUSTER_STARTED_BY_TEST:-0} -eq 1 ] ; then + echo "Switching user to system:admin before cluster is going down." + oc login -u system:admin + echo "Cluster started by the test, shutting down." + oc cluster down + else + echo "Cluster not started by the test, shutting down skipped." 
+ fi +} + +# ct_os_cluster_running +# -------------------- +# Returns 0 if oc cluster is running +function ct_os_cluster_running() { + oc cluster status &>/dev/null +} + +# ct_os_logged_in +# --------------- +# Returns 0 if logged in to a cluster (remote or local) +function ct_os_logged_in() { + oc whoami >/dev/null +} + +# ct_os_set_path_oc OC_VERSION +# -------------------- +# This is a trick that helps using correct version of the `oc`: +# The input is version of the openshift in format v3.6.0 etc. +# If the currently available version of oc is not of this version, +# it first takes a look into /usr/local/oc-/bin directory, +# and if not found there it downloads the community release from github. +# In the end the PATH variable is changed, so the other tests can still use just 'oc'. +# Arguments: oc_version - X.Y part of the version of OSE (e.g. 3.9) +function ct_os_set_path_oc() { + local oc_version + local oc_path + + oc_version=$(ct_os_get_latest_ver "$1") + + if oc version | grep -q "oc ${oc_version%.*}." ; then + echo "Binary oc found already available in version ${oc_version}: $(command -v oc) Doing noting." + return 0 + fi + + # first check whether we already have oc available in /usr/local + local installed_oc_path="/usr/local/oc-${oc_version%.*}/bin" + + if [ -x "${installed_oc_path}/oc" ] ; then + oc_path="${installed_oc_path}" + echo "Binary oc found in ${installed_oc_path}" >&2 + else + # oc not available in /usr/local, try to download it from github (community release) + oc_path="/tmp/oc-${oc_version}-bin" + ct_os_download_upstream_oc "${oc_version}" "${oc_path}" + fi + if [ -z "${oc_path}" ] ; then + echo "ERROR: oc not found installed, nor downloaded" >&1 + return 1 + fi + export PATH="${oc_path}:${PATH}" + if ! oc version | grep -q "oc ${oc_version%.*}." 
; then + echo "ERROR: something went wrong, oc located at ${oc_path}, but oc of version ${oc_version} not found in PATH ($PATH)" >&1 + return 1 + else + echo "PATH set correctly, binary oc found in version ${oc_version}: $(command -v oc)" + fi +} + +# ct_os_get_latest_ver VERSION_PART_X +# -------------------- +# Returns full version (vX.Y.Z) from part of the version (X.Y) +# Arguments: vxy - X.Y part of the version +# Returns vX.Y.Z variant of the version +function ct_os_get_latest_ver(){ + local vxy="v$1" + for vz in {3..0} ; do + curl -sif "https://github.com/openshift/origin/releases/tag/${vxy}.${vz}" >/dev/null && echo "${vxy}.${vz}" && return 0 + done + echo "ERROR: version ${vxy} not found in https://github.com/openshift/origin/tags" >&2 + return 1 +} + +# ct_os_download_upstream_oc OC_VERSION OUTPUT_DIR +# -------------------- +# Downloads a particular version of openshift-origin-client-tools from +# github into specified output directory +# Arguments: oc_version - version of OSE (e.g. 
v3.7.2) +# Arguments: output_dir - output directory +function ct_os_download_upstream_oc() { + local oc_version=$1 + local output_dir=$2 + + # check whether we already have the binary in place + [ -x "${output_dir}/oc" ] && return 0 + + mkdir -p "${output_dir}" + # using html output instead of https://api.github.com/repos/openshift/origin/releases/tags/${oc_version}, + # because API is limited for number of queries if not authenticated + tarball=$(curl -si "https://github.com/openshift/origin/releases/tag/${oc_version}" | grep -o -e "openshift-origin-client-tools-${oc_version}-[a-f0-9]*-linux-64bit.tar.gz" | head -n 1) + + # download, unpack the binaries and then put them into output directory + echo "Downloading https://github.com/openshift/origin/releases/download/${oc_version}/${tarball} into ${output_dir}/" >&2 + curl -sL https://github.com/openshift/origin/releases/download/"${oc_version}"/"${tarball}" | tar -C "${output_dir}" -xz + mv -f "${output_dir}"/"${tarball%.tar.gz}"/* "${output_dir}/" + + rmdir "${output_dir}"/"${tarball%.tar.gz}" +} + + +# ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). 
+# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: app - url or local path to git repo with the application sources (compulsory) +# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Arguments: check_command - CMD line that checks whether the container works (compulsory; '' will be replaced with actual IP) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_s2i_app_func() { + local image_name=${1} + local app=${2} + local context_dir=${3} + local check_command=${4} + local oc_args=${5:-} + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace%%:*}-testing" + local namespace + + if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then + echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be emtpy." >&2 + return 1 + fi + + # shellcheck disable=SC2119 + ct_os_new_project + + namespace=${CT_NAMESPACE:-"$(oc project -q)"} + local image_tagged="${image_name_no_namespace%:*}:${VERSION}" + + if [ "${CVP:-0}" -eq "0" ]; then + if [ "${CT_OCP4_TEST:-false}" == 'true' ] ; then + echo "Uploading image ${image_name} as ${image_tagged} into OpenShift internal registry." 
+ ct_os_upload_image "v4" "${image_name}" "${image_tagged}" + else + # Create a specific imagestream tag for the image so that oc cannot use anything else + if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then + echo "Importing image ${image_name} as ${namespace}/${image_tagged}" + # Use --reference-policy=local to pull remote image content to the cluster + # Works around the issue of builder pods not having access to registry.redhat.io + oc tag --source=docker "${image_name}" "${namespace}/${image_tagged}" --insecure=true --reference-policy=local + ct_os_wait_stream_ready "${image_tagged}" "${namespace}" + else + echo "Uploading image ${image_name} as ${image_tagged}" + ct_os_upload_image "v3" "${image_name}" "${image_tagged}" + fi + fi + else + echo "Testing image ${image_name} in CVP pipeline." + fi + + local app_param="${app}" + if [ -d "${app}" ] ; then + # for local directory, we need to copy the content, otherwise too smart os command + # pulls the git remote repository instead + app_param=$(ct_obtain_input "${app}") + fi + + # shellcheck disable=SC2086 + ct_os_deploy_s2i_image "${image_tagged}" "${app_param}" \ + --context-dir="${context_dir}" \ + --name "${service_name}" \ + ${oc_args} + + if [ -d "${app}" ] ; then + # in order to avoid weird race seen sometimes, let's wait shortly + # before starting the build explicitly + sleep 5 + oc start-build "${service_name}" --from-dir="${app_param}" + fi + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip + local check_command_exp + local image_id + + # get image ID from the deployment config + image_id=$(oc get "deploymentconfig.apps.openshift.io/${service_name}" -o custom-columns=IMAGE:.spec.template.spec.containers[*].image | tail -n 1) + + ip=$(ct_os_get_service_ip "${service_name}") + # shellcheck disable=SC2001 + check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g" -e "s||${image_id}|g") + + echo " Checking APP using $check_command_exp ..." 
+ local result=0 + eval "$check_command_exp" || result=1 + + ct_os_service_image_info "${service_name}" + + if [ $result -eq 0 ] ; then + echo " Check passed." + else + echo " Check failed." + fi + + # shellcheck disable=SC2119 + ct_os_delete_project + return $result +} + +# ct_os_test_s2i_app IMAGE APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. +# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: app - url or local path to git repo with the application sources (compulsory) +# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory) +# Arguments: port - which port to use (optional; default: 8080) +# Arguments: protocol - which protocol to use (optional; default: http) +# Arguments: response_code - what http response code to expect (optional; default: 200) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_s2i_app() { + local image_name=${1} + local app=${2} + local context_dir=${3} + local expected_output=${4} + local port=${5:-8080} + local protocol=${6:-http} + local response_code=${7:-200} + local oc_args=${8:-} + + if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then + echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be emtpy." 
>&2 + return 1 + fi + + ct_os_test_s2i_app_func "${image_name}" \ + "${app}" \ + "${context_dir}" \ + "ct_os_test_response_internal '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" +} + +# ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). +# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: template - url or local path to a template to use (compulsory) +# Arguments: name_in_template - image name used in the template +# Arguments: check_command - CMD line that checks whether the container works (compulsory; '' will be replaced with actual IP) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry, +# specify them in this parameter as "|", where "" is a full image name +# (including registry if needed) and "" is a tag under which the image should be available +# in the OpenShift registry. +function ct_os_test_template_app_func() { + local image_name=${1} + local template=${2} + local name_in_template=${3} + local check_command=${4} + local oc_args=${5:-} + local other_images=${6:-} + + if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then + echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be emtpy." 
>&2 + return 1 + fi + + local service_name="${name_in_template}-testing" + local image_tagged="${name_in_template}:${VERSION}" + local namespace + + # shellcheck disable=SC2119 + ct_os_new_project + + namespace=${CT_NAMESPACE:-"$(oc project -q)"} + # Upload main image is already done by CVP pipeline. No need to do it twice. + if [ "${CVP:-0}" -eq "0" ]; then + # Create a specific imagestream tag for the image so that oc cannot use anything else + if [ "${CT_OCP4_TEST:-false}" == 'true' ] ; then + echo "Uploading image ${image_name} as ${image_tagged} into OpenShift internal registry." + ct_os_upload_image "v4" "${image_name}" "${image_tagged}" + else + if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'true' ] ; then + echo "Importing image ${image_name} as ${image_tagged}" + # Use --reference-policy=local to pull remote image content to the cluster + # Works around the issue of builder pods not having access to registry.redhat.io + oc tag --source=docker "${image_name}" "${namespace}/${image_tagged}" --insecure=true --reference-policy=local + ct_os_wait_stream_ready "${image_tagged}" "${namespace}" + else + echo "Uploading image ${image_name} as ${image_tagged}" + ct_os_upload_image "v3" "${image_name}" "${image_tagged}" + fi + fi + else + echo "Import is already done by CVP pipeline." + fi + # Upload main image is already done by CVP pipeline. No need to do it twice. + if [ "${CVP:-0}" -eq "0" ]; then + # Other images are not uploaded by CVP pipeline. We need to do it. + if [ "${CT_SKIP_UPLOAD_IMAGE:-false}" == 'false' ] ; then + # upload also other images, that template might need (list of pairs in the format | + local image_tag_a + local i_t + for i_t in ${other_images} ; do + echo "${i_t}" + IFS='|' read -ra image_tag_a <<< "${i_t}" + if [[ "$(docker images -q "$image_name" 2>/dev/null)" == "" ]]; then + echo "ERROR: Image $image_name is not pulled yet." 
+ docker images + echo "Add to the beginning of scripts run-openshift-remote-cluster and run-openshift row" + echo "'ct_pull_image $image_name true'." + exit 1 + fi + + if [ "${CT_OCP4_TEST:-false}" == 'true' ] ; then + echo "Uploading image ${image_tag_a[0]} as ${image_tag_a[1]} into OpenShift internal registry." + ct_os_upload_image "v4" "${image_tag_a[0]}" "${image_tag_a[1]}" + else + ct_os_upload_image "v3" "${image_tag_a[0]}" "${image_tag_a[1]}" + fi + done + fi + fi + + # get the template file from remote or local location; if not found, it is + # considered an internal template name, like 'mysql', so use the name + # explicitly + local local_template + + local_template=$(ct_obtain_input "${template}" 2>/dev/null || echo "--template=${template}") + + echo "Creating a new-app with name ${name_in_template} in namespace ${namespace} with args ${oc_args}." + # shellcheck disable=SC2086 + oc new-app "${local_template}" \ + --name "${name_in_template}" \ + -p NAMESPACE="${namespace}" \ + ${oc_args} + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip + local check_command_exp + local image_id + + # get image ID from the deployment config + image_id=$(oc get "deploymentconfig.apps.openshift.io/${service_name}" -o custom-columns=IMAGE:.spec.template.spec.containers[*].image | tail -n 1) + + ip=$(ct_os_get_service_ip "${service_name}") + # shellcheck disable=SC2001 + check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g" -e "s||${image_id}|g") + + echo " Checking APP using $check_command_exp ..." + local result=0 + eval "$check_command_exp" || result=1 + + ct_os_service_image_info "${service_name}" + + if [ $result -eq 0 ] ; then + echo " Check passed." + else + echo " Check failed." + fi + + # shellcheck disable=SC2119 + ct_os_delete_project + return $result +} + +# params: +# ct_os_test_template_app IMAGE APP IMAGE_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... 
] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. +# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: template - url or local path to a template to use (compulsory) +# Arguments: name_in_template - image name used in the template +# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory) +# Arguments: port - which port to use (optional; default: 8080) +# Arguments: protocol - which protocol to use (optional; default: http) +# Arguments: response_code - what http response code to expect (optional; default: 200) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry, +# specify them in this parameter as "|", where "" is a full image name +# (including registry if needed) and "" is a tag under which the image should be available +# in the OpenShift registry. +function ct_os_test_template_app() { + local image_name=${1} + local template=${2} + local name_in_template=${3} + local expected_output=${4} + local port=${5:-8080} + local protocol=${6:-http} + local response_code=${7:-200} + local oc_args=${8:-} + local other_images=${9:-} + + if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then + echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be emtpy." 
>&2 + return 1 + fi + + ct_os_test_template_app_func "${image_name}" \ + "${template}" \ + "${name_in_template}" \ + "ct_os_test_response_internal '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" \ + "${other_images}" +} + +# ct_os_test_image_update IMAGE_NAME OLD_IMAGE ISTAG CHECK_FUNCTION OC_ARGS +# -------------------- +# Runs an image update test with [image] uploaded to [is] imagestream +# and checks the services using an arbitrary function provided in [check_function]. +# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: old_image - valid name of the image from the registry +# Arguments: istag - imagestream to upload the images into (compulsory) +# Arguments: check_function - command to be run to check functionality of created services (compulsory) +# Arguments: oc_args - arguments to use during oc new-app (compulsory) +ct_os_test_image_update() { + local image_name=$1; shift + local old_image=$1; shift + local istag=$1; shift + local check_function=$1; shift + local ip="" check_command_exp="" + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace%%:*}-testing" + + echo "Running image update test for: $image_name" + # shellcheck disable=SC2119 + ct_os_new_project + + # Get current image from repository and create an imagestream + docker pull "$old_image:latest" 2>/dev/null + ct_os_upload_image "v3" "$old_image" "$istag" + + # Setup example application with curent image + oc new-app "$@" --name "$service_name" + ct_os_wait_pod_ready "$service_name" 60 + + # Check application output + ip=$(ct_os_get_service_ip "$service_name") + check_command_exp=${check_function///$ip} + ct_assert_cmd_success "$check_command_exp" + + # Tag built image into the imagestream and wait for rebuild + ct_os_upload_image "v3" "$image_name" "$istag" + ct_os_wait_pod_ready "${service_name}-2" 60 + + # Check application output + ip=$(ct_os_get_service_ip 
"$service_name") + check_command_exp=${check_function///$ip} + ct_assert_cmd_success "$check_command_exp" + + # shellcheck disable=SC2119 + ct_os_delete_project +} + +# ct_os_deploy_cmd_image IMAGE_NAME +# -------------------- +# Runs a special command pod, a pod that does nothing, but includes utilities for testing. +# A typical usage is a mysql pod that includes mysql commandline, that we need for testing. +# Running commands inside this command pod is done via ct_os_cmd_image_run function. +# The pod is not run again if already running. +# Arguments: image_name - image to be used as a command pod +function ct_os_deploy_cmd_image() { + local image_name=${1} + oc get pod command-app &>/dev/null && echo "command POD already running" && return 0 + echo "command POD not running yet, will start one called command-app ${image_name}" + oc create -f - <" + local sleep_time=3 + local attempt=1 + local result=1 + local status + local response_code + local response_file + local util_image_name='registry.access.redhat.com/ubi7/ubi' + + response_file=$(mktemp /tmp/ct_test_response_XXXXXX) + ct_os_deploy_cmd_image "${util_image_name}" + + while [ "${attempt}" -le "${max_attempts}" ]; do + ct_os_cmd_image_run "curl --connect-timeout 10 -s -w '%{http_code}' '${url}'" >"${response_file}" && status=0 || status=1 + if [ "${status}" -eq 0 ]; then + response_code=$(tail -c 3 "${response_file}") + if [ "${response_code}" -eq "${expected_code}" ]; then + result=0 + fi + grep -qP -e "${body_regexp}" "${response_file}" || result=1; + # Some services return 40x code until they are ready, so let's give them + # some chance and not end with failure right away + # Do not wait if we already have expected outcome though + if [ "${result}" -eq 0 ] || [ "${attempt}" -gt "${ignore_error_attempts}" ] || [ "${attempt}" -eq "${max_attempts}" ] ; then + break + fi + fi + attempt=$(( attempt + 1 )) + sleep "${sleep_time}" + done + rm -f "${response_file}" + return "${result}" +} + +# 
ct_os_get_image_from_pod +# ------------------------ +# Print image identifier from an existing pod to stdout +# Argument: pod_prefix - prefix or full name of the pod to get image from +ct_os_get_image_from_pod() { + local pod_prefix=$1 ; shift + local pod_name + pod_name=$(ct_os_get_pod_name "$pod_prefix") + oc get "po/${pod_name}" -o yaml | sed -ne 's/^\s*image:\s*\(.*\)\s*$/\1/ p' | head -1 +} + +# ct_os_check_cmd_internal +# ---------------- +# Runs a specified command, checks exit code and compares the output with expected regexp. +# That all is done inside an image in the cluster, so the function is used +# typically in clusters that are not accessible outside. +# The check is repeated until timeout. +# Argument: util_image_name - name of the image in the cluster that is used for running the cmd +# Argument: service_name - kubernetes' service name to work with (IP address is taken from this one) +# Argument: check_command - command that is run within the util_image_name container +# Argument: expected_content_match - regexp that must be in the output (use .* to ignore check) +# Argument: timeout - number of seconds to wait till the check succeeds +function ct_os_check_cmd_internal() { + local util_image_name=$1 ; shift + local service_name=$1 ; shift + local check_command=$1 ; shift + local expected_content_match=${1:-.*} ; shift + local timeout=${1:-60} ; shift || : + + : " Service ${service_name} check ..." + + local output + local ret + local ip + local check_command_exp + + ip=$(ct_os_get_service_ip "${service_name}") + # shellcheck disable=SC2001 + check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g") + + ct_os_deploy_cmd_image "${util_image_name}" + SECONDS=0 + + echo -n "Waiting for ${service_name} service becoming ready ..." + while true ; do + output=$(ct_os_cmd_image_run "$check_command_exp") + ret=$? + echo "${output}" | grep -qe "${expected_content_match}" || ret=1 + if [ ${ret} -eq 0 ] ; then + echo " PASS" + return 0 + fi + echo -n "." 
+    [ ${SECONDS} -gt "${timeout}" ] && break
+    sleep 3
+  done
+  echo " FAIL"
+  return 1
+}
+
+# ct_os_test_image_stream_template IMAGE_STREAM_FILE TEMPLATE_FILE SERVICE_NAME [TEMPLATE_PARAMS]
+# ------------------------
+# Creates an image stream and deploys a specified template. Then checks that a pod runs.
+# Argument: image_stream_file - local or remote file with the image stream definition
+# Argument: template_file - local file name with a template
+# Argument: service_name - how the pod will be named (prefix)
+# Argument: template_params (optional) - parameters for the template, like image stream version
+function ct_os_test_image_stream_template() {
+  local image_stream_file=${1}
+  local template_file=${2}
+  local service_name=${3}
+  local template_params=${4:-}
+  local local_image_stream_file
+  local local_template_file
+
+  if [ $# -lt 3 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ]; then
+    echo "ERROR: ct_os_test_image_stream_template() requires at least 3 arguments that cannot be empty." >&2
+    return 1
+  fi
+
+  echo "Running image stream test for stream ${image_stream_file} and template ${template_file}"
+  # shellcheck disable=SC2119
+  ct_os_new_project
+
+  local_image_stream_file=$(ct_obtain_input "${image_stream_file}")
+  local_template_file=$(ct_obtain_input "${template_file}")
+  oc create -f "${local_image_stream_file}"
+
+  # shellcheck disable=SC2086
+  if ! ct_os_deploy_template_image "${local_template_file}" -p NAMESPACE="${CT_NAMESPACE:-$(oc project -q)}" ${template_params} ; then
+    echo "ERROR: ${template_file} could not be loaded"
+    return 1
+    # Deliberately not running ct_os_delete_project here because user either
+    # might want to investigate or the cleanup is done with the cleanup trap.
+    # Most functions depend on the set -e anyway at this point.
+  fi
+  ct_os_wait_pod_ready "${service_name}" 120
+  result=$?
+
+  # shellcheck disable=SC2119
+  ct_os_delete_project
+  return $result
+}
+
+# ct_os_wait_stream_ready IMAGE_STREAM_FILE NAMESPACE [ TIMEOUT ]
+# ------------------------
+# Waits max timeout seconds till a [stream] is available in the [namespace].
+# Arguments: image_stream - stream name (usually :)
+# Arguments: namespace - namespace name
+# Arguments: timeout - how many seconds to wait
+function ct_os_wait_stream_ready() {
+  local image_stream=${1}
+  local namespace=${2}
+  local timeout=${3:-60}
+  # It takes some time for the first time before the image is pulled in
+  SECONDS=0
+  echo -n "Waiting for ${namespace}/${image_stream} to become available ..."
+  while ! oc get -n "${namespace}" istag "${image_stream}" &>/dev/null; do
+    if [ "$SECONDS" -gt "${timeout}" ] ; then
+      echo "FAIL: ${namespace}/${image_stream} not available after ${timeout}s:"
+      echo "oc get -n ${namespace} istag ${image_stream}"
+      oc get -n "${namespace}" istag "${image_stream}"
+      return 1
+    fi
+    sleep 3
+    echo -n .
+  done
+  echo " DONE"
+}
+
+# ct_os_test_image_stream_s2i IMAGE_STREAM_FILE IMAGE_NAME APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ]
+# --------------------
+# Check the imagestream with an s2i app check. First it imports the given image stream, then
+# it runs [image] and [app] in the openshift and optionally specifies env_params
+# as environment variables to the image. Then check the http response.
+# Argument: image_stream_file - local or remote file with the image stream definition +# Argument: image_name - container image we test (or name of the existing image stream in : format) +# Argument: app - url or local path to git repo with the application sources (compulsory) +# Argument: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Argument: expected_output - PCRE regular expression that must match the response body (compulsory) +# Argument: port - which port to use (optional; default: 8080) +# Argument: protocol - which protocol to use (optional; default: http) +# Argument: response_code - what http response code to expect (optional; default: 200) +# Argument: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_image_stream_s2i() { + local image_stream_file=${1} + local image_name=${2} + local app=${3} + local context_dir=${4} + local expected_output=${5} + local port=${6:-8080} + local protocol=${7:-http} + local response_code=${8:-200} + local oc_args=${9:-} + local result + local local_image_stream_file + + echo "Running image stream test for stream ${image_stream_file} and application ${app} with context ${context_dir}" + + # shellcheck disable=SC2119 + ct_os_new_project + + local_image_stream_file=$(ct_obtain_input "${image_stream_file}") + oc create -f "${local_image_stream_file}" + + # ct_os_test_s2i_app creates a new project, but we already need + # it before for the image stream import, so tell it to skip this time + CT_SKIP_NEW_PROJECT=true \ + ct_os_test_s2i_app "${IMAGE_NAME}" "${app}" "${context_dir}" "${expected_output}" \ + "${port}" "${protocol}" "${response_code}" "${oc_args}" + result=$? 
+ + # shellcheck disable=SC2119 + ct_os_delete_project + + return $result +} + +# ct_os_test_image_stream_quickstart IMAGE_STREAM_FILE TEMPLATE IMAGE_NAME NAME_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, OTHER_IMAGES ] +# -------------------- +# Check the imagestream with an s2i app check. First it imports the given image stream, then +# it runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. +# Argument: image_stream_file - local or remote file with the image stream definition +# Argument: template_file - local file name with a template +# Argument: image_name - container image we test (or name of the existing image stream in : format) +# Argument: name_in_template - image name used in the template +# Argument: expected_output - PCRE regular expression that must match the response body (compulsory) +# Argument: port - which port to use (optional; default: 8080) +# Argument: protocol - which protocol to use (optional; default: http) +# Argument: response_code - what http response code to expect (optional; default: 200) +# Argument: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +# Argument: other_images - some templates need other image to be pushed into the OpenShift registry, +# specify them in this parameter as "|", where "" is a full image name +# (including registry if needed) and "" is a tag under which the image should be available +# in the OpenShift registry. 
+function ct_os_test_image_stream_quickstart() { + local image_stream_file=${1} + local template_file=${2} + local image_name=${3} + local name_in_template=${4} + local expected_output=${5} + local port=${6:-8080} + local protocol=${7:-http} + local response_code=${8:-200} + local oc_args=${9:-} + local other_images=${10:-} + local result + local local_image_stream_file + local local_template_file + + echo "Running image stream test for stream ${image_stream_file} and quickstart template ${template_file}" + echo "Image name is ${IMAGE_NAME}" + # shellcheck disable=SC2119 + ct_os_new_project + + local_image_stream_file=$(ct_obtain_input "${image_stream_file}") + local_template_file=$(ct_obtain_input "${template_file}") + # ct_os_test_template_app creates a new project, but we already need + # it before for the image stream import, so tell it to skip this time + namespace=${CT_NAMESPACE:-"$(oc project -q)"} + + # Add namespace into openshift arguments + if [[ $oc_args != *"NAMESPACE"* ]]; then + oc_args="${oc_args} -p NAMESPACE=${namespace}" + fi + oc create -f "${local_image_stream_file}" + + # In case we are testing on OpenShift 4 export variable for mirror image + # which means, that image is going to be mirrored from an internal registry into OpenShift 4 + if [ "${CT_EXTERNAL_REGISTRY:-false}" == 'true' ]; then + export CT_TAG_IMAGE=true + fi + # ct_os_test_template_app creates a new project, but we already need + # it before for the image stream import, so tell it to skip this time + + CT_SKIP_NEW_PROJECT=true \ + ct_os_test_template_app "${image_name}" \ + "${local_template_file}" \ + "${name_in_template}" \ + "${expected_output}" \ + "${port}" "${protocol}" "${response_code}" "${oc_args}" "${other_images}" + + result=$? + + # shellcheck disable=SC2119 + ct_os_delete_project + + return $result +} + +# ct_os_service_image_info SERVICE_NAME +# -------------------- +# Shows information about the image used by a specified service. 
+# Argument: service_name - Service name (uesd for deployment config) +function ct_os_service_image_info() { + local service_name=$1 + local image_id + local namespace + + # get image ID from the deployment config + image_id=$(oc get "deploymentconfig.apps.openshift.io/${service_name}" -o custom-columns=IMAGE:.spec.template.spec.containers[*].image | tail -n 1) + namespace=${CT_NAMESPACE:-"$(oc project -q)"} + + echo " Information about the image we work with:" + oc get deploymentconfig.apps.openshift.io/"${service_name}" -o yaml | grep lastTriggeredImage + # for s2i builds, the resulting image is actually in the current namespace, + # so if the specified namespace does not succeed, try the current namespace + oc get isimage -n "${namespace}" "${image_id##*/}" -o yaml || oc get isimage "${image_id##*/}" -o yaml +} +# vim: set tabstop=2:shiftwidth=2:expandtab: diff --git a/8.1/test/test-lib-php.sh b/8.1/test/test-lib-php.sh new file mode 100644 index 000000000..857b55884 --- /dev/null +++ b/8.1/test/test-lib-php.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# +# Functions for tests for the PHP image in OpenShift. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source "${THISDIR}/test-lib.sh" +source "${THISDIR}/test-lib-openshift.sh" + +function test_php_integration() { + ct_os_test_s2i_app "${IMAGE_NAME}" \ + "https://github.com/sclorg/s2i-php-container.git" \ + "test/test-app" \ + "Test PHP passed" +} + +# Check the imagestream +function test_php_imagestream() { + case ${OS} in + rhel7|centos7|rhel8|rhel9) ;; + *) echo "Imagestream testing not supported for $OS environment." 
; return 0 ;; + esac + + ct_os_test_image_stream_s2i "${THISDIR}/imagestreams/php-${OS%[0-9]*}.json" "${IMAGE_NAME}" \ + "https://github.com/sclorg/s2i-php-container.git" \ + test/test-app \ + "Test PHP passed" +} + +# vim: set tabstop=2:shiftwidth=2:expandtab: diff --git a/8.1/test/test-lib-remote-openshift.sh b/8.1/test/test-lib-remote-openshift.sh new file mode 100644 index 000000000..9ffd009b3 --- /dev/null +++ b/8.1/test/test-lib-remote-openshift.sh @@ -0,0 +1,138 @@ +# shellcheck disable=SC2148 +if [ -z "${sourced_test_lib_remote_openshift:-}" ]; then + sourced_test_lib_remote_openshift=1 +else + return 0 +fi + +# shellcheck shell=bash +# some functions are used from test-lib.sh, that is usually in the same dir +# shellcheck source=/dev/null +source "$(dirname "${BASH_SOURCE[0]}")"/test-lib.sh + +# this should be returned when something related to the openshift cluster +# goes wrong during the test pipeline +# shellcheck disable=SC2034 +readonly OC_ERR=11 + +# Set of functions for testing docker images in OpenShift using 'oc' command + +# A variable containing the overall test result +# TESTSUITE_RESULT=0 +# And the following trap must be set, in the beginning of the test script: +# trap ct_os_cleanup EXIT SIGINT + +# ct_os_set_path_oc_4 OC_VERSION +# -------------------- +# This is a trick that helps using correct version 4 of the `oc`: +# The input is version of the openshift in format 4.4 etc. +# If the currently available version of oc is not of this version, +# it first takes a look into /usr/local/oc-/bin directory, + +# Arguments: oc_version - X.Y part of the version of OSE (e.g. 
4.4) +function ct_os_set_path_oc_4() { + echo "Setting OCP4 client" + local oc_version=$1 + local installed_oc_path="/usr/local/oc-v${oc_version}/bin" + echo "PATH ${installed_oc_path}" + if [ -x "${installed_oc_path}/oc" ] ; then + oc_path="${installed_oc_path}" + echo "Binary oc found in ${installed_oc_path}" >&2 + else + echo "OpenShift Client binary on path ${installed_oc_path} not found" + return 1 + fi + export PATH="${oc_path}:${PATH}" +} + +# ct_os_prepare_ocp4 +# ------------------ +# Prepares environment for testing images in OpenShift 4 environment +# +# +function ct_os_set_ocp4() { + if [ "${CVP:-0}" -eq "1" ]; then + echo "Testing in CVP environment. No need to login to OpenShift cluster. This is already done by CVP pipeline." + return + fi + local login + OS_OC_CLIENT_VERSION=${OS_OC_CLIENT_VERSION:-4} + ct_os_set_path_oc_4 "${OS_OC_CLIENT_VERSION}" + + login=$(cat "$KUBEPASSWORD") + oc login -u kubeadmin -p "$login" + oc version + if ! oc version | grep -q "Client Version: ${OS_OC_CLIENT_VERSION}." ; then + echo "ERROR: something went wrong, oc located at ${oc_path}, but oc of version ${OS_OC_CLIENT_VERSION} not found in PATH ($PATH)" >&1 + return 1 + else + echo "PATH set correctly, binary oc found in version ${OS_OC_CLIENT_VERSION}: $(command -v oc)" + fi + # Switch to default project as soon as we are logged to cluster + oc project default + echo "Login to OpenShift ${OS_OC_CLIENT_VERSION} is DONE" + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +function ct_os_tag_image_for_cvp() { + if [ "${CVP:-0}" -eq "0" ]; then + echo "The function is valid only for CVP pipeline." + return + fi + local tag_image_name="$1" + local tag="" + if [ "${OS}" == "rhel7" ]; then + tag="-el7" + elif [ "${OS}" == "rhel8" ]; then + tag="-el8" + elif [ "${OS}" == "rhel9" ]; then + tag="-el9" + else + echo "Only RHEL images are supported." 
+ return + fi + oc tag "${tag_image_name}:${VERSION}" "${tag_image_name}:${VERSION}${tag}" +} + +function ct_os_upload_image_external_registry() { + local input_name="${1}" ; shift + local image_name=${input_name##*/} + local imagestream=${1:-$image_name:latest} + local output_name + + ct_os_login_external_registry + + output_name="${INTERNAL_DOCKER_REGISTRY}/rhscl-ci-testing/$imagestream" + + docker images + docker tag "${input_name}" "${output_name}" + docker push "${output_name}" +} + + +function ct_os_import_image_ocp4() { + local image_name="${1}"; shift + local imagestream=${1:-$image_name:latest} + + echo "Uploading image ${image_name} as ${imagestream} into OpenShift internal registry." + ct_os_upload_image "v4" "${image_name}" "${imagestream}" + +} + +# ct_os_check_login +# --------------- +# function checks if the login to openshift was successful +# if successful returns 0 +# if not, write error message, sets test result to 1 +# and exits with non-zero +# Uses: $TESTSUITE_RESULT - overall result of all tests +function ct_os_check_login() { + oc status || { + echo "-------------------------------------------" + echo "It looks like oc is not properly logged in." + # shellcheck disable=SC2034 + TESTSUITE_RESULT=1 + return 1 + } +} diff --git a/8.1/test/test-lib.sh b/8.1/test/test-lib.sh new file mode 100644 index 000000000..08b3998f8 --- /dev/null +++ b/8.1/test/test-lib.sh @@ -0,0 +1,1438 @@ +# shellcheck shell=bash +# +# Test a container image. 
+# +# Always use sourced from a specific container testfile +# + +# Container CI tests +# abbreviated as "ct" + +# run ct_init before starting the actual testsuite + +# shellcheck disable=SC2148 +if [ -z "${sourced_test_lib:-}" ]; then + sourced_test_lib=1 +else + return 0 +fi + +LINE="==============================================" + +# may be redefined in the specific container testfile +EXPECTED_EXIT_CODE=0 + +# define UNSTABLE_TESTS if not already defined, as this variable +# is not mandatory for containers +UNSTABLE_TESTS="${UNSTABLE_TESTS:-""}" + + +# ct_init +# -------------------- +# This function needs to be called before any container test starts +# Sets: $APP_ID_FILE_DIR - path to directory used for storing +# IDs of application images used during tests. +# Sets: $CID_FILE_DIR - path to directory containing cid_files +# Sets: $TEST_SUMMARY - string, where test results are written +# Sets: $TESTSUITE_RESULT - overall result of run testuite +function ct_init() { + APP_ID_FILE_DIR="$(mktemp -d)" + CID_FILE_DIR="$(mktemp -d)" + TEST_SUMMARY="" + TESTSUITE_RESULT=0 + ct_enable_cleanup +} + +# ct_cleanup +# -------------------- +# Cleans up containers used during tests. Stops and removes all containers +# referenced by cid_files in CID_FILE_DIR. Dumps logs if a container exited +# unexpectedly. Removes the cid_files and CID_FILE_DIR as well. +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $EXPECTED_EXIT_CODE - expected container exit code +# Uses: $TESTSUITE_RESULT - overall result of all tests +function ct_cleanup() { + echo "$LINE" + echo "Cleaning of testing containers and images started." + echo "It may take a few seconds." 
+  echo "$LINE"
+  ct_clean_app_images
+  ct_clean_containers
+}
+
+# ct_build_image_and_parse_id
+# --------------------
+# Return 0 if build was successful, 1 otherwise
+# Uses: $1 - path to dockerfile
+# Uses: $2 - build params
+# Uses: $APP_IMAGE_ID - sets the app image id value to this variable
+# this should be replaced by the --iidfile parameter
+# when it becomes supported by all versions of podman and docker that we support
+ct_build_image_and_parse_id() {
+  local tmpdir
+  local log_file
+  local ret_val
+  local dockerfile
+  local command
+  local pid_build
+  local pid_sleep
+  local sleep_time
+  log_file="$(mktemp)"
+  sleep_time="10m"
+  [ -n "$1" ] && dockerfile="-f $1"
+  command="$(echo "docker build --no-cache $dockerfile $2" | tr -d "'")"
+  # running command in subshell, the subshell in background, storing pid to variable
+  (
+    $command > "$log_file" 2>&1
+  ) & pid_build=$!
+  # creating second subshell with trap function on ALRM signal
+  # the subshell sleeps for 10m, then kills the first subshell
+  (
+    trap 'exit 0' ALRM; sleep "$sleep_time" && kill $pid_build
+  ) & pid_sleep=$!
+  # waiting for build subshell to finish, either with success, or killed from sleep subshell
+  wait $pid_build
+  ret_val=$?
+  # send ALRM signal to the sleep subshell, so it exits even in case the 10mins
+  # not yet passed. If the kill was successful (the wait subshell received ALRM signal)
+  # then the build was not finished yet, so the return value is set to 1
+  kill -s ALRM $pid_sleep 2>/dev/null || ret_val=1
+
+  if [ $ret_val -eq 0 ]; then
+    APP_IMAGE_ID="$(tail -n 1 "$log_file")"
+  fi
+
+  cat "$log_file" ; rm -r "$log_file"
+  return "$ret_val"
+}
+
+# ct_container_running
+# --------------------
+# Return 0 if given container is in running state
+# Uses: $1 - container id to check
+function ct_container_running() {
+  local running
+  running="$(docker inspect -f '{{.State.Running}}' "$1")"
+  [ "$running" = "true" ] || return 1
+}
+
+# ct_container_exists
+# --------------------
+# Return 0 if given container exists
+# Uses: $1 - container id to check
+function ct_container_exists() {
+  local exists
+  exists="$(docker ps -q -a -f "id=$1")"
+  [ -n "$exists" ] || return 1
+}
+
+# ct_clean_app_images
+# --------------------
+# Cleans up application images referenced by APP_ID_FILE_DIR
+# Uses: $APP_ID_FILE_DIR - path to directory containing image ID files
+function ct_clean_app_images() {
+  local image
+  if [[ ! -d "${APP_ID_FILE_DIR:-}" ]]; then
+    echo "The \$APP_ID_FILE_DIR=$APP_ID_FILE_DIR is not created. App cleaning is to be skipped."
+    return 0
+  fi;
+  echo "Examining image ID files in \$APP_ID_FILE_DIR=$APP_ID_FILE_DIR"
+  for file in "${APP_ID_FILE_DIR:?}"/*; do
+    image="$(cat "$file")"
+    docker inspect "$image" > /dev/null 2>&1 || continue
+    containers="$(docker ps -q -a -f ancestor="$image")"
+    [[ -z "$containers" ]] || docker rm -f "$containers" 2>/dev/null
+    docker rmi -f "$image"
+  done
+  rm -fr "$APP_ID_FILE_DIR"
+}
+
+# ct_clean_containers
+# --------------------
+# Cleans up containers referenced by CID_FILE_DIR
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+function ct_clean_containers() {
+  if [[ -z ${CID_FILE_DIR:-} ]]; then
+    echo "The \$CID_FILE_DIR is not set. Container cleaning is to be skipped."
+ return + fi; + + echo "Examining CID files in \$CID_FILE_DIR=$CID_FILE_DIR" + for cid_file in "$CID_FILE_DIR"/* ; do + [ -f "$cid_file" ] || continue + local container + container=$(cat "$cid_file") + + ct_container_exists "$container" || continue + + echo "Stopping and removing container $container..." + if ct_container_running "$container"; then + docker stop "$container" + fi + + exit_status=$(docker inspect -f '{{.State.ExitCode}}' "$container") + if [ "$exit_status" != "$EXPECTED_EXIT_CODE" ]; then + echo "Dumping logs for $container" + docker logs "$container" + fi + docker rm -v "$container" + rm -f "$cid_file" + done + + rm -rf "$CID_FILE_DIR" +} + +# ct_show_results +# --------------- +# Prints results of all test cases that are stored into TEST_SUMMARY variable. +# Uses: $IMAGE_NAME - name of the tested container image +# Uses: $TEST_SUMMARY - text info about test-cases +# Uses: $TESTSUITE_RESULT - overall result of all tests +function ct_show_results() { + echo "$LINE" + #shellcheck disable=SC2153 + echo "Tests were run for image ${IMAGE_NAME}" + echo "$LINE" + echo "Test cases results:" + echo + echo "${TEST_SUMMARY:-}" + + if [ -n "${TESTSUITE_RESULT:-}" ] ; then + if [ "$TESTSUITE_RESULT" -eq 0 ] ; then + # shellcheck disable=SC2153 + echo "Tests for ${IMAGE_NAME} succeeded." + else + # shellcheck disable=SC2153 + echo "Tests for ${IMAGE_NAME} failed." + fi + fi +} + +# ct_enable_cleanup +# -------------------- +# Enables automatic container cleanup after tests. +function ct_enable_cleanup() { + trap ct_trap_on_exit EXIT + trap ct_trap_on_sigint SIGINT +} + +# ct_trap_on_exit +# -------------------- +function ct_trap_on_exit() { + local exit_code=$? + [ $exit_code -eq 130 ] && return # we do not want to catch SIGINT here + # We should not really care about what the script returns + # as the tests are constructed the way they never exit the shell. 
+ # The check is added just to be sure that we catch some not expected behavior + # if any is added in the future. + echo "Tests finished with EXIT=$exit_code" + [ $exit_code -eq 0 ] && exit_code="${TESTSUITE_RESULT:-0}" + ct_show_resources + ct_cleanup + ct_show_results + exit $exit_code +} + +# ct_trap_on_sigint +# -------------------- +function ct_trap_on_sigint() { + echo "Tests were stopped by SIGINT signal" + ct_cleanup + ct_show_results + exit 130 +} + +# ct_pull_image +# ------------- +# Function pull an image before tests execution +# Argument: image_name - string containing the public name of the image to pull +# Argument: exit - in case "true" is defined and pull failed, then script has to exit with 1 and no tests are executed +# Argument: loops - how many times to pull image in case of failure +# Function returns either 0 in case of pull was successful +# Or the test suite exit with 1 in case of pull error +function ct_pull_image() { + local image_name="$1"; [[ $# -gt 0 ]] && shift + local exit_variable=${1:-"false"}; [[ $# -gt 0 ]] && shift + local loops=${1:-10} + local loop=0 + + # Let's try to pull image. + echo "-> Pulling image $image_name ..." + # Sometimes in Fedora case it fails with HTTP 50X + # Check if the image is available locally and try to pull it if it is not + if [[ "$(docker images -q "$image_name" 2>/dev/null)" != "" ]]; then + echo "The image $image_name is already pulled." + return 0 + fi + + # Try pulling the image to see if it is accessible + # WORKAROUND: Since Fedora registry sometimes fails randomly, let's try it more times + while ! docker pull "$image_name"; do + ((loop++)) || : + echo "Pulling image $image_name failed." + if [ "$loop" -gt "$loops" ]; then + echo "Pulling of image $image_name failed $loops times in a row. Giving up." + echo "!!! ERROR with pulling image $image_name !!!!" 
+      # shellcheck disable=SC2268
+      if [[ x"$exit_variable" == x"false" ]]; then
+        return 1
+      else
+        exit 1
+      fi
+    fi
+    echo "Let's wait $((loop*5)) seconds and try again."
+    sleep "$((loop*5))"
+  done
+}
+
+
+# ct_check_envs_set env_filter check_envs loop_envs [env_format]
+# --------------------
+# Compares values from one list of environment variable definitions against such list,
+# checking if the values are present and have a specific format.
+# Argument: env_filter - optional string passed to grep used for
+#           choosing which variables to filter out in env var lists.
+# Argument: check_envs - list of env var definitions to check values against
+# Argument: loop_envs - list of env var definitions to check values for
+# Argument: env_format (optional) - format string for bash substring deletion used
+#           for checking whether the value is contained in check_envs.
+#           Defaults to: "*VALUE*", VALUE string gets replaced by actual value from loop_envs
+function ct_check_envs_set {
+  local env_filter check_envs env_format loop_envs
+  env_filter=$1; shift
+  check_envs=$1; shift
+  loop_envs=$1; shift
+  env_format=${1:-"*VALUE*"}
+  while read -r variable; do
+    [ -z "$variable" ] && continue
+    var_name=$(echo "$variable" | awk -F= '{ print $1 }')
+    stripped=$(echo "$variable" | awk -F= '{ print $2 }')
+    filtered_envs=$(echo "$check_envs" | grep "^$var_name=")
+    [ -z "$filtered_envs" ] && { echo "$var_name not found during \` docker exec\`"; return 1; }
+    old_IFS=$IFS
+    # For each such variable compare its content with the `docker exec` result, use `:` as delimiter
+    IFS=:
+    for value in $stripped; do
+      # If the value checked does not go through env_filter we do not care about it
+      echo "$value" | grep -q "$env_filter" || continue
+      # shellcheck disable=SC2295
+      if [ -n "${filtered_envs##${env_format//VALUE/$value}}" ]; then
+        echo " Value $value is missing from variable $var_name"
+        echo "$filtered_envs"
+        IFS=$old_IFS
+        return 1
+      fi
+    done
+    IFS=$old_IFS
+  done <<< "$(echo "$loop_envs" | grep "$env_filter" | grep -v "^PWD=")"
+}
+
+# ct_get_cid [name]
+# --------------------
+# Prints container id from cid_file based on the name of the file.
+# Argument: name - name of cid_file where the container id will be stored
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+function ct_get_cid() {
+  local name="$1" ; shift || return 1
+  cat "$CID_FILE_DIR/$name"
+}
+
+# ct_get_cip [id]
+# --------------------
+# Prints container ip address based on the container id.
+# Argument: id - container id
+function ct_get_cip() {
+  local id="$1" ; shift
+  docker inspect --format='{{.NetworkSettings.IPAddress}}' "$(ct_get_cid "$id")"
+}
+
+# ct_wait_for_cid [cid_file]
+# --------------------
+# Holds the execution until the cid_file is created. Usually run after container
+# creation.
+# Argument: cid_file - name of the cid_file that should be created
+function ct_wait_for_cid() {
+  local cid_file=$1
+  local max_attempts=10
+  local sleep_time=1
+  local attempt=1
+  local result=1
+  while [ $attempt -le $max_attempts ]; do
+    [ -f "$cid_file" ] && [ -s "$cid_file" ] && return 0
+    echo "Waiting for container start... $attempt"
+    attempt=$(( attempt + 1 ))
+    sleep $sleep_time
+  done
+  return 1
+}
+
+# ct_assert_container_creation_fails [container_args]
+# --------------------
+# The invocation of docker run should fail based on invalid container_args
+# passed to the function. Returns 0 when container fails to start properly.
+# Argument: container_args - all arguments are passed directly to docker run
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+function ct_assert_container_creation_fails() {
+  local ret=0
+  local max_attempts=10
+  local attempt=1
+  local cid_file=assert
+  set +e
+  local old_container_args="${CONTAINER_ARGS-}"
+  # we really work with CONTAINER_ARGS as with a string
+  # shellcheck disable=SC2124
+  CONTAINER_ARGS="$@"
+  if ct_create_container "$cid_file" ; then
+    local cid
+    cid=$(ct_get_cid "$cid_file")
+
+    while [ "$(docker inspect -f '{{.State.Running}}' "$cid")" == "true" ] ; do
+      sleep 2
+      attempt=$(( attempt + 1 ))
+      if [ "$attempt" -gt "$max_attempts" ]; then
+        docker stop "$cid"
+        ret=1
+        break
+      fi
+    done
+    exit_status=$(docker inspect -f '{{.State.ExitCode}}' "$cid")
+    if [ "$exit_status" == "0" ]; then
+      ret=1
+    fi
+    docker rm -v "$cid"
+    rm "$CID_FILE_DIR/$cid_file"
+  fi
+  # Restore unconditionally: keeping the failing args when the variable was
+  # originally empty would leak them into subsequent ct_create_container calls
+  CONTAINER_ARGS="$old_container_args"
+  set -e
+  return "$ret"
+}
+
+# ct_create_container [name, command]
+# --------------------
+# Creates a container using the IMAGE_NAME and CONTAINER_ARGS variables. Also
+# stores the container id to a cid_file located in the CID_FILE_DIR, and waits
+# for the creation of the file.
+# Argument: name - name of cid_file where the container id will be stored
+# Argument: command - optional command to be executed in the container
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+# Uses: $CONTAINER_ARGS - optional arguments passed directly to docker run
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_create_container() {
+  local cid_file="$CID_FILE_DIR/$1" ; shift
+  # create container with a cidfile in a directory for cleanup
+  # shellcheck disable=SC2086,SC2153
+  docker run --cidfile="$cid_file" -d ${CONTAINER_ARGS:-} "$IMAGE_NAME" "$@"
+  ct_wait_for_cid "$cid_file" || return 1
+  : "Created container $(cat "$cid_file")"
+}
+
+# ct_scl_usage_old [name, command, expected]
+# --------------------
+# Tests three ways of running the SCL, by looking for an expected string
+# in the output of the command
+# Argument: name - name of cid_file where the container id will be stored
+# Argument: command - executed inside the container
+# Argument: expected - string that is expected to be in the command output
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_scl_usage_old() {
+  local name="$1"
+  local command="$2"
+  local expected="$3"
+  local out=""
+  : " Testing the image SCL enable"
+  out=$(docker run --rm "${IMAGE_NAME}" /bin/bash -c "${command}")
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[/bin/bash -c \"${command}\"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+  out=$(docker exec "$(ct_get_cid "$name")" /bin/bash -c "${command}" 2>&1)
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[exec /bin/bash -c \"${command}\"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+  out=$(docker exec "$(ct_get_cid "$name")" /bin/sh -ic "${command}" 2>&1)
+  if ! echo "${out}" | grep -q "${expected}"; then
+    echo "ERROR[exec /bin/sh -ic \"${command}\"] Expected '${expected}', got '${out}'" >&2
+    return 1
+  fi
+}
+
+# ct_doc_content_old [strings]
+# --------------------
+# Looks for occurrence of strings in the documentation files and checks
+# the format of the files. Files examined: help.1
+# Argument: strings - strings expected to appear in the documentation
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_doc_content_old() {
+  local tmpdir
+  tmpdir=$(mktemp -d)
+  local f
+  : " Testing documentation in the container image"
+  # Extract the help files from the container
+  # shellcheck disable=SC2043
+  for f in help.1 ; do
+    docker run --rm "${IMAGE_NAME}" /bin/bash -c "cat /${f}" >"${tmpdir}/$(basename "${f}")"
+    # Check whether the files contain some important information
+    for term in "$@" ; do
+      if ! grep -E -q -e "${term}" "${tmpdir}/$(basename "${f}")" ; then
+        echo "ERROR: File /${f} does not include '${term}'." >&2
+        return 1
+      fi
+    done
+    # Check whether the files use the correct format
+    for term in TH PP SH ; do
+      if ! grep -q "^\.${term}" "${tmpdir}/help.1" ; then
+        echo "ERROR: /help.1 is probably not in troff or groff format, since '${term}' is missing." >&2
+        return 1
+      fi
+    done
+  done
+  : " Success!"
+}
+
+# full_ca_file_path
+# Return string for full path to CA file
+function full_ca_file_path()
+{
+  echo "/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt"
+}
+# ct_mount_ca_file
+# ------------------
+# Check if the CA file returned by full_ca_file_path
+# (/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt) exists and
+# return mount string for containers or empty string
+function ct_mount_ca_file()
+{
+  # mount CA file only if NPM_REGISTRY variable is present.
+  local mount_parameter=""
+  if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then
+    mount_parameter="-v $(full_ca_file_path):$(full_ca_file_path):Z"
+  fi
+  echo "$mount_parameter"
+}
+
+# ct_build_s2i_npm_variables URL_TO_NPM_JS_SERVER
+# ------------------------------------------
+# Function returns -e NPM_MIRROR and -v MOUNT_POINT_FOR_CAFILE
+# or empty string
+function ct_build_s2i_npm_variables()
+{
+  npm_variables=""
+  if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then
+    npm_variables="-e NPM_MIRROR=$NPM_REGISTRY $(ct_mount_ca_file)"
+  fi
+  echo "$npm_variables"
+}
+
+# ct_npm_works
+# --------------------
+# Checks existence of the npm tool and runs it.
+function ct_npm_works() {
+  local tmpdir
+  local cid_file
+  tmpdir=$(mktemp -d)
+  : " Testing npm in the container image"
+  cid_file="$(mktemp --dry-run --tmpdir="${CID_FILE_DIR}")"
+  if ! docker run --rm "${IMAGE_NAME}" /bin/bash -c "npm --version" >"${tmpdir}/version" ; then
+    echo "ERROR: 'npm --version' does not work inside the image ${IMAGE_NAME}." >&2
+    return 1
+  fi
+
+  # shellcheck disable=SC2046
+  docker run -d $(ct_mount_ca_file) --rm --cidfile="$cid_file" "${IMAGE_NAME}-testapp"
+
+  # Wait for the container to write its CID file
+  ct_wait_for_cid "$cid_file" || return 1
+
+  if ! docker exec "$(cat "$cid_file")" /bin/bash -c "npm --verbose install jquery && test -f node_modules/jquery/src/jquery.js" >"${tmpdir}/jquery" 2>&1 ; then
+    echo "ERROR: npm could not install jquery inside the image ${IMAGE_NAME}." >&2
+    cat "${tmpdir}/jquery"
+    return 1
+  fi
+
+  if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then
+    if ! grep -qo "$NPM_REGISTRY" "${tmpdir}/jquery"; then
+      echo "ERROR: Internal repository is NOT set. Even it is requested."
+      return 1
+    fi
+  fi
+
+  if [ -f "$cid_file" ]; then
+    docker stop "$(cat "$cid_file")"
+  fi
+  : " Success!"
+}
+
+# ct_binary_found_from_df binary [path]
+# --------------------
+# Checks if a binary can be found in PATH during Dockerfile build
+# Argument: binary - name of the binary to test accessibility for
+# Argument: path - optional path in which the binary should reside in
+# /opt/rh by default
+function ct_binary_found_from_df() {
+  local tmpdir
+  local id_file
+  local binary=$1; shift
+  local binary_path=${1:-"^/opt/rh"}
+  tmpdir=$(mktemp -d)
+  : " Testing $binary in build from Dockerfile"
+
+  # Create Dockerfile that looks for the binary
+  # (write the heredoc INTO the file; 'cat <file' would try to read a
+  # nonexistent Dockerfile and leave the heredoc body orphaned)
+  cat <<EOF >"$tmpdir/Dockerfile"
+FROM $IMAGE_NAME
+RUN command -v $binary | grep "$binary_path"
+EOF
+  # Build an image, looking for expected path in the output
+  ct_build_image_and_parse_id "$tmpdir/Dockerfile" "$tmpdir"
+  #shellcheck disable=SC2181
+  if [ $? -ne 0 ]; then
+    echo " ERROR: Failed to find $binary in \$PATH!" >&2
+    return 1
+  fi
+  id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM"
+  echo "$APP_IMAGE_ID" > "$id_file"
+}
+
+# ct_check_exec_env_vars [env_filter]
+# --------------------
+# Checks if all relevant environment variables from `docker run`
+# can be found in `docker exec` as well.
+# Argument: env_filter - optional string passed to grep used for
+# choosing which variables to check in the test case.
+# Defaults to X_SCLS and variables containing /opt/app-root, /opt/rh
+# Uses: $CID_FILE_DIR - path to directory containing cid_files
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_check_exec_env_vars() {
+  local tmpdir exec_envs cid old_IFS env_filter
+  local var_name stripped filtered_envs run_envs
+  env_filter=${1:-"^X_SCLS=\|/opt/rh\|/opt/app-root"}
+  tmpdir=$(mktemp -d)
+  CID_FILE_DIR=${CID_FILE_DIR:-$(mktemp -d)}
+  # Get environment variables from `docker run`
+  run_envs=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "env")
+  # Get environment variables from `docker exec`
+  ct_create_container "test_exec_envs" bash -c "sleep 1000" >/dev/null
+  cid=$(ct_get_cid "test_exec_envs")
+  exec_envs=$(docker exec "$cid" env)
+  # Filter out variables we are not interested in
+  # Always check X_SCLS, ignore PWD
+  # Check variables from `docker run` that have alternative paths inside (/opt/rh, /opt/app-root)
+  ct_check_envs_set "$env_filter" "$exec_envs" "$run_envs" "*VALUE*" || return 1
+  echo " All values present in \`docker exec\`"
+  return 0
+}
+
+# ct_check_scl_enable_vars [env_filter]
+# --------------------
+# Checks if all relevant environment variables from `docker run`
+# are set twice after a second call of `scl enable $SCLS`.
+# Argument: env_filter - optional string passed to grep used for
+# choosing which variables to check in the test case.
+# Defaults to paths containing enabled SCLS in the image
+# Uses: $IMAGE_NAME - name of the image being tested
+function ct_check_scl_enable_vars() {
+  local tmpdir exec_envs cid old_IFS env_filter enabled_scls
+  local var_name stripped filtered_envs loop_envs run_envs
+  env_filter=$1
+  tmpdir=$(mktemp -d)
+  enabled_scls=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "echo \$X_SCLS")
+  if [ -z "$env_filter" ]; then
+    for scl in $enabled_scls; do
+      [ -z "$env_filter" ] && env_filter="/$scl" && continue
+      # env_filter not empty, append to the existing list
+      env_filter="$env_filter|/$scl"
+    done
+  fi
+  # Baseline environment variables from a plain `docker run`
+  loop_envs=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "env")
+  run_envs=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "X_SCLS= scl enable $enabled_scls env")
+  # Check if the values are set twice in the second set of envs
+  ct_check_envs_set "$env_filter" "$run_envs" "$loop_envs" "*VALUE*VALUE*" || return 1
+  echo " All scl_enable values present"
+  return 0
+}
+
+# ct_path_append PATH_VARNAME DIRECTORY
+# -------------------------------------
+# Append DIRECTORY to VARIABLE of name PATH_VARNAME, the VARIABLE must consist
+# of colon-separated list of directories.
+ct_path_append ()
+{
+  if eval "test -n \"\${$1-}\""; then
+    eval "$1=\$2:\$$1"
+  else
+    eval "$1=\$2"
+  fi
+}
+
+
+# ct_path_foreach PATH ACTION [ARGS ...]
+# --------------------------------------
+# For each DIR in PATH execute ACTION (path is colon separated list of
+# directories). The particular calls to ACTION will look like
+# '$ ACTION directory [ARGS ...]'
+ct_path_foreach ()
+{
+  local dir dirlist action save_IFS
+  save_IFS=$IFS
+  IFS=:
+  dirlist=$1
+  action=$2
+  shift 2
+  for dir in $dirlist; do "$action" "$dir" "$@" ; done
+  IFS=$save_IFS
+}
+
+
+# ct_gen_self_signed_cert_pem
+# ---------------------------
+# Generates a self-signed PEM certificate pair into specified directory.
+# Argument: output_dir - output directory path
+# Argument: base_name - base name of the certificate files
+# Resulted files will be those:
+# <output_dir>/<base_name>-cert-selfsigned.pem -- public PEM cert
+# <output_dir>/<base_name>-key.pem -- PEM private key
+ct_gen_self_signed_cert_pem() {
+  local output_dir=$1 ; shift
+  local base_name=$1 ; shift
+  mkdir -p "${output_dir}"
+  # Write the CSR into output_dir as well, instead of littering the cwd
+  openssl req -newkey rsa:2048 -nodes -keyout "${output_dir}"/"${base_name}"-key.pem -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > "${output_dir}"/"${base_name}"-req.pem
+  openssl req -new -x509 -nodes -key "${output_dir}"/"${base_name}"-key.pem -batch > "${output_dir}"/"${base_name}"-cert-selfsigned.pem
+}
+
+# ct_obtain_input FILE|DIR|URL
+# --------------------
+# Either copies a file or a directory to a tmp location for local copies, or
+# downloads the file from remote location.
+# Resulting file path is printed, so it can be later used by calling function.
+# Arguments: input - local file, directory or remote URL
+function ct_obtain_input() {
+  local input=$1
+  local extension="${input##*.}"
+
+  # Try to use same extension for the temporary file if possible
+  [[ "${extension}" =~ ^[a-z0-9]*$ ]] && extension=".${extension}" || extension=""
+
+  local output
+  output=$(mktemp "/var/tmp/test-input-XXXXXX$extension")
+  if [ -f "${input}" ] ; then
+    cp -f "${input}" "${output}"
+  elif [ -d "${input}" ] ; then
+    rm -f "${output}"
+    cp -r -LH "${input}" "${output}"
+  elif echo "${input}" | grep -qe '^http\(s\)\?://' ; then
+    curl "${input}" > "${output}"
+  else
+    echo "ERROR: file type not known: ${input}" >&2
+    return 1
+  fi
+  echo "${output}"
+}
+
+# ct_test_response
+# ----------------
+# Perform GET request to the application container, checks output with
+# a reg-exp and HTTP response code.
+# Argument: url - request URL path
+# Argument: expected_code - expected HTTP response code
+# Argument: body_regexp - PCRE regular expression that must match the response body
+# Argument: max_attempts - Optional number of attempts (default: 20), three seconds sleep between
+# Argument: ignore_error_attempts - Optional number of attempts when we ignore error output (default: 10)
+ct_test_response() {
+  local url="$1"
+  local expected_code="$2"
+  local body_regexp="$3"
+  local max_attempts=${4:-20}
+  local ignore_error_attempts=${5:-10}
+
+  echo " Testing the HTTP(S) response for <${url}>"
+  local sleep_time=3
+  local attempt=1
+  local result=1
+  local status
+  local response_code
+  local response_file
+  response_file=$(mktemp /tmp/ct_test_response_XXXXXX)
+  while [ "${attempt}" -le "${max_attempts}" ]; do
+    echo "Trying to connect ... ${attempt}"
+    curl --connect-timeout 10 -s -w '%{http_code}' "${url}" >"${response_file}" && status=0 || status=1
+    if [ "${status}" -eq 0 ]; then
+      # -w '%{http_code}' appends the 3-digit status code to the body
+      response_code=$(tail -c 3 "${response_file}")
+      if [ "${response_code}" -eq "${expected_code}" ]; then
+        result=0
+      fi
+      grep -qP -e "${body_regexp}" "${response_file}" || result=1;
+      # Some services return 40x code until they are ready, so let's give them
+      # some chance and not end with failure right away
+      # Do not wait if we already have expected outcome though
+      if [ "${result}" -eq 0 ] || [ "${attempt}" -gt "${ignore_error_attempts}" ] || [ "${attempt}" -eq "${max_attempts}" ] ; then
+        break
+      fi
+    fi
+    attempt=$(( attempt + 1 ))
+    sleep "${sleep_time}"
+  done
+  rm -f "${response_file}"
+  return "${result}"
+}
+
+# ct_registry_from_os OS
+# ----------------
+# Transform operating system string [os] into registry url
+# Argument: OS - string containing the os version
+ct_registry_from_os() {
+  local registry=""
+  case $1 in
+    rhel*)
+      registry=registry.redhat.io
+      ;;
+    *)
+      registry=quay.io
+      ;;
+  esac
+  echo "$registry"
+}
+
+# ct_get_public_image_name OS BASE_IMAGE_NAME VERSION
+# ----------------
+# Transform the arguments into public image name
+# Argument: OS - string containing the os version
+# Argument: BASE_IMAGE_NAME - string containing the base name of the image as defined in the Makefile
+# Argument: VERSION - string containing the version of the image as defined in the Makefile
+ct_get_public_image_name() {
+  local os=$1; shift
+  local base_image_name=$1; shift
+  local version=$1; shift
+
+  local public_image_name
+  local registry
+
+  registry=$(ct_registry_from_os "$os")
+  if [ "$os" == "rhel7" ]; then
+    public_image_name=$registry/rhscl/$base_image_name-${version//./}-rhel7
+  elif [ "$os" == "rhel8" ]; then
+    public_image_name=$registry/rhel8/$base_image_name-${version//./}
+  elif [ "$os" == "rhel9" ]; then
+    public_image_name=$registry/rhel9/$base_image_name-${version//./}
+  elif [ "$os" == "centos7" ]; then
+    public_image_name=$registry/centos7/$base_image_name-${version//./}-centos7
+  elif [ "$os" == "c8s" ]; then
+    public_image_name=$registry/sclorg/$base_image_name-${version//./}-c8s
+  elif [ "$os" == "c9s" ]; then
+    public_image_name=$registry/sclorg/$base_image_name-${version//./}-c9s
+  fi
+
+  echo "$public_image_name"
+}
+
+# ct_assert_cmd_success CMD
+# ----------------
+# Evaluates [cmd] and fails if it does not succeed.
+# Argument: CMD - Command to be run
+function ct_assert_cmd_success() {
+  echo "Checking '$*' for success ..."
+  # shellcheck disable=SC2294
+  if ! eval "$@" &>/dev/null; then
+    echo " FAIL"
+    return 1
+  fi
+  echo " PASS"
+  return 0
+}
+
+# ct_assert_cmd_failure CMD
+# ----------------
+# Evaluates [cmd] and fails if it succeeds.
+# Argument: CMD - Command to be run
+function ct_assert_cmd_failure() {
+  echo "Checking '$*' for failure ..."
+  # shellcheck disable=SC2294
+  if eval "$@" &>/dev/null; then
+    echo " FAIL"
+    return 1
+  fi
+  echo " PASS"
+  return 0
+}
+
+
+# ct_random_string [LENGTH=10]
+# ----------------------------
+# Generate pseudorandom lowercase alphanumeric (a-z, 0-9) string of LENGTH
+# bytes, the default length is 10. The string is printed on stdout.
+ct_random_string()
+(
+  export LC_ALL=C
+  dd if=/dev/urandom count=1 bs=10k 2>/dev/null \
+    | tr -dc 'a-z0-9' \
+    | fold -w "${1-10}" \
+    | head -n 1
+)
+
+# ct_s2i_usage IMG_NAME [S2I_ARGS]
+# ----------------------------
+# Create a container and run the usage script inside
+# Argument: IMG_NAME - name of the image to be used for the container run
+# Argument: S2I_ARGS - Additional list of source-to-image arguments, currently unused.
+ct_s2i_usage()
+{
+  local img_name=$1; shift
+  local s2i_args="$*";
+  local usage_command="/usr/libexec/s2i/usage"
+  docker run --rm "$img_name" bash -c "$usage_command"
+}
+
+# ct_s2i_build_as_df APP_PATH SRC_IMAGE DST_IMAGE [S2I_ARGS]
+# ----------------------------
+# Create a new s2i app image from local sources in a similar way as source-to-image would have used.
+# Argument: APP_PATH - local path to the app sources to be used in the test
+# Argument: SRC_IMAGE - image to be used as a base for the s2i build
+# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result
+# Argument: S2I_ARGS - Additional list of source-to-image arguments.
+# Only used to check for pull-policy=never and environment variable definitions.
+ct_s2i_build_as_df()
+{
+  local app_path=$1; shift
+  local src_image=$1; shift
+  local dst_image=$1; shift
+  local s2i_args="$*";
+  local local_app=upload/src/
+  local local_scripts=upload/scripts/
+  local user_id=
+  local df_name=
+  local tmpdir=
+  local incremental=false
+  local mount_options=()
+  local id_file
+
+  # Run the entire thing inside a subshell so that we do not leak shell options outside of the function
+  (
+  # Error out if any part of the build fails
+  set -e
+
+  # Use /tmp to not pollute cwd
+  tmpdir=$(mktemp -d)
+  df_name=$(mktemp -p "$tmpdir" Dockerfile.XXXX)
+  cd "$tmpdir"
+  # Check if the image is available locally and try to pull it if it is not
+  docker images "$src_image" &>/dev/null || echo "$s2i_args" | grep -q "pull-policy=never" || docker pull "$src_image"
+  user=$(docker inspect -f "{{.Config.User}}" "$src_image")
+  # Default to root if no user is set by the image
+  user=${user:-0}
+  # run the user through the image in case it is non-numeric or does not exist
+  if ! user_id=$(ct_get_uid_from_image "$user" "$src_image"); then
+    echo "Terminating s2i build."
+    return 1
+  fi
+
+  echo "$s2i_args" | grep -q "\-\-incremental" && incremental=true
+  if $incremental; then
+    inc_tmp=$(mktemp -d --tmpdir incremental.XXXX)
+    setfacl -m "u:$user_id:rwx" "$inc_tmp"
+    # Check if the image exists, build should fail (for testing use case) if it does not
+    docker images "$dst_image" &>/dev/null || (echo "Image $dst_image not found."; false)
+    # Run the original image with a mounted in volume and get the artifacts out of it
+    cmd="if [ -s /usr/libexec/s2i/save-artifacts ]; then /usr/libexec/s2i/save-artifacts > \"$inc_tmp/artifacts.tar\"; else touch \"$inc_tmp/artifacts.tar\"; fi"
+    docker run --rm -v "$inc_tmp:$inc_tmp:Z" "$dst_image" bash -c "$cmd"
+    # Move the created content into the $tmpdir for the build to pick it up
+    mv "$inc_tmp/artifacts.tar" "$tmpdir/"
+  fi
+  # Strip file:// from APP_PATH and copy its contents into current context
+  mkdir -p "$local_app"
+  cp -r "${app_path/file:\/\//}/." "$local_app"
+  [ -d "$local_app/.s2i/bin/" ] && mv "$local_app/.s2i/bin" "$local_scripts"
+  # Create a Dockerfile named df_name and fill it with proper content
+  # (the heredoc must be REDIRECTED INTO the file; 'cat <file' would read it instead)
+  #FIXME: Some commands could be combined into a single layer but not sure if worth the trouble for testing purposes
+  cat <<EOF >"$df_name"
+FROM $src_image
+LABEL "io.openshift.s2i.build.image"="$src_image" \\
+ "io.openshift.s2i.build.source-location"="$app_path"
+USER root
+COPY $local_app /tmp/src
+EOF
+  [ -d "$local_scripts" ] && echo "COPY $local_scripts /tmp/scripts" >> "$df_name" &&
+  echo "RUN chown -R $user_id:0 /tmp/scripts" >>"$df_name"
+  echo "RUN chown -R $user_id:0 /tmp/src" >>"$df_name"
+  # Check for custom environment variables inside .s2i/ folder
+  if [ -e "$local_app/.s2i/environment" ]; then
+    # Remove any comments and add the contents as ENV commands to the Dockerfile
+    sed '/^\s*#.*$/d' "$local_app/.s2i/environment" | while read -r line; do
+      echo "ENV $line" >>"$df_name"
+    done
+  fi
+  # Filter out env var definitions from $s2i_args and create Dockerfile ENV commands out of them
+  echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /' >>"$df_name"
+  # Check if CA authority is present on host and add it into Dockerfile
+  [ -f "$(full_ca_file_path)" ] && echo "RUN cd /etc/pki/ca-trust/source/anchors && update-ca-trust extract" >>"$df_name"
+
+  # Add in artifacts if doing an incremental build
+  if $incremental; then
+    { echo "RUN mkdir /tmp/artifacts"
+      echo "ADD artifacts.tar /tmp/artifacts"
+      echo "RUN chown -R $user_id:0 /tmp/artifacts" ; } >>"$df_name"
+  fi
+
+  echo "USER $user_id" >>"$df_name"
+  # If exists, run the custom assemble script, else default to /usr/libexec/s2i/assemble
+  if [ -x "$local_scripts/assemble" ]; then
+    echo "RUN /tmp/scripts/assemble" >>"$df_name"
+  else
+    echo "RUN /usr/libexec/s2i/assemble" >>"$df_name"
+  fi
+  # If exists, set the custom run script as CMD, else default to /usr/libexec/s2i/run
+  if [ -x "$local_scripts/run" ]; then
+    echo "CMD /tmp/scripts/run" >>"$df_name"
+  else
+    echo "CMD /usr/libexec/s2i/run" >>"$df_name"
+  fi
+
+  # Check if -v parameter is present in s2i_args and add it into docker build command
+  read -ra mount_options <<< "$(echo "$s2i_args" | grep -o -e '\(-v\)[[:space:]]\.*\S*' || true)"
+
+  # Run the build and tag the result
+  ct_build_image_and_parse_id "$df_name" "${mount_options[*]+${mount_options[*]}} -t $dst_image ."
+  #shellcheck disable=SC2181
+  if [ "$?" -ne 0 ]; then
+    echo " ERROR: Failed to build $df_name" >&2
+    return 1
+  fi
+  id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM"
+  echo "$APP_IMAGE_ID" > "$id_file"
+  )
+}
+
+# ct_s2i_multistage_build APP_PATH SRC_IMAGE DST_IMAGE SEC_IMAGE [S2I_ARGS]
+# ----------------------------
+# Create a new s2i app image from local sources in a similar way as source-to-image would have used.
+# Argument: APP_PATH - local path to the app sources to be used in the test
+# Argument: SRC_IMAGE - image to be used as a base for the s2i build process
+# Argument: SEC_IMAGE - image to be used as the base for the result of the build process
+# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result
+# Argument: S2I_ARGS - Additional list of source-to-image arguments.
+# Only used to check for environment variable definitions.
+ct_s2i_multistage_build() {
+
+  local app_path=$1; shift
+  local src_image=$1; shift
+  local sec_image=$1; shift
+  local dst_image=$1; shift
+  local s2i_args=$*;
+  local local_app="app-src"
+  local user_id=
+  local mount_options=()
+  local id_file
+
+
+  # Run the entire thing inside a subshell so that we do not leak shell options outside of the function
+  (
+  # Error out if any part of the build fails
+  set -e
+
+  user=$(docker inspect -f "{{.Config.User}}" "$src_image")
+  # Default to root if no user is set by the image
+  user=${user:-0}
+  # run the user through the image in case it is non-numeric or does not exist
+  if ! user_id=$(ct_get_uid_from_image "$user" "$src_image"); then
+    echo "Terminating s2i build."
+    return 1
+  fi
+
+  # Use /tmp to not pollute cwd
+  tmpdir=$(mktemp -d)
+  df_name=$(mktemp -p "$tmpdir" Dockerfile.XXXX)
+  cd "$tmpdir"
+
+  # If the path exists on the local host, copy it into the directory for the build
+  # Otherwise handle it as a link to a git repository
+  if [ -e "${app_path/file:\/\//}/." ] ; then
+    mkdir -p "$local_app"
+    # Strip file:// from APP_PATH and copy its contents into current context
+    cp -r "${app_path/file:\/\//}/." "$local_app"
+
+  else
+    ct_clone_git_repository "$app_path" "$local_app"
+  fi
+
+  # Write the multistage Dockerfile INTO df_name ('cat <file' would read it instead
+  # and leave the heredoc body orphaned)
+  cat <<EOF >"$df_name"
+# First stage builds the application
+FROM $src_image as builder
+# Add application sources to a directory that the assemble script expects them
+# and set permissions so that the container runs without root access
+USER 0
+ADD app-src /tmp/src
+RUN chown -R 1001:0 /tmp/src
+$(echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /')
+# Check if CA autority is present on host and add it into Dockerfile
+$([ -f "$(full_ca_file_path)" ] && echo "RUN cd /etc/pki/ca-trust/source/anchors && update-ca-trust extract")
+USER $user_id
+# Install the dependencies
+RUN /usr/libexec/s2i/assemble
+# Second stage copies the application to the minimal image
+FROM $sec_image
+# Copy the application source and build artifacts from the builder image to this one
+COPY --from=builder \$HOME \$HOME
+# Set the default command for the resulting image
+CMD /usr/libexec/s2i/run
+EOF
+
+  # Check if -v parameter is present in s2i_args and add it into docker build command
+  read -ra mount_options <<< "$(echo "$s2i_args" | grep -o -e '\(-v\)[[:space:]]\.*\S*' || true)"
+
+  ct_build_image_and_parse_id "$df_name" "${mount_options[*]+${mount_options[*]}} -t $dst_image ."
+  #shellcheck disable=SC2181
+  if [ "$?" -ne 0 ]; then
+    echo " ERROR: Failed to build $df_name" >&2
+    return 1
+  fi
+  id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM"
+  echo "$APP_IMAGE_ID" > "$id_file"
+  )
+}
+
+# ct_check_image_availability PUBLIC_IMAGE_NAME
+# ----------------------------
+# Pull an image from the public repositories to see if the image is already available.
+# Argument: PUBLIC_IMAGE_NAME - string containing the public name of the image to pull
+ct_check_image_availability() {
+  local public_image_name=$1;
+
+  # Try pulling the image to see if it is accessible
+  if ! ct_pull_image "$public_image_name" &>/dev/null; then
+    echo "$public_image_name could not be downloaded via 'docker'"
+    return 1
+  fi
+}
+
+# ct_check_latest_imagestreams
+# -----------------------------
+# Check if the latest version present in Makefile in the variable VERSIONS
+# is present in all imagestreams.
+# Also the latest tag in the imagestreams has to contain the latest version
+ct_check_latest_imagestreams() {
+  local latest_version=
+  local test_lib_dir=
+
+  # We only maintain imagestreams for RHEL and CentOS (Community)
+  if [[ "$OS" =~ ^fedora.* ]] ; then
+    echo "Imagestreams for Fedora are not maintained, skipping ct_check_latest_imagestreams"
+    return 0
+  fi
+
+  # Check only lines which starts with VERSIONS
+  latest_version=$(grep '^VERSIONS' Makefile | rev | cut -d ' ' -f 1 | rev )
+  # Fall back to previous version if the latest is excluded for this OS
+  [ -f "$latest_version/.exclude-$OS" ] && latest_version=$(grep '^VERSIONS' Makefile | rev | cut -d ' ' -f 2 | rev )
+  # Only test the imagestream once, when the version matches
+  # ignore the SC warning, $VERSION is always available
+  # shellcheck disable=SC2153
+  if [ "$latest_version" == "$VERSION" ]; then
+    test_lib_dir=$(dirname "$(readlink -f "$0")")
+    python3 "${test_lib_dir}/check_imagestreams.py" "$latest_version"
+  else
+    echo "Image version $VERSION is not latest, skipping ct_check_latest_imagestreams"
+  fi
+}
+
+# ct_show_resources
+# ----------------
+# Prints the available resources
+ct_show_resources()
+{
+  echo
+  echo "$LINE"
+  echo "Resources info:"
+  echo "Memory:"
+  free -h
+  echo "Storage:"
+  df -h || :
+  echo "CPU"
+  lscpu
+
+  echo "$LINE"
+  echo "Image ${IMAGE_NAME} information:"
+  echo "$LINE"
+  echo "Uncompressed size of the image: $(ct_get_image_size_uncompresseed "${IMAGE_NAME}")"
+  echo "Compressed size of the image: $(ct_get_image_size_compresseed "${IMAGE_NAME}")"
+  echo
+}
+
+# ct_clone_git_repository
+# -----------------------------
+# Argument: app_url - git URI pointing to a repository, supports "@" to indicate a different branch
+# Argument: app_dir (optional) - name of the directory to clone the repository into
+ct_clone_git_repository()
+{
+  local app_url=$1; shift
+  local app_dir=$1
+
+  # If app_url contains @, the string after @ is considered
+  # as a name of a branch to clone instead of the main/master branch
+  IFS='@' read -ra git_url_parts <<< "${app_url}"
+
+  # Default the branch part so the test does not fail under 'set -u' when no @ is present
+  if [ -n "${git_url_parts[1]:-}" ]; then
+    git_clone_cmd="git clone --branch ${git_url_parts[1]} ${git_url_parts[0]} ${app_dir}"
+  else
+    git_clone_cmd="git clone ${app_url} ${app_dir}"
+  fi
+
+  if ! $git_clone_cmd ; then
+    echo "ERROR: Git repository ${app_url} cannot be cloned into ${app_dir}."
+    return 1
+  fi
+}
+
+# ct_get_uid_from_image
+# -----------------------------
+# Argument: user - user to get uid for inside the image
+# Argument: src_image - image to use for user information
+ct_get_uid_from_image()
+{
+  local user=$1; shift
+  local src_image=$1
+  local user_id=
+
+  # NOTE: The '-eq' test is used to check if $user is numeric as it will fail if $user is not an integer
+  if ! [ "$user" -eq "$user" ] 2>/dev/null && ! user_id=$(docker run --rm "$src_image" bash -c "id -u $user 2>/dev/null"); then
+    echo "ERROR: id of user $user not found inside image $src_image."
+ return 1 + else + echo "${user_id:-$user}" + fi +} + +# ct_test_app_dockerfile +# ----------------------------- +# Argument: dockerfile - path to a Dockerfile that will be used for building an image +# (must work with an application directory called 'app-src') +# Argument: app_url - git or local URI with a testing application, supports "@" to indicate a different branch +# Argument: body_regexp - PCRE regular expression that must match the response body +# Argument: app_dir - name of the application directory that is used in the Dockerfile +# Argument: port - Optional port number (default: 8080) +ct_test_app_dockerfile() { + local dockerfile=$1 + local app_url=$2 + local expected_text=$3 + local app_dir=$4 # this is a directory that must match with the name in the Dockerfile + local port=${5:-8080} + local app_image_name=myapp + local ret + local cname=app_dockerfile + local id_file + + if [ -z "$app_dir" ] ; then + echo "ERROR: Option app_dir not set. Terminating the Dockerfile build." + return 1 + fi + + if ! [ -r "${dockerfile}" ] || ! [ -s "${dockerfile}" ] ; then + echo "ERROR: Dockerfile ${dockerfile} does not exist or is empty." + echo "Terminating the Dockerfile build." + return 1 + fi + + CID_FILE_DIR=${CID_FILE_DIR:-$(mktemp -d)} + local dockerfile_abs + dockerfile_abs=$(readlink -f "${dockerfile}") + tmpdir=$(mktemp -d) + pushd "$tmpdir" >/dev/null + cp "${dockerfile_abs}" Dockerfile + + # Rewrite the source image to what we test + sed -i -e "s|^FROM.*$|FROM $IMAGE_NAME|" Dockerfile + # a bit more verbose, but should help debugging failures + echo "Using this Dockerfile:" + cat Dockerfile + + if [ -d "$app_url" ] ; then + echo "Copying local folder: $app_url -> $app_dir." + cp -Lr "$app_url" "$app_dir" + else + if ! ct_clone_git_repository "$app_url" "$app_dir" ; then + echo "Terminating the Dockerfile build." + return 1 + fi + fi + + echo "Building '${app_image_name}' image using docker build" + if ! 
ct_build_image_and_parse_id "" "-t ${app_image_name} ." ; then + echo "ERROR: The image cannot be built from ${dockerfile} and application ${app_url}." + echo "Terminating the Dockerfile build." + return 1 + fi + id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM" + echo "$APP_IMAGE_ID" > "$id_file" + + if ! docker run -d --cidfile="${CID_FILE_DIR}/app_dockerfile" --rm "${app_image_name}" ; then + echo "ERROR: The image ${app_image_name} cannot be run for ${dockerfile} and application ${app_url}." + echo "Terminating the Dockerfile build." + return 1 + fi + echo "Waiting for ${app_image_name} to start" + ct_wait_for_cid "${CID_FILE_DIR}/app_dockerfile" + + ip="$(ct_get_cip "${cname}")" + if [ -z "$ip" ]; then + echo "ERROR: Cannot get container's IP address." + return 1 + fi + ct_test_response "http://$ip:${port}" 200 "${expected_text}" + ret=$? + + [[ $ret -eq 0 ]] || docker logs "$(ct_get_cid "${cname}")" + + # cleanup + docker kill "$(ct_get_cid "${cname}")" + sleep 2 + docker rmi "${app_image_name}" + popd >/dev/null + rm -rf "${tmpdir}" + rm -f "${CID_FILE_DIR}/${cname}" + return $ret +} + +# ct_check_testcase_result +# ----------------------------- +# Check if testcase ended in success or error +# Argument: result - testcase result value +# Uses: $TESTCASE_RESULT - result of the testcase +# Uses: $IMAGE_NAME - name of the image being tested +ct_check_testcase_result() { + local result="$1" + if [[ "$result" != "0" ]]; then + echo "Test for image '${IMAGE_NAME}' FAILED (exit code: ${result})" + TESTCASE_RESULT=1 + fi + return "$result" +} + +# ct_update_test_result +# ----------------------------- +# adds result to the $TEST_SUMMARY variable +# Argument: test_msg +# Argument: app_name +# Argument: test_case +# Argument: time_diff (optional) +# Uses: $TEST_SUMMARY - variable for storing test results +ct_update_test_result() { + local test_msg="$1" + local app_name="$2" + local test_case="$3" + local time_diff="${4:-}" + printf -v TEST_SUMMARY "%s %s for '%s' %s (%s)\n"
"${TEST_SUMMARY:-}" "${test_msg}" "${app_name}" "$test_case" "$time_diff" +} + +# ct_run_tests_from_testset +# ----------------------------- +# Runs all tests in $TEST_SET, prints result to +# the $TEST_SUMMARY variable +# Argument: app_name - application name to log +# Uses: $TEST_SET - set of test cases to run +# Uses: $TEST_SUMMARY - variable for storing test results +# Uses: $IMAGE_NAME - name of the image being tested +# Uses: $UNSTABLE_TESTS - set of tests, whose result can be ignored +# Uses: $IGNORE_UNSTABLE_TESTS - flag to ignore unstable tests +ct_run_tests_from_testset() { + local app_name="${1:-appnamenotset}" + local time_beg_pretty + local time_beg + local time_end + local time_diff + local test_msg + local is_unstable + + # Let's store in the log what change do we test + echo + git show -s + echo + + echo "Running tests for image ${IMAGE_NAME}" + + for test_case in $TEST_SET; do + TESTCASE_RESULT=0 + # shellcheck disable=SC2076 + if [[ " ${UNSTABLE_TESTS[*]} " =~ " ${app_name} " ]] || \ + [[ " ${UNSTABLE_TESTS[*]} " =~ " ${test_case} " ]]; then + is_unstable=1 + else + is_unstable=0 + fi + time_beg_pretty=$(ct_timestamp_pretty) + time_beg=$(ct_timestamp_s) + echo "-----------------------------------------------" + echo "Running test $test_case (starting at $time_beg_pretty) ... " + echo "-----------------------------------------------" + $test_case + ct_check_testcase_result $? + time_end=$(ct_timestamp_s) + if [ $TESTCASE_RESULT -eq 0 ]; then + test_msg="[PASSED]" + else + if [ -n "${IGNORE_UNSTABLE_TESTS:-""}" ] && [ $is_unstable -eq 1 ]; then + test_msg="[FAILED][UNSTABLE-IGNORED]" + else + test_msg="[FAILED]" + TESTSUITE_RESULT=1 + fi + fi + # As soon as test is finished + # switch the project from sclorg-test- to default. 
+ if [ "${CT_OCP4_TEST:-false}" == "true" ]; then + oc project default + fi + time_diff=$(ct_timestamp_diff "$time_beg" "$time_end") + ct_update_test_result "${test_msg}" "${app_name}" "$test_case" "$time_diff" + done +} + +# ct_timestamp_s +# -------------- +# Returns timestamp in seconds since unix era -- a large integer +function ct_timestamp_s() { + date '+%s' +} + +# ct_timestamp_pretty +# ----------------- +# Returns timestamp readable to a human, like 2022-05-18 10:52:44+02:00 +function ct_timestamp_pretty() { + date --rfc-3339=seconds +} + +# ct_timestamp_diff +# ----------------- +# Computes a time diff between two timestamps +# Argument: start_date - Beginning (in seconds since unix era -- a large integer) +# Argument: final_date - End (in seconds since unix era -- a large integer) +# Returns: Time difference in format HH:MM:SS +function ct_timestamp_diff() { + local start_date=$1 + local final_date=$2 + date -u -d "0 $final_date seconds - $start_date seconds" +"%H:%M:%S" +} + +# ct_get_image_size_uncompresseed +# ------------------------------- +# Shows uncompressed image size in MB +# Argument: image_name - image locally available +ct_get_image_size_uncompresseed() { + local image_name=$1 + local size_bytes + size_bytes=$(docker inspect "${image_name}" -f '{{.Size}}') + echo "$(( size_bytes / 1024 / 1024 ))MB" +} + +# ct_get_image_size_compresseed +# ------------------------------- +# Shows compressed image size in MB +# This is a slight hack that counts compressed size based on the compressed +# content. It might not be entirely the same as what docker pull shows, but should +# be close enough.
+# Argument: image_name - image locally available +ct_get_image_size_compresseed() { + local image_name=$1 + local size_bytes + size_bytes=$(docker save "${image_name}" | gzip - | wc --bytes) + echo "$(( size_bytes / 1024 / 1024 ))MB" +} + +# vim: set tabstop=2:shiftwidth=2:expandtab: diff --git a/8.1/test/test-openshift.yaml b/8.1/test/test-openshift.yaml new file mode 100644 index 000000000..78f33b628 --- /dev/null +++ b/8.1/test/test-openshift.yaml @@ -0,0 +1,77 @@ +--- + ### + # + # This playbook is used for testing SCLORG images in OpenShift 4 + # by Container Verification Pipeline (CVP). + # + # + # The Ansible log created when this playbook is run is archived by CVP as an artifact. + # + ### +- hosts: all # At runtime this playbook will be executed on a Jenkins slave against 'localhost' + gather_facts: false + tags: + - openshift + + # Here's an example of setting environment vars that will be picked up by + # the runtest.sh shell script below. + environment: + VERSION: VERSION_NUMBER + OS: OS_NUMBER + SHORT_NAME: CONTAINER_NAME + IMAGE_FULL_NAME: "{{ image_full_name }}" + IMAGE_REGISTRY_URL: "{{ image_registry_url }}" + IMAGE_NAMESPACE: "{{ image_namespace }}" + IMAGE_NAME: "{{ image_name }}" + IMAGE_TAG: "{{ image_tag }}" + IMAGE_DIGEST: "{{ image_digest }}" + OPENSHIFT_CLUSTER_URL: "{{ openshift_cluster_url }}" + OPENSHIFT_AUTH_TOKEN: "{{ openshift_auth_token }}" + OPENSHIFT_USERNAME: "{{ openshift_username }}" + OPENSHIFT_PROJECT_NAME: "{{ openshift_project_name }}" + CVP_ARTIFACTS_DIR: "{{ cvp_artifacts_dir }}" + + tasks: + # CVP should have created the artifacts directory already, but it's always good to check. + - name: "Make sure the artifacts directory exists" + file: + path: "{{ cvp_artifacts_dir }}" + state: directory + + # This block is an example of a solely Ansible approach to test a container image in OpenShift. 
+ # It demonstrates how to interact with the unique 'sandbox' project created by CVP in OpenShift + # to import, run, and interact with your container image. + - name: "Run sclorg image name tests in OpenShift 4 environment." + block: + # Log into the cluster where CVP is running + - name: Log into the OpenShift cluster + shell: oc login {{ openshift_cluster_url }} --token="{{ openshift_auth_token }}" --insecure-skip-tls-verify + + # Connect to the newly-created temporary 'sandbox' project in OpenShift to run your tests + - name: Select the project {{ openshift_project_name }} + shell: oc project {{ openshift_project_name }} + + - name: Import the image into OpenShift + shell: oc import-image {{ image_name }}:{{ environment[0]['VERSION'] }} --from={{ image_full_name }} --insecure=true --confirm + retries: 3 + delay: 10 + + - name: Tag image into OpenShift + shell: oc tag {{ image_name }}:{{ environment[0]['VERSION'] }} {{ environment[0]['SHORT_NAME'] }}:{{ environment[0]['VERSION'] }} + retries: 3 + delay: 10 + + # Derive fully qualified image name of your newly imported image for the next step + - name: Get imported image registry URL + shell: oc get is {{ image_name }} --output=jsonpath='{ .status.dockerImageRepository }' + register: imported_image_url + + # Ensure that we can access the /apis/config.openshift.io/v1/clusterversions/version endpoint on OCP4.x + - name: Test the version command on v4.x + shell: oc get clusterversions + register: oc_version_cmd + when: openshift_cluster_version == "v4.x" + + # Run tests on OpenShift 4 + - name: Run a sclorg test suite in OpenShift 4 + shell: VERSION={{ environment[0]['VERSION'] }} IMAGE_NAME={{ image_name }} OS={{ environment[0]['OS'] }} CVP=1 bash {{ playbook_dir }}/run-openshift-remote-cluster | tee {{ cvp_artifacts_dir }}/{{ image_name }}.log From c27e66dfe3c82cdd40ae8dffd8abfe3384cd0e1b Mon Sep 17 00:00:00 2001 From: "Petr \"Stone\" Hracek" Date: Wed, 23 Aug 2023 09:01:18 +0200 Subject: [PATCH 3/4] Fix sources 
in version 8.2 so it uses proper PHP version Signed-off-by: Petr "Stone" Hracek --- 8.2/.exclude-rhel9 | 0 8.2/Dockerfile.c9s | 4 ++-- 8.2/Dockerfile.fedora | 6 +++--- 8.2/Dockerfile.rhel8 | 4 ++-- 8.2/Dockerfile.rhel9 | 4 ++-- 8.2/README.md | 22 +++++++++++----------- Makefile | 2 +- README.md | 5 +++++ 8 files changed, 26 insertions(+), 21 deletions(-) create mode 100644 8.2/.exclude-rhel9 diff --git a/8.2/.exclude-rhel9 b/8.2/.exclude-rhel9 new file mode 100644 index 000000000..e69de29bb diff --git a/8.2/Dockerfile.c9s b/8.2/Dockerfile.c9s index fc4d11748..1c53cd982 100644 --- a/8.2/Dockerfile.c9s +++ b/8.2/Dockerfile.c9s @@ -11,8 +11,8 @@ EXPOSE 8443 # Exposed ports: # * 8080 - alternative port for http -ENV PHP_VERSION=8.1 \ - PHP_VER_SHORT=81 \ +ENV PHP_VERSION=8.2 \ + PHP_VER_SHORT=82 \ NAME=php ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ diff --git a/8.2/Dockerfile.fedora b/8.2/Dockerfile.fedora index ec83b89b5..c831adf74 100644 --- a/8.2/Dockerfile.fedora +++ b/8.2/Dockerfile.fedora @@ -1,4 +1,4 @@ -FROM quay.io/fedora/s2i-base:37 +FROM quay.io/fedora/s2i-base:38 # This image provides an Apache+PHP environment for running PHP # applications. 
@@ -6,8 +6,8 @@ FROM quay.io/fedora/s2i-base:37 EXPOSE 8080 EXPOSE 8443 -ENV PHP_VERSION=8.1 \ - PHP_SHORT_VER=81 \ +ENV PHP_VERSION=8.2 \ + PHP_SHORT_VER=82 \ PATH=$PATH:/usr/bin ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ diff --git a/8.2/Dockerfile.rhel8 b/8.2/Dockerfile.rhel8 index 8032fdddb..ef604c03a 100644 --- a/8.2/Dockerfile.rhel8 +++ b/8.2/Dockerfile.rhel8 @@ -11,8 +11,8 @@ EXPOSE 8443 # Exposed ports: # * 8080 - alternative port for http -ENV PHP_VERSION=8.1 \ - PHP_VER_SHORT=81 \ +ENV PHP_VERSION=8.2 \ + PHP_VER_SHORT=82 \ NAME=php ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ diff --git a/8.2/Dockerfile.rhel9 b/8.2/Dockerfile.rhel9 index 6d64d6886..7d8df973e 100644 --- a/8.2/Dockerfile.rhel9 +++ b/8.2/Dockerfile.rhel9 @@ -11,8 +11,8 @@ EXPOSE 8443 # Exposed ports: # * 8080 - alternative port for http -ENV PHP_VERSION=8.1 \ - PHP_VER_SHORT=81 \ +ENV PHP_VERSION=8.2 \ + PHP_VER_SHORT=82 \ NAME=php ENV SUMMARY="Platform for building and running PHP $PHP_VERSION applications" \ diff --git a/8.2/README.md b/8.2/README.md index 6b4a6345a..f581b5fc3 100644 --- a/8.2/README.md +++ b/8.2/README.md @@ -1,7 +1,7 @@ -PHP 8.1 container image +PHP 8.2 container image ======================= -This container image includes PHP 8.1 as a [S2I](https://github.com/openshift/source-to-image) base image for your PHP 8.1 applications. +This container image includes PHP 8.2 as a [S2I](https://github.com/openshift/source-to-image) base image for your PHP 8.2 applications. Users can choose between RHEL and CentOS Stream based builder images. 
The RHEL UBI images are available in the [Red Hat Container Catalog](https://access.redhat.com/containers/), the CentOS Stream images are available on [Quay.io](https://quay.io/organization/sclorg), @@ -13,8 +13,8 @@ Note: while the examples in this README are calling `podman`, you can replace an Description ----------- -PHP 8.1 available as container is a base platform for -building and running various PHP 8.1 applications and frameworks. +PHP 8.2 available as container is a base platform for +building and running various PHP 8.2 applications and frameworks. PHP is an HTML-embedded scripting language. PHP attempts to make it easy for developers to write dynamically generated web pages. PHP also offers built-in database integration for several commercial and non-commercial database management systems, so writing @@ -28,12 +28,12 @@ the nodejs itself is included just to make the npm work. Usage in OpenShift ------------------ -In this example, we will assume that you are using the `ubi8/php-81` image, available via `php:8.1` imagestream tag in Openshift. +In this example, we will assume that you are using the `ubi8/php-82` image, available via `php:8.2` imagestream tag in Openshift. To build a simple [cakephp-sample-app](https://github.com/sclorg/cakephp-ex.git) application in Openshift: ``` -oc new-app php:8.1~https://github.com/sclorg/cakephp-ex.git +oc new-app php:8.2~https://github.com/sclorg/cakephp-ex.git ``` To access the application: @@ -72,10 +72,10 @@ To use the PHP image in a Dockerfile, follow these steps: #### 1. Pull a base builder image to build on ``` -podman pull ubi8/php-81 +podman pull ubi8/php-82 ``` -An UBI image `ubi8/php-81` is used in this example. This image is usable and freely redistributable under the terms of the UBI End User License Agreement (EULA). See more about UBI at [UBI FAQ](https://developers.redhat.com/articles/ubi-faq). +An UBI image `ubi8/php-82` is used in this example. 
This image is usable and freely redistributable under the terms of the UBI End User License Agreement (EULA). See more about UBI at [UBI FAQ](https://developers.redhat.com/articles/ubi-faq). #### 2. Pull an application code @@ -97,7 +97,7 @@ For all these three parts, users can either setup all manually and use commands ##### 3.1. To use your own setup, create a Dockerfile with this content: ``` -FROM ubi8/php-81 +FROM ubi8/php-82 # Add application sources ADD app-src . @@ -123,7 +123,7 @@ CMD /usr/libexec/s2i/run ##### 3.2. To use the Source-to-Image scripts and build an image using a Dockerfile, create a Dockerfile with this content: ``` -FROM ubi8/php-81 +FROM ubi8/php-82 # Add application sources to a directory that the assemble script expects them # and set permissions so that the container runs without root access @@ -175,7 +175,7 @@ The following environment variables set their equivalent property value in the p * Default: ON * **INCLUDE_PATH** * Path for PHP source files - * Default: .:/opt/app-root/src:/opt/rh/rh-php81/root/usr/share/pear (EL7) + * Default: .:/opt/app-root/src:/opt/rh/rh-php82/root/usr/share/pear (EL7) * Default: .:/opt/app-root/src:/usr/share/pear (EL8, Fedora) * **PHP_MEMORY_LIMIT** * Memory Limit diff --git a/Makefile b/Makefile index ae904dbd2..45155c892 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ # Include common Makefile code. BASE_IMAGE_NAME = php -VERSIONS = 7.3 7.4 8.0 8.1 +VERSIONS = 7.3 7.4 8.0 8.1 8.2 OPENSHIFT_NAMESPACES = # HACK: Ensure that 'git pull' for old clones doesn't cause confusion. 
diff --git a/README.md b/README.md index b519a377f..972a5755f 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ Images available on Quay are: * CentOS Stream 9 [php-74](https://quay.io/repository/sclorg/php-74-c9s) * Fedora [php-80](https://quay.io/repository/fedora/php-80) * Fedora [php-81](https://quay.io/repository/fedora/php-81) +* Fedora [php-82](https://quay.io/repository/fedora/php-82) This repository contains the source for building various versions of the PHP application as a reproducible Docker image using @@ -33,6 +34,7 @@ PHP versions currently supported are: * [php-7.4](7.4) * [php-8.0](8.0) * [php-8.1](8.1) +* [php-8.2](8.2) RHEL versions currently supported are: * RHEL7 @@ -97,6 +99,9 @@ see [usage documentation](8.0/README.md). For information about usage of Dockerfile for PHP 8.1, see [usage documentation](8.1/README.md). +For information about usage of Dockerfile for PHP 8.2, +see [usage documentation](8.2/README.md). + Test ---- This repository also provides a [S2I](https://github.com/openshift/source-to-image) test framework, From aa5fc8cd2e9abcfb36d09e7d155a55db7cf3e993 Mon Sep 17 00:00:00 2001 From: "Petr \"Stone\" Hracek" Date: Fri, 25 Aug 2023 12:22:19 +0200 Subject: [PATCH 4/4] Add missing variable `max_multipart_body_parts` Signed-off-by: Petr "Stone" Hracek --- 8.2/root/opt/app-root/etc/php.ini.template | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/8.2/root/opt/app-root/etc/php.ini.template b/8.2/root/opt/app-root/etc/php.ini.template index efaab65de..25cddd45b 100644 --- a/8.2/root/opt/app-root/etc/php.ini.template +++ b/8.2/root/opt/app-root/etc/php.ini.template @@ -384,6 +384,11 @@ max_input_time = 60 ; How many GET/POST/COOKIE input variables may be accepted ; max_input_vars = 1000 +; How many multipart body parts (combined input variable and file uploads) may +; be accepted. 
+; Default Value: -1 (Sum of max_input_vars and max_file_uploads) +;max_multipart_body_parts = 1500 + ; Maximum amount of memory a script may consume (128MB) ; http://php.net/memory-limit memory_limit = ${PHP_MEMORY_LIMIT}