diff --git a/.github/workflows/nginx.yml b/.github/workflows/nginx.yml index 130fdd3e6..5b6075c22 100644 --- a/.github/workflows/nginx.yml +++ b/.github/workflows/nginx.yml @@ -15,51 +15,47 @@ on: jobs: nginx-build-test: name: nginx - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 strategy: + fail-fast: false matrix: - os: [ubuntu-20.04, ubuntu-18.04, debian-10.11, debian-11.3] - nginx-rel: [mainline, stable] + image: ["ubuntu:24.04", "debian:11", "alpine:3.20", "amazonlinux:2", "amazonlinux:2023"] + nginx: ["1.27.3", "1.27.1", "1.26.2", "1.24.0", "1.22.1"] steps: - name: checkout otel nginx - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: setup run: | sudo ./instrumentation/nginx/ci/setup_environment.sh - - name: generate dockerfile - run: | - cd instrumentation/nginx/test/instrumentation - mix local.hex --force --if-missing - mix local.rebar --force --if-missing - mix deps.get - mix dockerfiles .. ${{ matrix.os }}:${{ matrix.nginx-rel }} - name: setup buildx id: buildx - uses: docker/setup-buildx-action@master + uses: docker/setup-buildx-action@v3 with: install: true - - name: cache docker layers - uses: actions/cache@v3 - with: - path: /tmp/buildx-cache/ - key: nginx-${{ matrix.os }}-${{ matrix.nginx-rel }}-${{ github.sha }} - restore-keys: | - nginx-${{ matrix.os }}-${{ matrix.nginx-rel }} - name: build express backend docker run: | cd instrumentation/nginx - docker buildx build -t otel-nginx-test/express-backend \ + docker build -t otel-nginx-test/express-backend \ -f test/backend/simple_express/Dockerfile \ --cache-from type=local,src=/tmp/buildx-cache/express \ --cache-to type=local,dest=/tmp/buildx-cache/express-new \ --load \ test/backend/simple_express + - name: Choose Dockerfile + run: | + if [[ "${{ matrix.image }}" == "alpine"* ]]; then + echo "dockerfile=Dockerfile_alpine" >> $GITHUB_ENV + elif [[ "${{ matrix.image }}" == "amazonlinux"* ]]; then + echo "dockerfile=Dockerfile_amazonlinux" >> $GITHUB_ENV + else + echo "dockerfile=Dockerfile" >> $GITHUB_ENV + fi - name: build nginx docker run: | cd instrumentation/nginx - docker buildx build -t otel-nginx-test/nginx \ - --build-arg image=$(echo ${{ matrix.os }} | sed s/-/:/) \ - -f test/Dockerfile.${{ matrix.os }}.${{ matrix.nginx-rel }} \ + docker build -t otel-nginx-test/nginx \ + --build-arg image=${{ matrix.image }} \ + -f test/${{ env.dockerfile }} \ --cache-from type=local,src=/tmp/buildx-cache/nginx \ --cache-to type=local,dest=/tmp/buildx-cache/nginx-new \ --load \ @@ -73,18 +69,22 @@ jobs: - name: run tests run: | cd instrumentation/nginx/test/instrumentation + mix local.hex --force --if-missing + mix local.rebar --force --if-missing + mix deps.get mix test - name: copy artifacts id: artifacts run: | cd instrumentation/nginx mkdir -p /tmp/otel_ngx/ - docker buildx build -f test/Dockerfile.${{ matrix.os }}.${{ matrix.nginx-rel}} \ + docker build -f test/${{ env.dockerfile }} \ + --build-arg image=${{ matrix.image }} \ --target export \ - --cache-from type=local,src=/tmp/.buildx-cache \ --output type=local,dest=/tmp/otel_ngx . 
+ echo "artifactName=otel_ngx_module-$(echo ${{ matrix.image }} | sed s/:/-/)-${{ matrix.nginx }}.so" >> $GITHUB_ENV - name: upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: otel_ngx_module-${{ matrix.os }}-${{ matrix.nginx-rel }}.so + name: ${{ env.artifactName }} path: /tmp/otel_ngx/otel_ngx_module.so diff --git a/instrumentation/nginx/.gitignore b/instrumentation/nginx/.gitignore new file mode 100644 index 000000000..84c048a73 --- /dev/null +++ b/instrumentation/nginx/.gitignore @@ -0,0 +1 @@ +/build/ diff --git a/instrumentation/nginx/CMakeLists.txt b/instrumentation/nginx/CMakeLists.txt index 82924354a..9d33d0102 100644 --- a/instrumentation/nginx/CMakeLists.txt +++ b/instrumentation/nginx/CMakeLists.txt @@ -2,18 +2,22 @@ cmake_minimum_required(VERSION 3.12) project(opentelemetry-nginx) -find_package(opentelemetry-cpp REQUIRED) +option(WITH_ABSEIL "Use abseil" OFF) + +find_package(opentelemetry-cpp CONFIG REQUIRED) +find_package(nlohmann_json) find_package(Threads REQUIRED) find_package(Protobuf REQUIRED) -find_package(gRPC REQUIRED) find_package(CURL REQUIRED) +if (WITH_ABSEIL) + find_package(absl REQUIRED) +endif() + include(${CMAKE_CURRENT_SOURCE_DIR}/nginx.cmake) add_library(otel_ngx_module SHARED src/nginx_config.cpp - src/toml.c - src/agent_config.cpp src/trace_context.cpp src/otel_ngx_module.cpp src/otel_ngx_module_modules.c @@ -42,6 +46,5 @@ target_include_directories(otel_ngx_module target_link_libraries(otel_ngx_module PRIVATE ${OPENTELEMETRY_CPP_LIBRARIES} - gRPC::grpc++ CURL::libcurl ) diff --git a/instrumentation/nginx/README.md b/instrumentation/nginx/README.md index 14daae1f2..31478f43a 100644 --- a/instrumentation/nginx/README.md +++ b/instrumentation/nginx/README.md @@ -8,24 +8,18 @@ Supported propagation types: ## Requirements -* OS: Linux. Test suite currently runs on Ubuntu 18.04, 20.04, 20.10. +* OS: Linux. Test suite currently runs on Ubuntu 24.04, Alpine 3.20, Amazon Linux 2 & 2023. * [Nginx](http://nginx.org/en/download.html) - * both stable (`1.18.0`) and mainline (`1.19.8`) * Nginx modules * ngx_http_upstream_module (proxy_pass) * ngx_http_fastcgi_module (fastcgi_pass) -Additional platforms and/or versions coming soon. - - ## Dependencies (for building) -1. [gRPC](https://github.com/grpc/grpc) - currently the only supported exporter is OTLP_GRPC. This requirement will be lifted - once more exporters become available. 2. [opentelemetry-cpp](https://github.com/open-telemetry/opentelemetry-cpp) - opentelemetry-cpp needs to be built with - position independent code and OTLP_GRPC support, e.g.: + position independent code, e.g.: ``` -cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DWITH_OTLP_GRPC=ON .. +cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON .. ``` ## Building @@ -33,11 +27,11 @@ cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DWITH_OTLP_GRPC=ON .. ``` mkdir build cd build -cmake .. +cmake -DNGINX_VERSION=1.27.3 .. make ``` -## Usage +## Quick usage Download the .so file from the latest [GitHub Action run](https://github.com/open-telemetry/opentelemetry-cpp-contrib/actions/workflows/nginx.yml) or follow the instructions above to build. 
Then modify nginx.conf, or see the [example](test/conf/nginx.conf) @@ -45,7 +39,8 @@ Download the .so file from the latest [GitHub Action run](https://github.com/ope load_module /path/to/otel_ngx_module.so; http { - opentelemetry_config /conf/otel-nginx.toml; + opentelemetry_service_name "nginx-proxy"; + opentelemetry_otlp_traces_endpoint "http://collector:4318/v1/traces" server { listen 80; @@ -56,7 +51,7 @@ http { location = / { opentelemetry_operation_name my_example_backend; opentelemetry_propagate; - proxy_pass http://localhost:3500/; + proxy_pass http://localhost:3501/; } location = /b3 { @@ -79,52 +74,6 @@ http { ``` -Example [otel-nginx.toml](test/conf/otel-nginx.toml): -```toml -exporter = "otlp" -processor = "batch" - -[exporters.otlp] -# Alternatively the OTEL_EXPORTER_OTLP_ENDPOINT environment variable can also be used. -host = "localhost" -port = 4317 -# Optional: enable SSL, for endpoints that support it -# use_ssl = true -# Optional: set a filesystem path to a pem file to be used for SSL encryption -# (when use_ssl = true) -# ssl_cert_path = "/path/to/cert.pem" - -[processors.batch] -max_queue_size = 2048 -schedule_delay_millis = 5000 -max_export_batch_size = 512 - -[service] -# Can also be set by the OTEL_SERVICE_NAME environment variable. -name = "nginx-proxy" # Opentelemetry resource name - -[sampler] -name = "AlwaysOn" # Also: AlwaysOff, TraceIdRatioBased -ratio = 0.1 -parent_based = false -``` - -Here's what it would look like if you used the OTLP exporter, but only set the endpoint with an environment variables (e.g. `OTEL_EXPORTER_OTLP_ENDPOINT="localhost:4317"`). -```toml -exporter = "otlp" -processor = "batch" - -[exporters.otlp] - -[processors.batch] -max_queue_size = 2048 -schedule_delay_millis = 5000 -max_export_batch_size = 512 - -[service] -name = "nginx-proxy" # Opentelemetry resource name -``` - To use other environment variables defined in the [specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md), must add the "env" directive. ``` @@ -141,35 +90,34 @@ http { ### `opentelemetry` -Enable or disable OpenTelemetry (default: enabled). +Enable or disable OpenTelemetry (default: `enabled`). - **required**: `false` - **syntax**: `opentelemetry on|off` - **block**: `http`, `server`, `location` -### `opentelemetry_trust_incoming_spans` +### `opentelemetry_service_name` -Enables or disables using spans from incoming requests as parent for created ones. (default: enabled). +Service name for the nginx instance (default: `uknown:nginx`). - **required**: `false` -- **syntax**: `opentelemetry_trust_incoming_spans on|off` -- **block**: `http`, `server`, `location` +- **syntax**: `opentelemetry_service_name ` +- **block**: `http` -### `opentelemetry_attribute` +### `opentelemetry_span_processor` -Adds a custom attribute to the span. It is possible to access nginx variables, e.g. -`opentelemetry_attribute "my.user.agent" "$http_user_agent"`. +Chooses between simple and batch span processor (default: `batch`). - **required**: `false` -- **syntax**: `opentelemetry_attribute ` -- **block**: `http`, `server`, `location` +- **syntax**: `opentelemetry_span_processor simple|batch` +- **block**: `http` -### `opentelemetry_config` +### `opentelemetry_otlp_traces_endpoint` -Exporters, processors +OTLP HTTP traces endpoint. (default: `http://localhost:4318/v1/traces`). 
-- **required**: `true` -- **syntax**: `opentelemetry_config /path/to/config.toml` +- **required**: `false` +- **syntax**: `opentelemetry_otlp_traces_endpoint <endpoint>` +- **block**: `http` ### `opentelemetry_operation_name` @@ -180,6 +128,39 @@ Set the operation name when starting a new span. - **syntax**: `opentelemetry_operation_name <name>` - **block**: `http`, `server`, `location` +### `opentelemetry_traces_sampler` + +Chooses the traces sampler. (default: `parentbased_always_on`). + +- **required**: `false` +- **syntax**: `opentelemetry_traces_sampler always_on|always_off|traceidratio|parentbased_always_on|parentbased_always_off|parentbased_traceidratio` +- **block**: `http` + +### `opentelemetry_traces_sampler_ratio` + +Chooses the trace sampling ratio between `0.0` and `1.0` when a ratio-based sampler is active. (default: `1.0`). + +- **required**: `false` +- **syntax**: `opentelemetry_traces_sampler_ratio <ratio>` +- **block**: `http` + +### `opentelemetry_trust_incoming_spans` + +Enables or disables using spans from incoming requests as parent for created ones. (default: `enabled`). + +- **required**: `false` +- **syntax**: `opentelemetry_trust_incoming_spans on|off` +- **block**: `http`, `server`, `location` + +### `opentelemetry_attribute` + +Adds a custom attribute to the span. It is possible to access nginx variables, e.g. +`opentelemetry_attribute "my.user.agent" "$http_user_agent"`. + +- **required**: `false` +- **syntax**: `opentelemetry_attribute <key> <value>` +- **block**: `http`, `server`, `location` + ### `opentelemetry_propagate` Enable propagation of distributed tracing headers, e.g. `traceparent`. When no parent trace is given, a new trace will @@ -193,7 +174,7 @@ The same inheritance rules as [`proxy_set_header`](http://nginx.org/en/docs/http ### `opentelemetry_capture_headers` -Enables the capturing of request and response headers. (default: disabled). +Enables the capturing of request and response headers. (default: `off`). - **required**: `false` - **syntax**: `opentelemetry_capture_headers on|off` @@ -223,6 +204,33 @@ No span will be created for URIs matching the given regex (case insensitive). - **syntax**: `opentelemetry_ignore_paths <regex>` - **block**: `http`, `server`, `location` +### `opentelemetry_bsp_schedule_delay_millis` + +Only applicable when batch span processor is selected. +Chooses the span batch exporting interval in milliseconds. (default: `5000`) + +- **required**: `false` +- **syntax**: `opentelemetry_bsp_schedule_delay_millis <milliseconds>` +- **block**: `http` + +### `opentelemetry_bsp_max_export_batch_size` + +Only applicable when batch span processor is selected. +Chooses the span export batch size. (default: `512`) + +- **required**: `false` +- **syntax**: `opentelemetry_bsp_max_export_batch_size <size>` +- **block**: `http` + +### `opentelemetry_bsp_max_queue_size` + +Only applicable when batch span processor is selected. +Chooses the span exporter queue size.
(default: `2048`) + +- **required**: `false` +- **syntax**: `opentelemetry_bsp_max_queue_size ` +- **block**: `http` + ## OpenTelemetry attributes List of exported attributes and their corresponding nginx variables if applicable: @@ -245,10 +253,8 @@ List of exported attributes and their corresponding nginx variables if applicabl The following nginx variables are set by the instrumentation: -- `opentelemetry_context_traceparent` - [W3C trace - context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), e.g.: `00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01` -- `opentelemetry_context_b3` - Trace context in the [B3 - format](https://github.com/openzipkin/b3-propagation#single-header). Only set when using `opentelemetry_propagate b3`. +- `opentelemetry_context_traceparent` - [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format), e.g.: `00-0af7651916cd43dd8448eb211c80319c-b9c7c989f97918e1-01` +- `opentelemetry_context_b3` - Trace context in the [B3 format](https://github.com/openzipkin/b3-propagation#single-header). Only set when using `opentelemetry_propagate b3`. - `opentelemetry_trace_id` - Trace Id of the current span - `opentelemetry_span_id` - Span Id of the current span @@ -274,8 +280,7 @@ apk --no-cache add docker-compose docker-cli ``` cd test/instrumentation -mix dockerfiles .. ubuntu-20.04:mainline -docker build -t otel-nginx-test/nginx -f ../Dockerfile.ubuntu-20.04.mainline ../.. +docker build -t otel-nginx-test/nginx -f ../Dockerfile ../.. docker build -t otel-nginx-test/express-backend -f ../backend/simple_express/Dockerfile ../backend/simple_express mix test ``` @@ -283,4 +288,5 @@ mix test ## Troubleshooting ### `otel_ngx_module.so is not binary compatible` -- Make sure your nginx is compiled with `--with-compat` (`nginx -V`). On Ubuntu 18.04 the default nginx (`1.14.0`) from apt does not have compatibility enabled. nginx provides [repositories](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-open-source/#prebuilt_ubuntu) to install more up to date versions. + +Make sure your nginx is compiled with `--with-compat` (`nginx -V`). diff --git a/instrumentation/nginx/ci/setup_environment.sh b/instrumentation/nginx/ci/setup_environment.sh index 1767b6f63..287d4fb14 100755 --- a/instrumentation/nginx/ci/setup_environment.sh +++ b/instrumentation/nginx/ci/setup_environment.sh @@ -3,16 +3,16 @@ export DEBIAN_FRONTEND=noninteractive export TZ="Europe/London" -wget https://packages.erlang-solutions.com/erlang-solutions_2.0_all.deb && dpkg -i erlang-solutions_2.0_all.deb -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - -add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +apt-get install ca-certificates curl +install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +chmod a+r /etc/apt/keyrings/docker.asc + +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null apt-get update apt-get install --no-install-recommends --no-install-suggests -y \ - apt-transport-https ca-certificates curl gnupg-agent software-properties-common \ - python3 esl-erlang elixir docker-ce docker-ce-cli containerd.io -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - -curl -L "https://github.com/docker/compose/releases/download/1.28.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose - -chmod +x /usr/local/bin/docker-compose + erlang elixir docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin diff --git a/instrumentation/nginx/config b/instrumentation/nginx/config index 4ad7737e1..fee33886a 100644 --- a/instrumentation/nginx/config +++ b/instrumentation/nginx/config @@ -9,16 +9,13 @@ ngx_module_deps=" \ $ngx_addon_dir/src/nginx_utils.h \ $ngx_addon_dir/src/propagate.h \ $ngx_addon_dir/src/script.h \ - $ngx_addon_dir/src/toml.h \ $ngx_addon_dir/src/trace_context.h \ " ngx_module_srcs=" \ - $ngx_addon_dir/src/agent_config.cpp \ $ngx_addon_dir/src/nginx_config.cpp \ $ngx_addon_dir/src/otel_ngx_module.cpp \ $ngx_addon_dir/src/propagate.cpp \ $ngx_addon_dir/src/script.cpp \ - $ngx_addon_dir/src/toml.c \ $ngx_addon_dir/src/trace_context.cpp \ " ngx_module_libs=" \ @@ -26,7 +23,7 @@ ngx_module_libs=" \ -lopentelemetry_common \ -lopentelemetry_resources \ -lopentelemetry_trace \ - -lopentelemetry_exporter_otlp_grpc \ + -lopentelemetry_exporter_otlp_http \ -lopentelemetry_otlp_recordable \ " diff --git a/instrumentation/nginx/nginx.cmake b/instrumentation/nginx/nginx.cmake index db1a168ef..f8681972b 100644 --- a/instrumentation/nginx/nginx.cmake +++ b/instrumentation/nginx/nginx.cmake @@ -8,7 +8,7 @@ if (NOT NGINX_VERSION) string(REGEX MATCH "[0-9]+\\.\[0-9]+\\.[0-9]+" NGINX_VER ${NGINX_VERSION_STRING}) else() - set(NGINX_VER "1.18.0") + set(NGINX_VER "1.27.1") endif() set(NGINX_VERSION ${NGINX_VER} CACHE STRING "Nginx version to compile against") diff --git a/instrumentation/nginx/src/agent_config.cpp b/instrumentation/nginx/src/agent_config.cpp deleted file mode 100644 index 2921c2a4d..000000000 --- a/instrumentation/nginx/src/agent_config.cpp +++ /dev/null @@ -1,247 +0,0 @@ -#include "agent_config.h" -#include "toml.h" -#include -#include - -struct ScopedTable { - ScopedTable(toml_table_t* table) : table(table) {} - ~ScopedTable() { toml_free(table); } - - toml_table_t* table; -}; - -static std::string FromStringDatum(toml_datum_t datum) { - std::string val{datum.u.s}; - free(datum.u.s); - return val; -} - -static bool SetupOtlpExporter(toml_table_t* table, ngx_log_t* log, OtelNgxAgentConfig* config) { - const char *otel_exporter_otlp_endpoint_env = "OTEL_EXPORTER_OTLP_ENDPOINT"; - auto endpoint_from_env = std::getenv(otel_exporter_otlp_endpoint_env); - - if (endpoint_from_env) { - config->exporter.endpoint = endpoint_from_env; - return true; - } - - toml_datum_t hostVal = toml_string_in(table, "host"); - toml_datum_t portVal = toml_int_in(table, "port"); - - if (!hostVal.ok) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Missing required host field for OTLP exporter"); - return false; - } - - std::string host = FromStringDatum(hostVal); - - if (!portVal.ok) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Missing required port field for OTLP exporter"); - return false; - } - - config->exporter.endpoint = host + ":" + std::to_string(portVal.u.i);; - - toml_datum_t useSSLVal = toml_bool_in(table, "use_ssl"); - 
if (useSSLVal.ok) { - config->exporter.use_ssl_credentials = useSSLVal.u.b; - - toml_datum_t certPathVal = toml_string_in(table, "ssl_cert_path"); - if (certPathVal.ok) { - config->exporter.ssl_credentials_cacert_path = FromStringDatum(certPathVal); - } - } - - return true; -} - -static bool SetupExporter(toml_table_t* root, ngx_log_t* log, OtelNgxAgentConfig* config) { - toml_datum_t exporterVal = toml_string_in(root, "exporter"); - - if (!exporterVal.ok) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Missing required exporter field"); - return false; - } - - std::string exporter = FromStringDatum(exporterVal); - - toml_table_t* exporters = toml_table_in(root, "exporters"); - - if (!exporters) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Unable to find exporters table"); - return false; - } - - if (exporter == "otlp") { - toml_table_t* otlp = toml_table_in(exporters, "otlp"); - - if (!otlp) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Unable to find exporters.otlp"); - return false; - } - - if (!SetupOtlpExporter(otlp, log, config)) { - return false; - } - - config->exporter.type = OtelExporterOTLP; - } else { - ngx_log_error(NGX_LOG_ERR, log, 0, "Unsupported exporter %s", exporter.c_str()); - return false; - } - - return true; -} - -static bool SetupService(toml_table_t* root, ngx_log_t*, OtelNgxAgentConfig* config) { - const char *otel_service_name_env = "OTEL_SERVICE_NAME"; - auto service_name_from_env = std::getenv(otel_service_name_env); - - if (service_name_from_env) { - config->service.name = service_name_from_env; - return true; - } - - toml_table_t* service = toml_table_in(root, "service"); - - if (service) { - toml_datum_t serviceName = toml_string_in(service, "name"); - - if (serviceName.ok) { - config->service.name = FromStringDatum(serviceName); - } - } - - return true; -} - -static bool SetupProcessor(toml_table_t* root, ngx_log_t* log, OtelNgxAgentConfig* config) { - toml_datum_t processorVal = toml_string_in(root, "processor"); - - if (!processorVal.ok) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Unable to find required processor field"); - return false; - } - - std::string processor = FromStringDatum(processorVal); - - if (processor != "batch") { - config->processor.type = OtelProcessorSimple; - return true; - } - - config->processor.type = OtelProcessorBatch; - - toml_table_t* processors = toml_table_in(root, "processors"); - - if (!processors) { - // Go with the default batch processor config - return true; - } - - toml_table_t* batchProcessor = toml_table_in(processors, "batch"); - - if (!batchProcessor) { - return true; - } - - toml_datum_t maxQueueSize = toml_int_in(batchProcessor, "max_queue_size"); - - if (maxQueueSize.ok) { - config->processor.batch.maxQueueSize = std::max(int64_t(1), maxQueueSize.u.i); - } - - toml_datum_t scheduleDelayMillis = toml_int_in(batchProcessor, "schedule_delay_millis"); - - if (scheduleDelayMillis.ok) { - config->processor.batch.scheduleDelayMillis = std::max(int64_t(0), scheduleDelayMillis.u.i); - } - - toml_datum_t maxExportBatchSize = toml_int_in(batchProcessor, "max_export_batch_size"); - - if (maxExportBatchSize.ok) { - config->processor.batch.maxExportBatchSize = std::max(int64_t(1), maxExportBatchSize.u.i); - } - - return true; -} - -static bool SetupSampler(toml_table_t* root, ngx_log_t* log, OtelNgxAgentConfig* config) { - toml_table_t* sampler = toml_table_in(root, "sampler"); - - if (!sampler) { - return true; - } - - toml_datum_t samplerNameVal = toml_string_in(sampler, "name"); - - if (samplerNameVal.ok) { - std::string samplerName = 
FromStringDatum(samplerNameVal); - - if (samplerName == "AlwaysOn") { - config->sampler.type = OtelSamplerAlwaysOn; - } else if (samplerName == "AlwaysOff") { - config->sampler.type = OtelSamplerAlwaysOff; - } else if (samplerName == "TraceIdRatioBased") { - config->sampler.type = OtelSamplerTraceIdRatioBased; - - toml_datum_t ratio = toml_double_in(sampler, "ratio"); - - if (ratio.ok) { - config->sampler.ratio = ratio.u.d; - } else { - ngx_log_error(NGX_LOG_ERR, log, 0, "TraceIdRatioBased requires a ratio to be specified"); - return false; - } - } else { - ngx_log_error(NGX_LOG_ERR, log, 0, "Unsupported sampler %s", samplerName.c_str()); - return false; - } - } - - toml_datum_t parentBased = toml_bool_in(sampler, "parent_based"); - - if (parentBased.ok) { - config->sampler.parentBased = parentBased.u.b; - } - - return true; -} - -bool OtelAgentConfigLoad(const std::string& path, ngx_log_t* log, OtelNgxAgentConfig* config) { - FILE* confFile = fopen(path.c_str(), "r"); - - if (!confFile) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Unable to open agent config file at %s", path.c_str()); - return false; - } - - char errBuf[256] = {0}; - ScopedTable scopedConf{toml_parse_file(confFile, errBuf, sizeof(errBuf))}; - fclose(confFile); - - if (!scopedConf.table) { - ngx_log_error(NGX_LOG_ERR, log, 0, "Configuration error: %s", errBuf); - return false; - } - - toml_table_t* root = scopedConf.table; - - if (!SetupExporter(root, log, config)) { - return false; - } - - if (!SetupService(root, log, config)) { - return false; - } - - if (!SetupProcessor(root, log, config)) { - return false; - } - - if (!SetupSampler(root, log, config)) { - return false; - } - - return true; -} diff --git a/instrumentation/nginx/src/agent_config.h b/instrumentation/nginx/src/agent_config.h index 1f447ddb4..2e667c5dd 100644 --- a/instrumentation/nginx/src/agent_config.h +++ b/instrumentation/nginx/src/agent_config.h @@ -6,37 +6,36 @@ extern "C" { #include } -enum OtelExporterType { OtelExporterOTLP, OtelExporterJaeger }; -enum OtelProcessorType { OtelProcessorSimple, OtelProcessorBatch }; -enum OtelSamplerType { OtelSamplerAlwaysOn, OtelSamplerAlwaysOff, OtelSamplerTraceIdRatioBased }; +enum OtelProcessorType +{ + OtelProcessorSimple, + OtelProcessorBatch +}; -struct OtelNgxAgentConfig { - struct { - OtelExporterType type = OtelExporterOTLP; +struct OtelNgxAgentConfig +{ + struct + { std::string endpoint; - bool use_ssl_credentials = false; - std::string ssl_credentials_cacert_path = ""; } exporter; - struct { - std::string name = "unknown:nginx"; + struct + { + std::string name; } service; - struct { - OtelProcessorType type = OtelProcessorSimple; + struct + { + OtelProcessorType type = OtelProcessorBatch; - struct { - uint32_t maxQueueSize = 2048; - uint32_t maxExportBatchSize = 512; + struct + { + uint32_t maxQueueSize = 2048; + uint32_t maxExportBatchSize = 512; uint32_t scheduleDelayMillis = 5000; } batch; } processor; - struct { - OtelSamplerType type = OtelSamplerAlwaysOn; - bool parentBased = false; - double ratio = 0; - } sampler; + std::string sampler = "parentbased_always_on"; + double samplerRatio = 1.0; }; - -bool OtelAgentConfigLoad(const std::string& path, ngx_log_t* log, OtelNgxAgentConfig* config); diff --git a/instrumentation/nginx/src/otel_ngx_module.cpp b/instrumentation/nginx/src/otel_ngx_module.cpp index 5b2aa218b..7023c580a 100644 --- a/instrumentation/nginx/src/otel_ngx_module.cpp +++ b/instrumentation/nginx/src/otel_ngx_module.cpp @@ -1,13 +1,9 @@ -// clang-format off -// otlp_grpc_exporter header has 
to be included before any other API header to -// avoid conflict between Abseil library and OpenTelemetry C++ absl::variant. -// https://github.com/open-telemetry/opentelemetry-cpp/tree/main/examples/otlp#additional-notes-regarding-abseil-library -#include -// clang-format on - #include +#include #include #include +#include +#include #include #include @@ -15,6 +11,7 @@ extern "C" { #include #include #include +#include extern ngx_module_t otel_ngx_module; } @@ -29,13 +26,16 @@ extern ngx_module_t otel_ngx_module; #include #include #include -#include -#include -#include +#include +#include +#include #include #include +#include + #include #include +#include namespace trace = opentelemetry::trace; namespace nostd = opentelemetry::nostd; @@ -98,8 +98,8 @@ static void OtelCaptureHeaders(nostd::shared_ptr spa continue; } - u_char key[keyPrefix.len + header[i].key.len]; - NgxNormalizeAndCopyString((u_char*)ngx_copy(key, keyPrefix.data, keyPrefix.len), header[i].key); + std::vector key(keyPrefix.len + header[i].key.len, 0); + NgxNormalizeAndCopyString((u_char*)ngx_copy(key.data(), keyPrefix.data, keyPrefix.len), header[i].key); bool sensitiveHeader = false; #if (NGX_PCRE) @@ -124,7 +124,7 @@ static void OtelCaptureHeaders(nostd::shared_ptr spa value = FromNgxString(header[i].value); } - span->SetAttribute({(const char*)key, keyPrefix.len + header[i].key.len}, nostd::span(&value, 1)); + span->SetAttribute({(const char*)key.data(), keyPrefix.len + header[i].key.len}, nostd::span(&value, 1)); } } } @@ -202,7 +202,7 @@ TraceContext* GetTraceContext(ngx_http_request_t* req) { } std::unordered_map* map = (std::unordered_map*)val->data; - if (map == nullptr){ + if (map == nullptr) { ngx_log_error(NGX_LOG_INFO, req->connection->log, 0, "TraceContext not found"); return nullptr; } @@ -266,6 +266,8 @@ OtelGetTraceContextVar(ngx_http_request_t* req, ngx_http_variable_value_t* v, ui static ngx_int_t OtelGetTraceId(ngx_http_request_t* req, ngx_http_variable_value_t* v, uintptr_t data) { + (void)data; + if (!IsOtelEnabled(req)) { v->valid = 0; v->not_found = 1; @@ -321,6 +323,7 @@ OtelGetTraceId(ngx_http_request_t* req, ngx_http_variable_value_t* v, uintptr_t static ngx_int_t OtelGetSpanId(ngx_http_request_t* req, ngx_http_variable_value_t* v, uintptr_t data) { + (void)data; if (!IsOtelEnabled(req)) { v->valid = 0; v->not_found = 1; @@ -410,6 +413,10 @@ OtelMainConf* GetOtelMainConf(ngx_http_request_t* req) { return (OtelMainConf*)ngx_http_get_module_main_conf(req, otel_ngx_module); } +OtelMainConf* GetOtelMainConf(ngx_conf_t* conf) { + return (OtelMainConf*)ngx_http_conf_get_module_main_conf(conf, otel_ngx_module); +} + nostd::string_view GetNgxServerName(const ngx_http_request_t* req) { ngx_http_core_srv_conf_t* cscf = (ngx_http_core_srv_conf_t*)ngx_http_get_module_srv_conf(req, ngx_http_core_module); @@ -598,8 +605,7 @@ static ngx_int_t InitModule(ngx_conf_t* conf) { *ngx_handler = ph.handler; } - OtelMainConf* otelMainConf = - (OtelMainConf*)ngx_http_conf_get_module_main_conf(conf, otel_ngx_module); + OtelMainConf* otelMainConf = GetOtelMainConf(conf); if (!otelMainConf) { return NGX_ERROR; @@ -836,19 +842,148 @@ char* OtelNgxSetPropagation(ngx_conf_t* conf, ngx_command_t*, void* locConf) { return NGX_CONF_OK; } -char* OtelNgxSetConfig(ngx_conf_t* conf, ngx_command_t*, void*) { - OtelMainConf* mainConf = (OtelMainConf*)ngx_http_conf_get_module_main_conf(conf, otel_ngx_module); +char* OtelNgxSetServiceName(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); - 
ngx_str_t* values = (ngx_str_t*)conf->args->elts; - ngx_str_t* path = &values[1]; + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* name = &values[1]; - if (!OtelAgentConfigLoad( - std::string((const char*)path->data, path->len), conf->log, &mainConf->agentConfig)) { - return (char*)NGX_CONF_ERROR; + otelMainConf->agentConfig.service.name = std::string((const char*)name->data, name->len); + + return NGX_CONF_OK; +} + +char* OtelNgxSetEndpoint(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); + + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* name = &values[1]; + + otelMainConf->agentConfig.exporter.endpoint = std::string((const char*)name->data, name->len); + + return NGX_CONF_OK; +} + +char* OtelNgxSetSpanProcessorType(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); + + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* name = &values[1]; + + std::string type = std::string((const char*)name->data, name->len); + + if (type == "simple") { + otelMainConf->agentConfig.processor.type = OtelProcessorSimple; + return NGX_CONF_OK; + } + + if (type == "batch") { + otelMainConf->agentConfig.processor.type = OtelProcessorBatch; + return NGX_CONF_OK; + } + + return (char*)NGX_CONF_ERROR; +} + +char* OtelNgxSetBspMaxQueueSize(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); + + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* value = &values[1]; + + std::string strValue((const char*)value->data, value->len); + int32_t v = atoi(strValue.c_str()); + + if (v <= 0) { + ngx_log_error(NGX_LOG_ERR, cf->log, 0, "opentelemetry: max bsp queue size can't be <= 0"); + } else { + otelMainConf->agentConfig.processor.batch.maxQueueSize = v; } return NGX_CONF_OK; } + +char* OtelNgxSetBspScheduleDelayMillis(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); + + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* value = &values[1]; + + std::string strValue((const char*)value->data, value->len); + int32_t v = atoi(strValue.c_str()); + + if (v <= 0) { + ngx_log_error(NGX_LOG_ERR, cf->log, 0, "opentelemetry: bsp schedule delay can't be <= 0"); + } else { + otelMainConf->agentConfig.processor.batch.scheduleDelayMillis = v; + } + + return NGX_CONF_OK; +} + +char* OtelNgxSetBspMaxExportBatchSize(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); + + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* value = &values[1]; + + std::string strValue((const char*)value->data, value->len); + int32_t v = atoi(strValue.c_str()); + + if (v <= 0) { + ngx_log_error(NGX_LOG_ERR, cf->log, 0, "opentelemetry: bsp export batch size can't be <= 0"); + } else { + otelMainConf->agentConfig.processor.batch.maxExportBatchSize = v; + } + + return NGX_CONF_OK; +} + +char* OtelNgxSetTracesSampler(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); + + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* value = &values[1]; + + std::string strSampler((const char*)value->data, value->len); + + const std::vector<std::string> knownSamplers = { + "always_on", + "always_off", + "traceidratio", + "parentbased_always_on", + "parentbased_always_off", + "parentbased_traceidratio" + }; + + bool isValidSampler = false; + for (const auto& knownSampler : knownSamplers) { + if (strSampler == knownSampler) { + isValidSampler = true; + break; + } + }
+ if (isValidSampler) { + otelMainConf->agentConfig.sampler = strSampler; + } else { + ngx_log_error(NGX_LOG_ERR, cf->log, 0, "opentelemetry: unknown sampler %V", values); + } + + return NGX_CONF_OK; +} + +char* OtelNgxSetTracesSamplerRatio(ngx_conf_t* cf, ngx_command_t*, void*) { + OtelMainConf* otelMainConf = GetOtelMainConf(cf); + + ngx_str_t* values = (ngx_str_t*)cf->args->elts; + ngx_str_t* value = &values[1]; + + std::string strRatio((const char*)value->data, value->len); + + otelMainConf->agentConfig.samplerRatio = std::min(1.0, std::max(0.0, atof(strRatio.c_str()))); + return NGX_CONF_OK; +} static char* OtelNgxSetCustomAttribute(ngx_conf_t* conf, ngx_command_t*, void* userConf) { OtelNgxLocationConf* locConf = (OtelNgxLocationConf*)userConf; @@ -953,14 +1088,6 @@ static ngx_command_t kOtelNgxCommands[] = { 0, nullptr, }, - { - ngx_string("opentelemetry_config"), - NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, - OtelNgxSetConfig, - NGX_HTTP_LOC_CONF_OFFSET, - 0, - nullptr, - }, { ngx_string("opentelemetry_attribute"), NGX_HTTP_MAIN_CONF | NGX_HTTP_SRV_CONF | NGX_HTTP_LOC_CONF | NGX_CONF_TAKE2, @@ -977,6 +1104,14 @@ static ngx_command_t kOtelNgxCommands[] = { offsetof(OtelNgxLocationConf, enabled), nullptr, }, + { + ngx_string("opentelemetry_service_name"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetServiceName, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, { ngx_string("opentelemetry_trust_incoming_spans"), NGX_HTTP_MAIN_CONF | NGX_HTTP_SRV_CONF | NGX_HTTP_LOC_CONF | NGX_CONF_TAKE1, @@ -993,6 +1128,62 @@ static ngx_command_t kOtelNgxCommands[] = { offsetof(OtelNgxLocationConf, captureHeaders), nullptr, }, + { + ngx_string("opentelemetry_otlp_traces_endpoint"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetEndpoint, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, + { + ngx_string("opentelemetry_span_processor"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetSpanProcessorType, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, + { + ngx_string("opentelemetry_bsp_max_queue_size"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetBspMaxQueueSize, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, + { + ngx_string("opentelemetry_bsp_schedule_delay_millis"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetBspScheduleDelayMillis, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, + { + ngx_string("opentelemetry_bsp_max_export_batch_size"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetBspMaxExportBatchSize, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, + { + ngx_string("opentelemetry_traces_sampler"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetTracesSampler, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, + { + ngx_string("opentelemetry_traces_sampler_ratio"), + NGX_HTTP_MAIN_CONF | NGX_CONF_TAKE1, + OtelNgxSetTracesSamplerRatio, + NGX_HTTP_MAIN_CONF_OFFSET, + 0, + nullptr, + }, #if (NGX_PCRE) { ngx_string("opentelemetry_sensitive_header_names"), @@ -1025,19 +1216,10 @@ static ngx_command_t kOtelNgxCommands[] = { static std::unique_ptr CreateExporter(const OtelNgxAgentConfig* conf) { std::unique_ptr exporter; - switch (conf->exporter.type) { - case OtelExporterOTLP: { - std::string endpoint = conf->exporter.endpoint; - otlp::OtlpGrpcExporterOptions opts; - opts.endpoint = endpoint; - opts.use_ssl_credentials = conf->exporter.use_ssl_credentials; - opts.ssl_credentials_cacert_path = conf->exporter.ssl_credentials_cacert_path; - exporter.reset(new otlp::OtlpGrpcExporter(opts)); - break; - } - default: - break; - } + std::string endpoint = 
conf->exporter.endpoint; + otlp::OtlpHttpExporterOptions opts; + opts.url = endpoint.empty() ? opts.url : endpoint; + exporter.reset(new otlp::OtlpHttpExporter(opts)); return exporter; } @@ -1060,48 +1242,41 @@ CreateProcessor(const OtelNgxAgentConfig* conf, std::unique_ptr CreateSampler(const OtelNgxAgentConfig* conf) { - if (conf->sampler.parentBased) { - std::shared_ptr sampler; + if (conf->sampler == "always_on") { + return sdktrace::AlwaysOnSamplerFactory::Create(); + } - switch (conf->sampler.type) { - case OtelSamplerAlwaysOn: { - sampler = std::make_shared(); - break; - } - case OtelSamplerAlwaysOff: { - sampler = std::make_shared(); - break; - } - case OtelSamplerTraceIdRatioBased: { - sampler = std::make_shared(conf->sampler.ratio); - break; - } - default: - break; - } + if (conf->sampler == "always_off") { + return sdktrace::AlwaysOffSamplerFactory::Create(); + } - return std::unique_ptr(new sdktrace::ParentBasedSampler(sampler)); + if (conf->sampler == "traceidratio") { + return sdktrace::TraceIdRatioBasedSamplerFactory::Create(conf->samplerRatio); } - std::unique_ptr sampler; + if (conf->sampler == "parentbased_always_on") { + return sdktrace::ParentBasedSamplerFactory::Create(std::make_shared()); + } - switch (conf->sampler.type) { - case OtelSamplerAlwaysOn: { - sampler.reset(new sdktrace::AlwaysOnSampler()); - break; - } - case OtelSamplerAlwaysOff: { - sampler.reset(new sdktrace::AlwaysOffSampler()); - break; - } - case OtelSamplerTraceIdRatioBased: { - sampler.reset(new sdktrace::TraceIdRatioBasedSampler(conf->sampler.ratio)); - break; - } - default: - break; + if (conf->sampler == "parentbased_always_off") { + return sdktrace::ParentBasedSamplerFactory::Create(std::make_shared()); + } + + if (conf->sampler == "parentbased_traceidratio") { + return sdktrace::ParentBasedSamplerFactory::Create(std::make_shared(conf->samplerRatio)); } - return sampler; + + return sdktrace::AlwaysOnSamplerFactory::Create(); +} + +static std::string getEnvValue(const char* key, const std::string& defaultValue) { + const char* envValue = std::getenv(key); + + if (envValue) { + return envValue; + } + + return defaultValue; } static ngx_int_t OtelNgxStart(ngx_cycle_t* cycle) { @@ -1124,11 +1299,17 @@ static ngx_int_t OtelNgxStart(ngx_cycle_t* cycle) { return NGX_ERROR; } + std::string serviceName = agentConf->service.name; + + if (serviceName.empty()) { + serviceName = getEnvValue("OTEL_SERVICE_NAME", "unknown:nginx"); + } + auto processor = CreateProcessor(agentConf, std::move(exporter)); auto provider = nostd::shared_ptr(new sdktrace::TracerProvider( std::move(processor), - opentelemetry::sdk::resource::Resource::Create({{"service.name", agentConf->service.name}}), + opentelemetry::sdk::resource::Resource::Create({{"service.name", serviceName}}), std::move(sampler))); opentelemetry::trace::Provider::SetTracerProvider(std::move(provider)); diff --git a/instrumentation/nginx/src/toml.c b/instrumentation/nginx/src/toml.c deleted file mode 100644 index bb899d5d4..000000000 --- a/instrumentation/nginx/src/toml.c +++ /dev/null @@ -1,2247 +0,0 @@ -/* - - MIT License - - Copyright (c) 2017 - 2019 CK Tan - https://github.com/cktan/tomlc99 - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom 
the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - -*/ -#define _POSIX_C_SOURCE 200809L -#include -#include -#include -#include -#include -#include -#include -#include -#include "toml.h" - - -static void* (*ppmalloc)(size_t) = malloc; -static void (*ppfree)(void*) = free; - -void toml_set_memutil(void* (*xxmalloc)(size_t), - void (*xxfree)(void*)) -{ - if (xxmalloc) ppmalloc = xxmalloc; - if (xxfree) ppfree = xxfree; -} - - -#define MALLOC(a) ppmalloc(a) -#define FREE(a) ppfree(a) - -static void* CALLOC(size_t nmemb, size_t sz) -{ - int nb = sz * nmemb; - void* p = MALLOC(nb); - if (p) { - memset(p, 0, nb); - } - return p; -} - - -static char* STRDUP(const char* s) -{ - int len = strlen(s); - char* p = MALLOC(len+1); - if (p) { - memcpy(p, s, len); - p[len] = 0; - } - return p; -} - -static char* STRNDUP(const char* s, size_t n) -{ - size_t len = strnlen(s, n); - char* p = MALLOC(len+1); - if (p) { - memcpy(p, s, len); - p[len] = 0; - } - return p; -} - - - -/** - * Convert a char in utf8 into UCS, and store it in *ret. - * Return #bytes consumed or -1 on failure. - */ -int toml_utf8_to_ucs(const char* orig, int len, int64_t* ret) -{ - const unsigned char* buf = (const unsigned char*) orig; - unsigned i = *buf++; - int64_t v; - - /* 0x00000000 - 0x0000007F: - 0xxxxxxx - */ - if (0 == (i >> 7)) { - if (len < 1) return -1; - v = i; - return *ret = v, 1; - } - /* 0x00000080 - 0x000007FF: - 110xxxxx 10xxxxxx - */ - if (0x6 == (i >> 5)) { - if (len < 2) return -1; - v = i & 0x1f; - for (int j = 0; j < 1; j++) { - i = *buf++; - if (0x2 != (i >> 6)) return -1; - v = (v << 6) | (i & 0x3f); - } - return *ret = v, (const char*) buf - orig; - } - - /* 0x00000800 - 0x0000FFFF: - 1110xxxx 10xxxxxx 10xxxxxx - */ - if (0xE == (i >> 4)) { - if (len < 3) return -1; - v = i & 0x0F; - for (int j = 0; j < 2; j++) { - i = *buf++; - if (0x2 != (i >> 6)) return -1; - v = (v << 6) | (i & 0x3f); - } - return *ret = v, (const char*) buf - orig; - } - - /* 0x00010000 - 0x001FFFFF: - 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - */ - if (0x1E == (i >> 3)) { - if (len < 4) return -1; - v = i & 0x07; - for (int j = 0; j < 3; j++) { - i = *buf++; - if (0x2 != (i >> 6)) return -1; - v = (v << 6) | (i & 0x3f); - } - return *ret = v, (const char*) buf - orig; - } - - /* 0x00200000 - 0x03FFFFFF: - 111110xx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx - */ - if (0x3E == (i >> 2)) { - if (len < 5) return -1; - v = i & 0x03; - for (int j = 0; j < 4; j++) { - i = *buf++; - if (0x2 != (i >> 6)) return -1; - v = (v << 6) | (i & 0x3f); - } - return *ret = v, (const char*) buf - orig; - } - - /* 0x04000000 - 0x7FFFFFFF: - 1111110x 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx - */ - if (0x7e == (i >> 1)) { - if (len < 6) return -1; - v = i & 0x01; - for (int j = 0; j < 5; j++) { - i = *buf++; - if (0x2 != (i >> 6)) return -1; - v = (v << 6) | (i & 0x3f); - } - return *ret = v, (const char*) buf - orig; - } - return -1; -} - - 
-/** - * Convert a UCS char to utf8 code, and return it in buf. - * Return #bytes used in buf to encode the char, or - * -1 on error. - */ -int toml_ucs_to_utf8(int64_t code, char buf[6]) -{ - /* http://stackoverflow.com/questions/6240055/manually-converting-unicode-codepoints-into-utf-8-and-utf-16 */ - /* The UCS code values 0xd800–0xdfff (UTF-16 surrogates) as well - * as 0xfffe and 0xffff (UCS noncharacters) should not appear in - * conforming UTF-8 streams. - */ - if (0xd800 <= code && code <= 0xdfff) return -1; - if (0xfffe <= code && code <= 0xffff) return -1; - - /* 0x00000000 - 0x0000007F: - 0xxxxxxx - */ - if (code < 0) return -1; - if (code <= 0x7F) { - buf[0] = (unsigned char) code; - return 1; - } - - /* 0x00000080 - 0x000007FF: - 110xxxxx 10xxxxxx - */ - if (code <= 0x000007FF) { - buf[0] = 0xc0 | (code >> 6); - buf[1] = 0x80 | (code & 0x3f); - return 2; - } - - /* 0x00000800 - 0x0000FFFF: - 1110xxxx 10xxxxxx 10xxxxxx - */ - if (code <= 0x0000FFFF) { - buf[0] = 0xe0 | (code >> 12); - buf[1] = 0x80 | ((code >> 6) & 0x3f); - buf[2] = 0x80 | (code & 0x3f); - return 3; - } - - /* 0x00010000 - 0x001FFFFF: - 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - */ - if (code <= 0x001FFFFF) { - buf[0] = 0xf0 | (code >> 18); - buf[1] = 0x80 | ((code >> 12) & 0x3f); - buf[2] = 0x80 | ((code >> 6) & 0x3f); - buf[3] = 0x80 | (code & 0x3f); - return 4; - } - - /* 0x00200000 - 0x03FFFFFF: - 111110xx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx - */ - if (code <= 0x03FFFFFF) { - buf[0] = 0xf8 | (code >> 24); - buf[1] = 0x80 | ((code >> 18) & 0x3f); - buf[2] = 0x80 | ((code >> 12) & 0x3f); - buf[3] = 0x80 | ((code >> 6) & 0x3f); - buf[4] = 0x80 | (code & 0x3f); - return 5; - } - - /* 0x04000000 - 0x7FFFFFFF: - 1111110x 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx 10xxxxxx - */ - if (code <= 0x7FFFFFFF) { - buf[0] = 0xfc | (code >> 30); - buf[1] = 0x80 | ((code >> 24) & 0x3f); - buf[2] = 0x80 | ((code >> 18) & 0x3f); - buf[3] = 0x80 | ((code >> 12) & 0x3f); - buf[4] = 0x80 | ((code >> 6) & 0x3f); - buf[5] = 0x80 | (code & 0x3f); - return 6; - } - - return -1; -} - -/* - * TOML has 3 data structures: value, array, table. - * Each of them can have identification key. 
- */ -typedef struct toml_keyval_t toml_keyval_t; -struct toml_keyval_t { - const char* key; /* key to this value */ - const char* val; /* the raw value */ -}; - - -struct toml_array_t { - const char* key; /* key to this array */ - int kind; /* element kind: 'v'alue, 'a'rray, or 't'able */ - int type; /* for value kind: 'i'nt, 'd'ouble, 'b'ool, 's'tring, 't'ime, 'D'ate, 'T'imestamp */ - - int nelem; /* number of elements */ - union { - char** val; - toml_array_t** arr; - toml_table_t** tab; - } u; -}; - - -struct toml_table_t { - const char* key; /* key to this table */ - bool implicit; /* table was created implicitly */ - - /* key-values in the table */ - int nkval; - toml_keyval_t** kval; - - /* arrays in the table */ - int narr; - toml_array_t** arr; - - /* tables in the table */ - int ntab; - toml_table_t** tab; -}; - - -static inline void xfree(const void* x) { if (x) FREE((void*)(intptr_t)x); } - - -enum tokentype_t { - INVALID, - DOT, - COMMA, - EQUAL, - LBRACE, - RBRACE, - NEWLINE, - LBRACKET, - RBRACKET, - STRING, -}; -typedef enum tokentype_t tokentype_t; - -typedef struct token_t token_t; -struct token_t { - tokentype_t tok; - int lineno; - char* ptr; /* points into context->start */ - int len; - int eof; -}; - - -typedef struct context_t context_t; -struct context_t { - char* start; - char* stop; - char* errbuf; - int errbufsz; - - token_t tok; - toml_table_t* root; - toml_table_t* curtab; - - struct { - int top; - char* key[10]; - token_t tok[10]; - } tpath; - -}; - -#define STRINGIFY(x) #x -#define TOSTRING(x) STRINGIFY(x) -#define FLINE __FILE__ ":" TOSTRING(__LINE__) - -static int next_token(context_t* ctx, int dotisspecial); - -/* - Error reporting. Call when an error is detected. Always return -1. -*/ -static int e_outofmemory(context_t* ctx, const char* fline) -{ - snprintf(ctx->errbuf, ctx->errbufsz, "ERROR: out of memory (%s)", fline); - return -1; -} - - -static int e_internal(context_t* ctx, const char* fline) -{ - snprintf(ctx->errbuf, ctx->errbufsz, "internal error (%s)", fline); - return -1; -} - -static int e_syntax(context_t* ctx, int lineno, const char* msg) -{ - snprintf(ctx->errbuf, ctx->errbufsz, "line %d: %s", lineno, msg); - return -1; -} - -static int e_badkey(context_t* ctx, int lineno) -{ - snprintf(ctx->errbuf, ctx->errbufsz, "line %d: bad key", lineno); - return -1; -} - -static int e_keyexists(context_t* ctx, int lineno) -{ - snprintf(ctx->errbuf, ctx->errbufsz, "line %d: key exists", lineno); - return -1; -} - -static void* expand(void* p, int sz, int newsz) -{ - void* s = MALLOC(newsz); - if (!s) return 0; - - memcpy(s, p, sz); - FREE(p); - return s; -} - -static void** expand_ptrarr(void** p, int n) -{ - void** s = MALLOC((n+1) * sizeof(void*)); - if (!s) return 0; - - s[n] = 0; - memcpy(s, p, n * sizeof(void*)); - FREE(p); - return s; -} - - -static char* norm_lit_str(const char* src, int srclen, - int multiline, - char* errbuf, int errbufsz) -{ - char* dst = 0; /* will write to dst[] and return it */ - int max = 0; /* max size of dst[] */ - int off = 0; /* cur offset in dst[] */ - const char* sp = src; - const char* sq = src + srclen; - int ch; - - /* scan forward on src */ - for (;;) { - if (off >= max - 10) { /* have some slack for misc stuff */ - int newmax = max + 50; - char* x = expand(dst, max, newmax); - if (!x) { - xfree(dst); - snprintf(errbuf, errbufsz, "out of memory"); - return 0; - } - dst = x; - max = newmax; - } - - /* finished? 
*/ - if (sp >= sq) break; - - ch = *sp++; - /* control characters other than tab is not allowed */ - if ((0 <= ch && ch <= 0x08) - || (0x0a <= ch && ch <= 0x1f) - || (ch == 0x7f)) { - if (! (multiline && (ch == '\r' || ch == '\n'))) { - xfree(dst); - snprintf(errbuf, errbufsz, "invalid char U+%04x", ch); - return 0; - } - } - - // a plain copy suffice - dst[off++] = ch; - } - - dst[off++] = 0; - return dst; -} - - - - -/* - * Convert src to raw unescaped utf-8 string. - * Returns NULL if error with errmsg in errbuf. - */ -static char* norm_basic_str(const char* src, int srclen, - int multiline, - char* errbuf, int errbufsz) -{ - char* dst = 0; /* will write to dst[] and return it */ - int max = 0; /* max size of dst[] */ - int off = 0; /* cur offset in dst[] */ - const char* sp = src; - const char* sq = src + srclen; - int ch; - - /* scan forward on src */ - for (;;) { - if (off >= max - 10) { /* have some slack for misc stuff */ - int newmax = max + 50; - char* x = expand(dst, max, newmax); - if (!x) { - xfree(dst); - snprintf(errbuf, errbufsz, "out of memory"); - return 0; - } - dst = x; - max = newmax; - } - - /* finished? */ - if (sp >= sq) break; - - ch = *sp++; - if (ch != '\\') { - /* these chars must be escaped: U+0000 to U+0008, U+000A to U+001F, U+007F */ - if ((0 <= ch && ch <= 0x08) - || (0x0a <= ch && ch <= 0x1f) - || (ch == 0x7f)) { - if (! (multiline && (ch == '\r' || ch == '\n'))) { - xfree(dst); - snprintf(errbuf, errbufsz, "invalid char U+%04x", ch); - return 0; - } - } - - // a plain copy suffice - dst[off++] = ch; - continue; - } - - /* ch was backslash. we expect the escape char. */ - if (sp >= sq) { - snprintf(errbuf, errbufsz, "last backslash is invalid"); - xfree(dst); - return 0; - } - - /* for multi-line, we want to kill line-ending-backslash ... */ - if (multiline) { - - // if there is only whitespace after the backslash ... - if (sp[strspn(sp, " \t\r")] == '\n') { - /* skip all the following whitespaces */ - sp += strspn(sp, " \t\r\n"); - continue; - } - } - - /* get the escaped char */ - ch = *sp++; - switch (ch) { - case 'u': case 'U': - { - int64_t ucs = 0; - int nhex = (ch == 'u' ? 4 : 8); - for (int i = 0; i < nhex; i++) { - if (sp >= sq) { - snprintf(errbuf, errbufsz, "\\%c expects %d hex chars", ch, nhex); - xfree(dst); - return 0; - } - ch = *sp++; - int v = ('0' <= ch && ch <= '9') - ? ch - '0' - : (('A' <= ch && ch <= 'F') ? ch - 'A' + 10 : -1); - if (-1 == v) { - snprintf(errbuf, errbufsz, "invalid hex chars for \\u or \\U"); - xfree(dst); - return 0; - } - ucs = ucs * 16 + v; - } - int n = toml_ucs_to_utf8(ucs, &dst[off]); - if (-1 == n) { - snprintf(errbuf, errbufsz, "illegal ucs code in \\u or \\U"); - xfree(dst); - return 0; - } - off += n; - } - continue; - - case 'b': ch = '\b'; break; - case 't': ch = '\t'; break; - case 'n': ch = '\n'; break; - case 'f': ch = '\f'; break; - case 'r': ch = '\r'; break; - case '"': ch = '"'; break; - case '\\': ch = '\\'; break; - default: - snprintf(errbuf, errbufsz, "illegal escape char \\%c", ch); - xfree(dst); - return 0; - } - - dst[off++] = ch; - } - - // Cap with NUL and return it. - dst[off++] = 0; - return dst; -} - - -/* Normalize a key. Convert all special chars to raw unescaped utf-8 chars. 
*/ -static char* normalize_key(context_t* ctx, token_t strtok) -{ - const char* sp = strtok.ptr; - const char* sq = strtok.ptr + strtok.len; - int lineno = strtok.lineno; - char* ret; - int ch = *sp; - char ebuf[80]; - - /* handle quoted string */ - if (ch == '\'' || ch == '\"') { - /* if ''' or """, take 3 chars off front and back. Else, take 1 char off. */ - int multiline = 0; - if (sp[1] == ch && sp[2] == ch) { - sp += 3, sq -= 3; - multiline = 1; - } - else - sp++, sq--; - - if (ch == '\'') { - /* for single quote, take it verbatim. */ - if (! (ret = STRNDUP(sp, sq - sp))) { - e_outofmemory(ctx, FLINE); - return 0; - } - } else { - /* for double quote, we need to normalize */ - ret = norm_basic_str(sp, sq - sp, multiline, ebuf, sizeof(ebuf)); - if (!ret) { - e_syntax(ctx, lineno, ebuf); - return 0; - } - } - - /* newlines are not allowed in keys */ - if (strchr(ret, '\n')) { - xfree(ret); - e_badkey(ctx, lineno); - return 0; - } - return ret; - } - - /* for bare-key allow only this regex: [A-Za-z0-9_-]+ */ - const char* xp; - for (xp = sp; xp != sq; xp++) { - int k = *xp; - if (isalnum(k)) continue; - if (k == '_' || k == '-') continue; - e_badkey(ctx, lineno); - return 0; - } - - /* dup and return it */ - if (! (ret = STRNDUP(sp, sq - sp))) { - e_outofmemory(ctx, FLINE); - return 0; - } - return ret; -} - - -/* - * Look up key in tab. Return 0 if not found, or - * 'v'alue, 'a'rray or 't'able depending on the element. - */ -static int check_key(toml_table_t* tab, const char* key, - toml_keyval_t** ret_val, - toml_array_t** ret_arr, - toml_table_t** ret_tab) -{ - int i; - void* dummy; - - if (!ret_tab) ret_tab = (toml_table_t**) &dummy; - if (!ret_arr) ret_arr = (toml_array_t**) &dummy; - if (!ret_val) ret_val = (toml_keyval_t**) &dummy; - - *ret_tab = 0; *ret_arr = 0; *ret_val = 0; - - for (i = 0; i < tab->nkval; i++) { - if (0 == strcmp(key, tab->kval[i]->key)) { - *ret_val = tab->kval[i]; - return 'v'; - } - } - for (i = 0; i < tab->narr; i++) { - if (0 == strcmp(key, tab->arr[i]->key)) { - *ret_arr = tab->arr[i]; - return 'a'; - } - } - for (i = 0; i < tab->ntab; i++) { - if (0 == strcmp(key, tab->tab[i]->key)) { - *ret_tab = tab->tab[i]; - return 't'; - } - } - return 0; -} - - -static int key_kind(toml_table_t* tab, const char* key) -{ - return check_key(tab, key, 0, 0, 0); -} - -/* Create a keyval in the table. - */ -static toml_keyval_t* create_keyval_in_table(context_t* ctx, toml_table_t* tab, token_t keytok) -{ - /* first, normalize the key to be used for lookup. - * remember to free it if we error out. - */ - char* newkey = normalize_key(ctx, keytok); - if (!newkey) return 0; - - /* if key exists: error out. */ - toml_keyval_t* dest = 0; - if (key_kind(tab, newkey)) { - xfree(newkey); - e_keyexists(ctx, keytok.lineno); - return 0; - } - - /* make a new entry */ - int n = tab->nkval; - toml_keyval_t** base; - if (0 == (base = (toml_keyval_t**) expand_ptrarr((void**)tab->kval, n))) { - xfree(newkey); - e_outofmemory(ctx, FLINE); - return 0; - } - tab->kval = base; - - if (0 == (base[n] = (toml_keyval_t*) CALLOC(1, sizeof(*base[n])))) { - xfree(newkey); - e_outofmemory(ctx, FLINE); - return 0; - } - dest = tab->kval[tab->nkval++]; - - /* save the key in the new value struct */ - dest->key = newkey; - return dest; -} - - -/* Create a table in the table. - */ -static toml_table_t* create_keytable_in_table(context_t* ctx, toml_table_t* tab, token_t keytok) -{ - /* first, normalize the key to be used for lookup. - * remember to free it if we error out. 
- */ - char* newkey = normalize_key(ctx, keytok); - if (!newkey) return 0; - - /* if key exists: error out */ - toml_table_t* dest = 0; - if (check_key(tab, newkey, 0, 0, &dest)) { - xfree(newkey); /* don't need this anymore */ - - /* special case: if table exists, but was created implicitly ... */ - if (dest && dest->implicit) { - /* we make it explicit now, and simply return it. */ - dest->implicit = false; - return dest; - } - e_keyexists(ctx, keytok.lineno); - return 0; - } - - /* create a new table entry */ - int n = tab->ntab; - toml_table_t** base; - if (0 == (base = (toml_table_t**) expand_ptrarr((void**)tab->tab, n))) { - xfree(newkey); - e_outofmemory(ctx, FLINE); - return 0; - } - tab->tab = base; - - if (0 == (base[n] = (toml_table_t*) CALLOC(1, sizeof(*base[n])))) { - xfree(newkey); - e_outofmemory(ctx, FLINE); - return 0; - } - dest = tab->tab[tab->ntab++]; - - /* save the key in the new table struct */ - dest->key = newkey; - return dest; -} - - -/* Create an array in the table. - */ -static toml_array_t* create_keyarray_in_table(context_t* ctx, - toml_table_t* tab, - token_t keytok, - char kind) -{ - /* first, normalize the key to be used for lookup. - * remember to free it if we error out. - */ - char* newkey = normalize_key(ctx, keytok); - if (!newkey) return 0; - - /* if key exists: error out */ - if (key_kind(tab, newkey)) { - xfree(newkey); /* don't need this anymore */ - e_keyexists(ctx, keytok.lineno); - return 0; - } - - /* make a new array entry */ - int n = tab->narr; - toml_array_t** base; - if (0 == (base = (toml_array_t**) expand_ptrarr((void**)tab->arr, n))) { - xfree(newkey); - e_outofmemory(ctx, FLINE); - return 0; - } - tab->arr = base; - - if (0 == (base[n] = (toml_array_t*) CALLOC(1, sizeof(*base[n])))) { - xfree(newkey); - e_outofmemory(ctx, FLINE); - return 0; - } - toml_array_t* dest = tab->arr[tab->narr++]; - - /* save the key in the new array struct */ - dest->key = newkey; - dest->kind = kind; - return dest; -} - -/* Create an array in an array - */ -static toml_array_t* create_array_in_array(context_t* ctx, - toml_array_t* parent) -{ - const int n = parent->nelem; - toml_array_t** base; - if (0 == (base = (toml_array_t**) expand_ptrarr((void**)parent->u.arr, n))) { - e_outofmemory(ctx, FLINE); - return 0; - } - parent->u.arr = base; - parent->nelem++; - - if (0 == (base[n] = (toml_array_t*) CALLOC(1, sizeof(*base[n])))) { - e_outofmemory(ctx, FLINE); - return 0; - } - - return parent->u.arr[n]; -} - -/* Create a table in an array - */ -static toml_table_t* create_table_in_array(context_t* ctx, - toml_array_t* parent) -{ - int n = parent->nelem; - toml_table_t** base; - if (0 == (base = (toml_table_t**) expand_ptrarr((void**)parent->u.tab, n))) { - e_outofmemory(ctx, FLINE); - return 0; - } - parent->u.tab = base; - - if (0 == (base[n] = (toml_table_t*) CALLOC(1, sizeof(*base[n])))) { - e_outofmemory(ctx, FLINE); - return 0; - } - - return parent->u.tab[parent->nelem++]; -} - - -static int skip_newlines(context_t* ctx, int isdotspecial) -{ - while (ctx->tok.tok == NEWLINE) { - if (next_token(ctx, isdotspecial)) return -1; - if (ctx->tok.eof) break; - } - return 0; -} - - -static int parse_keyval(context_t* ctx, toml_table_t* tab); - -static inline int eat_token(context_t* ctx, tokentype_t typ, int isdotspecial, const char* fline) -{ - if (ctx->tok.tok != typ) - return e_internal(ctx, fline); - - if (next_token(ctx, isdotspecial)) - return -1; - - return 0; -} - - - -/* We are at '{ ... }'. - * Parse the table. 
- */ -static int parse_table(context_t* ctx, toml_table_t* tab) -{ - if (eat_token(ctx, LBRACE, 1, FLINE)) - return -1; - - for (;;) { - if (ctx->tok.tok == NEWLINE) - return e_syntax(ctx, ctx->tok.lineno, "newline not allowed in inline table"); - - /* until } */ - if (ctx->tok.tok == RBRACE) - break; - - if (ctx->tok.tok != STRING) - return e_syntax(ctx, ctx->tok.lineno, "expect a string"); - - if (parse_keyval(ctx, tab)) - return -1; - - if (ctx->tok.tok == NEWLINE) - return e_syntax(ctx, ctx->tok.lineno, "newline not allowed in inline table"); - - /* on comma, continue to scan for next keyval */ - if (ctx->tok.tok == COMMA) { - if (eat_token(ctx, COMMA, 1, FLINE)) - return -1; - continue; - } - break; - } - - if (eat_token(ctx, RBRACE, 1, FLINE)) - return -1; - return 0; -} - -static int valtype(const char* val) -{ - toml_timestamp_t ts; - if (*val == '\'' || *val == '"') return 's'; - if (0 == toml_rtob(val, 0)) return 'b'; - if (0 == toml_rtoi(val, 0)) return 'i'; - if (0 == toml_rtod(val, 0)) return 'd'; - if (0 == toml_rtots(val, &ts)) { - if (ts.year && ts.hour) return 'T'; /* timestamp */ - if (ts.year) return 'D'; /* date */ - return 't'; /* time */ - } - return 'u'; /* unknown */ -} - - -/* We are at '[...]' */ -static int parse_array(context_t* ctx, toml_array_t* arr) -{ - if (eat_token(ctx, LBRACKET, 0, FLINE)) return -1; - - for (;;) { - if (skip_newlines(ctx, 0)) return -1; - - /* until ] */ - if (ctx->tok.tok == RBRACKET) break; - - switch (ctx->tok.tok) { - case STRING: - { - char* val = ctx->tok.ptr; - int vlen = ctx->tok.len; - - /* set array kind if this will be the first entry */ - if (arr->kind == 0) arr->kind = 'v'; - /* check array kind */ - if (arr->kind != 'v') - return e_syntax(ctx, ctx->tok.lineno, "a string array can only contain strings"); - - /* make a new value in array */ - char** tmp = (char**) expand_ptrarr((void**)arr->u.val, arr->nelem); - if (!tmp) - return e_outofmemory(ctx, FLINE); - - arr->u.val = tmp; - if (! (val = STRNDUP(val, vlen))) - return e_outofmemory(ctx, FLINE); - - arr->u.val[arr->nelem++] = val; - - /* set array type if this is the first entry, or check that the types matched. */ - if (arr->nelem == 1) - arr->type = valtype(arr->u.val[0]); - else if (arr->type != valtype(val)) { - return e_syntax(ctx, ctx->tok.lineno, - "array type mismatch while processing array of values"); - } - - if (eat_token(ctx, STRING, 0, FLINE)) return -1; - break; - } - - case LBRACKET: - { /* [ [array], [array] ... ] */ - /* set the array kind if this will be the first entry */ - if (arr->kind == 0) arr->kind = 'a'; - /* check array kind */ - if (arr->kind != 'a') { - return e_syntax(ctx, ctx->tok.lineno, - "array type mismatch while processing array of arrays"); - } - toml_array_t* subarr = create_array_in_array(ctx, arr); - if (!subarr) return -1; - if (parse_array(ctx, subarr)) return -1; - break; - } - - case LBRACE: - { /* [ {table}, {table} ... 
] */ - /* set the array kind if this will be the first entry */ - if (arr->kind == 0) arr->kind = 't'; - /* check array kind */ - if (arr->kind != 't') { - return e_syntax(ctx, ctx->tok.lineno, - "array type mismatch while processing array of tables"); - } - toml_table_t* subtab = create_table_in_array(ctx, arr); - if (!subtab) return -1; - if (parse_table(ctx, subtab)) return -1; - break; - } - - default: - return e_syntax(ctx, ctx->tok.lineno, "syntax error"); - } - - if (skip_newlines(ctx, 0)) return -1; - - /* on comma, continue to scan for next element */ - if (ctx->tok.tok == COMMA) { - if (eat_token(ctx, COMMA, 0, FLINE)) return -1; - continue; - } - break; - } - - if (eat_token(ctx, RBRACKET, 1, FLINE)) return -1; - return 0; -} - - -/* handle lines like these: - key = "value" - key = [ array ] - key = { table } -*/ -static int parse_keyval(context_t* ctx, toml_table_t* tab) -{ - token_t key = ctx->tok; - if (eat_token(ctx, STRING, 1, FLINE)) return -1; - - if (ctx->tok.tok == DOT) { - /* handle inline dotted key. - e.g. - physical.color = "orange" - physical.shape = "round" - */ - toml_table_t* subtab = 0; - { - char* subtabstr = normalize_key(ctx, key); - subtab = toml_table_in(tab, subtabstr); - xfree(subtabstr); - } - if (!subtab) { - subtab = create_keytable_in_table(ctx, tab, key); - if (!subtab) return -1; - } - if (next_token(ctx, 1)) return -1; - if (parse_keyval(ctx, subtab)) return -1; - return 0; - } - - if (ctx->tok.tok != EQUAL) { - return e_syntax(ctx, ctx->tok.lineno, "missing ="); - } - - if (next_token(ctx, 0)) return -1; - - switch (ctx->tok.tok) { - case STRING: - { /* key = "value" */ - toml_keyval_t* keyval = create_keyval_in_table(ctx, tab, key); - if (!keyval) return -1; - token_t val = ctx->tok; - - assert(keyval->val == 0); - if (! (keyval->val = STRNDUP(val.ptr, val.len))) - return e_outofmemory(ctx, FLINE); - - if (next_token(ctx, 1)) return -1; - - return 0; - } - - case LBRACKET: - { /* key = [ array ] */ - toml_array_t* arr = create_keyarray_in_table(ctx, tab, key, 0); - if (!arr) return -1; - if (parse_array(ctx, arr)) return -1; - return 0; - } - - case LBRACE: - { /* key = { table } */ - toml_table_t* nxttab = create_keytable_in_table(ctx, tab, key); - if (!nxttab) return -1; - if (parse_table(ctx, nxttab)) return -1; - return 0; - } - - default: - return e_syntax(ctx, ctx->tok.lineno, "syntax error"); - } - return 0; -} - - -typedef struct tabpath_t tabpath_t; -struct tabpath_t { - int cnt; - token_t key[10]; -}; - -/* at [x.y.z] or [[x.y.z]] - * Scan forward and fill tabpath until it enters ] or ]] - * There will be at least one entry on return. 
- */ -static int fill_tabpath(context_t* ctx) -{ - int lineno = ctx->tok.lineno; - int i; - - /* clear tpath */ - for (i = 0; i < ctx->tpath.top; i++) { - char** p = &ctx->tpath.key[i]; - xfree(*p); - *p = 0; - } - ctx->tpath.top = 0; - - for (;;) { - if (ctx->tpath.top >= 10) - return e_syntax(ctx, lineno, "table path is too deep; max allowed is 10."); - - if (ctx->tok.tok != STRING) - return e_syntax(ctx, lineno, "invalid or missing key"); - - char* key = normalize_key(ctx, ctx->tok); - if (!key) return -1; - ctx->tpath.tok[ctx->tpath.top] = ctx->tok; - ctx->tpath.key[ctx->tpath.top] = key; - ctx->tpath.top++; - - if (next_token(ctx, 1)) return -1; - - if (ctx->tok.tok == RBRACKET) break; - - if (ctx->tok.tok != DOT) - return e_syntax(ctx, lineno, "invalid key"); - - if (next_token(ctx, 1)) return -1; - } - - if (ctx->tpath.top <= 0) - return e_syntax(ctx, lineno, "empty table selector"); - - return 0; -} - - -/* Walk tabpath from the root, and create new tables on the way. - * Sets ctx->curtab to the final table. - */ -static int walk_tabpath(context_t* ctx) -{ - /* start from root */ - toml_table_t* curtab = ctx->root; - - for (int i = 0; i < ctx->tpath.top; i++) { - const char* key = ctx->tpath.key[i]; - - toml_keyval_t* nextval = 0; - toml_array_t* nextarr = 0; - toml_table_t* nexttab = 0; - switch (check_key(curtab, key, &nextval, &nextarr, &nexttab)) { - case 't': - /* found a table. nexttab is where we will go next. */ - break; - - case 'a': - /* found an array. nexttab is the last table in the array. */ - if (nextarr->kind != 't') - return e_internal(ctx, FLINE); - - if (nextarr->nelem == 0) - return e_internal(ctx, FLINE); - - nexttab = nextarr->u.tab[nextarr->nelem-1]; - break; - - case 'v': - return e_keyexists(ctx, ctx->tpath.tok[i].lineno); - - default: - { /* Not found. Let's create an implicit table. */ - int n = curtab->ntab; - toml_table_t** base = (toml_table_t**) expand_ptrarr((void**)curtab->tab, n); - if (0 == base) - return e_outofmemory(ctx, FLINE); - - curtab->tab = base; - - if (0 == (base[n] = (toml_table_t*) CALLOC(1, sizeof(*base[n])))) - return e_outofmemory(ctx, FLINE); - - if (0 == (base[n]->key = STRDUP(key))) - return e_outofmemory(ctx, FLINE); - - nexttab = curtab->tab[curtab->ntab++]; - - /* tabs created by walk_tabpath are considered implicit */ - nexttab->implicit = true; - } - break; - } - - /* switch to next tab */ - curtab = nexttab; - } - - /* save it */ - ctx->curtab = curtab; - - return 0; -} - - -/* handle lines like [x.y.z] or [[x.y.z]] */ -static int parse_select(context_t* ctx) -{ - assert(ctx->tok.tok == LBRACKET); - - /* true if [[ */ - int llb = (ctx->tok.ptr + 1 < ctx->stop && ctx->tok.ptr[1] == '['); - /* need to detect '[[' on our own because next_token() will skip whitespace, - and '[ [' would be taken as '[[', which is wrong. */ - - /* eat [ or [[ */ - if (eat_token(ctx, LBRACKET, 1, FLINE)) return -1; - if (llb) { - assert(ctx->tok.tok == LBRACKET); - if (eat_token(ctx, LBRACKET, 1, FLINE)) return -1; - } - - if (fill_tabpath(ctx)) return -1; - - /* For [x.y.z] or [[x.y.z]], remove z from tpath. - */ - token_t z = ctx->tpath.tok[ctx->tpath.top-1]; - xfree(ctx->tpath.key[ctx->tpath.top-1]); - ctx->tpath.top--; - - /* set up ctx->curtab */ - if (walk_tabpath(ctx)) return -1; - - if (! 
llb) { - /* [x.y.z] -> create z = {} in x.y */ - toml_table_t* curtab = create_keytable_in_table(ctx, ctx->curtab, z); - if (!curtab) return -1; - ctx->curtab = curtab; - } else { - /* [[x.y.z]] -> create z = [] in x.y */ - toml_array_t* arr = 0; - { - char* zstr = normalize_key(ctx, z); - if (!zstr) return -1; - arr = toml_array_in(ctx->curtab, zstr); - xfree(zstr); - } - if (!arr) { - arr = create_keyarray_in_table(ctx, ctx->curtab, z, 't'); - if (!arr) return -1; - } - if (arr->kind != 't') - return e_syntax(ctx, z.lineno, "array mismatch"); - - /* add to z[] */ - toml_table_t* dest; - { - int n = arr->nelem; - toml_table_t** base = (toml_table_t**) expand_ptrarr((void**)arr->u.tab, n); - if (0 == base) - return e_outofmemory(ctx, FLINE); - - arr->u.tab = base; - - if (0 == (base[n] = CALLOC(1, sizeof(*base[n])))) - return e_outofmemory(ctx, FLINE); - - if (0 == (base[n]->key = STRDUP("__anon__"))) - return e_outofmemory(ctx, FLINE); - - dest = arr->u.tab[arr->nelem++]; - } - - ctx->curtab = dest; - } - - if (ctx->tok.tok != RBRACKET) { - return e_syntax(ctx, ctx->tok.lineno, "expects ]"); - } - if (llb) { - if (! (ctx->tok.ptr + 1 < ctx->stop && ctx->tok.ptr[1] == ']')) { - return e_syntax(ctx, ctx->tok.lineno, "expects ]]"); - } - if (eat_token(ctx, RBRACKET, 1, FLINE)) return -1; - } - - if (eat_token(ctx, RBRACKET, 1, FLINE)) - return -1; - - if (ctx->tok.tok != NEWLINE) - return e_syntax(ctx, ctx->tok.lineno, "extra chars after ] or ]]"); - - return 0; -} - - - - -toml_table_t* toml_parse(char* conf, - char* errbuf, - int errbufsz) -{ - context_t ctx; - - // clear errbuf - if (errbufsz <= 0) errbufsz = 0; - if (errbufsz > 0) errbuf[0] = 0; - - // init context - memset(&ctx, 0, sizeof(ctx)); - ctx.start = conf; - ctx.stop = ctx.start + strlen(conf); - ctx.errbuf = errbuf; - ctx.errbufsz = errbufsz; - - // start with an artificial newline of length 0 - ctx.tok.tok = NEWLINE; - ctx.tok.lineno = 1; - ctx.tok.ptr = conf; - ctx.tok.len = 0; - - // make a root table - if (0 == (ctx.root = CALLOC(1, sizeof(*ctx.root)))) { - e_outofmemory(&ctx, FLINE); - // Do not goto fail, root table not set up yet - return 0; - } - - // set root as default table - ctx.curtab = ctx.root; - - /* Scan forward until EOF */ - for (token_t tok = ctx.tok; ! tok.eof ; tok = ctx.tok) { - switch (tok.tok) { - - case NEWLINE: - if (next_token(&ctx, 1)) goto fail; - break; - - case STRING: - if (parse_keyval(&ctx, ctx.curtab)) goto fail; - - if (ctx.tok.tok != NEWLINE) { - e_syntax(&ctx, ctx.tok.lineno, "extra chars after value"); - goto fail; - } - - if (eat_token(&ctx, NEWLINE, 1, FLINE)) goto fail; - break; - - case LBRACKET: /* [ x.y.z ] or [[ x.y.z ]] */ - if (parse_select(&ctx)) goto fail; - break; - - default: - e_syntax(&ctx, tok.lineno, "syntax error"); - goto fail; - } - } - - /* success */ - for (int i = 0; i < ctx.tpath.top; i++) xfree(ctx.tpath.key[i]); - return ctx.root; - -fail: - // Something bad has happened. Free resources and return error. - for (int i = 0; i < ctx.tpath.top; i++) xfree(ctx.tpath.key[i]); - toml_free(ctx.root); - return 0; -} - - -toml_table_t* toml_parse_file(FILE* fp, - char* errbuf, - int errbufsz) -{ - int bufsz = 0; - char* buf = 0; - int off = 0; - - /* read from fp into buf */ - while (! 
feof(fp)) { - - if (off == bufsz) { - int xsz = bufsz + 1000; - char* x = expand(buf, bufsz, xsz); - if (!x) { - snprintf(errbuf, errbufsz, "out of memory"); - xfree(buf); - return 0; - } - buf = x; - bufsz = xsz; - } - - errno = 0; - int n = fread(buf + off, 1, bufsz - off, fp); - if (ferror(fp)) { - snprintf(errbuf, errbufsz, "%s", - errno ? strerror(errno) : "Error reading file"); - xfree(buf); - return 0; - } - off += n; - } - - /* tag on a NUL to cap the string */ - if (off == bufsz) { - int xsz = bufsz + 1; - char* x = expand(buf, bufsz, xsz); - if (!x) { - snprintf(errbuf, errbufsz, "out of memory"); - xfree(buf); - return 0; - } - buf = x; - bufsz = xsz; - } - buf[off] = 0; - - /* parse it, cleanup and finish */ - toml_table_t* ret = toml_parse(buf, errbuf, errbufsz); - xfree(buf); - return ret; -} - - -static void xfree_kval(toml_keyval_t* p) -{ - if (!p) return; - xfree(p->key); - xfree(p->val); - xfree(p); -} - -static void xfree_tab(toml_table_t* p); - -static void xfree_arr(toml_array_t* p) -{ - if (!p) return; - - xfree(p->key); - switch (p->kind) { - case 'v': - for (int i = 0; i < p->nelem; i++) xfree(p->u.val[i]); - xfree(p->u.val); - break; - - case 'a': - for (int i = 0; i < p->nelem; i++) xfree_arr(p->u.arr[i]); - xfree(p->u.arr); - break; - - case 't': - for (int i = 0; i < p->nelem; i++) xfree_tab(p->u.tab[i]); - xfree(p->u.tab); - break; - } - - xfree(p); -} - - -static void xfree_tab(toml_table_t* p) -{ - int i; - - if (!p) return; - - xfree(p->key); - - for (i = 0; i < p->nkval; i++) xfree_kval(p->kval[i]); - xfree(p->kval); - - for (i = 0; i < p->narr; i++) xfree_arr(p->arr[i]); - xfree(p->arr); - - for (i = 0; i < p->ntab; i++) xfree_tab(p->tab[i]); - xfree(p->tab); - - xfree(p); -} - - -void toml_free(toml_table_t* tab) -{ - xfree_tab(tab); -} - - -static void set_token(context_t* ctx, tokentype_t tok, int lineno, char* ptr, int len) -{ - token_t t; - t.tok = tok; - t.lineno = lineno; - t.ptr = ptr; - t.len = len; - t.eof = 0; - ctx->tok = t; -} - -static void set_eof(context_t* ctx, int lineno) -{ - set_token(ctx, NEWLINE, lineno, ctx->stop, 0); - ctx->tok.eof = 1; -} - - -/* Scan p for n digits compositing entirely of [0-9] */ -static int scan_digits(const char* p, int n) -{ - int ret = 0; - for ( ; n > 0 && isdigit(*p); n--, p++) { - ret = 10 * ret + (*p - '0'); - } - return n ? -1 : ret; -} - -static int scan_date(const char* p, int* YY, int* MM, int* DD) -{ - int year, month, day; - year = scan_digits(p, 4); - month = (year >= 0 && p[4] == '-') ? scan_digits(p+5, 2) : -1; - day = (month >= 0 && p[7] == '-') ? scan_digits(p+8, 2) : -1; - if (YY) *YY = year; - if (MM) *MM = month; - if (DD) *DD = day; - return (year >= 0 && month >= 0 && day >= 0) ? 0 : -1; -} - -static int scan_time(const char* p, int* hh, int* mm, int* ss) -{ - int hour, minute, second; - hour = scan_digits(p, 2); - minute = (hour >= 0 && p[2] == ':') ? scan_digits(p+3, 2) : -1; - second = (minute >= 0 && p[5] == ':') ? scan_digits(p+6, 2) : -1; - if (hh) *hh = hour; - if (mm) *mm = minute; - if (ss) *ss = second; - return (hour >= 0 && minute >= 0 && second >= 0) ? 
0 : -1; -} - - -static int scan_string(context_t* ctx, char* p, int lineno, int dotisspecial) -{ - char* orig = p; - if (0 == strncmp(p, "'''", 3)) { - p = strstr(p + 3, "'''"); - if (0 == p) { - return e_syntax(ctx, lineno, "unterminated triple-s-quote"); - } - - set_token(ctx, STRING, lineno, orig, p + 3 - orig); - return 0; - } - - if (0 == strncmp(p, "\"\"\"", 3)) { - int hexreq = 0; /* #hex required */ - int escape = 0; - int qcnt = 0; /* count quote */ - for (p += 3; *p && qcnt < 3; p++) { - if (escape) { - escape = 0; - if (strchr("btnfr\"\\", *p)) continue; - if (*p == 'u') { hexreq = 4; continue; } - if (*p == 'U') { hexreq = 8; continue; } - if (p[strspn(p, " \t\r")] == '\n') continue; /* allow for line ending backslash */ - return e_syntax(ctx, lineno, "bad escape char"); - } - if (hexreq) { - hexreq--; - if (strchr("0123456789ABCDEF", *p)) continue; - return e_syntax(ctx, lineno, "expect hex char"); - } - if (*p == '\\') { escape = 1; continue; } - qcnt = (*p == '"') ? qcnt + 1 : 0; - } - if (qcnt != 3) { - return e_syntax(ctx, lineno, "unterminated triple-quote"); - } - - set_token(ctx, STRING, lineno, orig, p - orig); - return 0; - } - - if ('\'' == *p) { - for (p++; *p && *p != '\n' && *p != '\''; p++); - if (*p != '\'') { - return e_syntax(ctx, lineno, "unterminated s-quote"); - } - - set_token(ctx, STRING, lineno, orig, p + 1 - orig); - return 0; - } - - if ('\"' == *p) { - int hexreq = 0; /* #hex required */ - int escape = 0; - for (p++; *p; p++) { - if (escape) { - escape = 0; - if (strchr("btnfr\"\\", *p)) continue; - if (*p == 'u') { hexreq = 4; continue; } - if (*p == 'U') { hexreq = 8; continue; } - return e_syntax(ctx, lineno, "bad escape char"); - } - if (hexreq) { - hexreq--; - if (strchr("0123456789ABCDEF", *p)) continue; - return e_syntax(ctx, lineno, "expect hex char"); - } - if (*p == '\\') { escape = 1; continue; } - if (*p == '\n') break; - if (*p == '"') break; - } - if (*p != '"') { - return e_syntax(ctx, lineno, "unterminated quote"); - } - - set_token(ctx, STRING, lineno, orig, p + 1 - orig); - return 0; - } - - /* check for timestamp without quotes */ - if (0 == scan_date(p, 0, 0, 0) || 0 == scan_time(p, 0, 0, 0)) { - // forward thru the timestamp - for ( ; strchr("0123456789.:+-T Z", toupper(*p)); p++); - // squeeze out any spaces at end of string - for ( ; p[-1] == ' '; p--); - // tokenize - set_token(ctx, STRING, lineno, orig, p - orig); - return 0; - } - - /* literals */ - for ( ; *p && *p != '\n'; p++) { - int ch = *p; - if (ch == '.' && dotisspecial) break; - if ('A' <= ch && ch <= 'Z') continue; - if ('a' <= ch && ch <= 'z') continue; - if (strchr("0123456789+-_.", ch)) continue; - break; - } - - set_token(ctx, STRING, lineno, orig, p - orig); - return 0; -} - - -static int next_token(context_t* ctx, int dotisspecial) -{ - int lineno = ctx->tok.lineno; - char* p = ctx->tok.ptr; - int i; - - /* eat this tok */ - for (i = 0; i < ctx->tok.len; i++) { - if (*p++ == '\n') - lineno++; - } - - /* make next tok */ - while (p < ctx->stop) { - /* skip comment. stop just before the \n. 
*/ - if (*p == '#') { - for (p++; p < ctx->stop && *p != '\n'; p++); - continue; - } - - if (dotisspecial && *p == '.') { - set_token(ctx, DOT, lineno, p, 1); - return 0; - } - - switch (*p) { - case ',': set_token(ctx, COMMA, lineno, p, 1); return 0; - case '=': set_token(ctx, EQUAL, lineno, p, 1); return 0; - case '{': set_token(ctx, LBRACE, lineno, p, 1); return 0; - case '}': set_token(ctx, RBRACE, lineno, p, 1); return 0; - case '[': set_token(ctx, LBRACKET, lineno, p, 1); return 0; - case ']': set_token(ctx, RBRACKET, lineno, p, 1); return 0; - case '\n': set_token(ctx, NEWLINE, lineno, p, 1); return 0; - case '\r': case ' ': case '\t': - /* ignore white spaces */ - p++; - continue; - } - - return scan_string(ctx, p, lineno, dotisspecial); - } - - set_eof(ctx, lineno); - return 0; -} - - -const char* toml_key_in(const toml_table_t* tab, int keyidx) -{ - if (keyidx < tab->nkval) return tab->kval[keyidx]->key; - - keyidx -= tab->nkval; - if (keyidx < tab->narr) return tab->arr[keyidx]->key; - - keyidx -= tab->narr; - if (keyidx < tab->ntab) return tab->tab[keyidx]->key; - - return 0; -} - -toml_raw_t toml_raw_in(const toml_table_t* tab, const char* key) -{ - int i; - for (i = 0; i < tab->nkval; i++) { - if (0 == strcmp(key, tab->kval[i]->key)) - return tab->kval[i]->val; - } - return 0; -} - -toml_array_t* toml_array_in(const toml_table_t* tab, const char* key) -{ - int i; - for (i = 0; i < tab->narr; i++) { - if (0 == strcmp(key, tab->arr[i]->key)) - return tab->arr[i]; - } - return 0; -} - - -toml_table_t* toml_table_in(const toml_table_t* tab, const char* key) -{ - int i; - for (i = 0; i < tab->ntab; i++) { - if (0 == strcmp(key, tab->tab[i]->key)) - return tab->tab[i]; - } - return 0; -} - -toml_raw_t toml_raw_at(const toml_array_t* arr, int idx) -{ - if (arr->kind != 'v') - return 0; - if (! (0 <= idx && idx < arr->nelem)) - return 0; - return arr->u.val[idx]; -} - -char toml_array_kind(const toml_array_t* arr) -{ - return arr->kind; -} - -char toml_array_type(const toml_array_t* arr) -{ - if (arr->kind != 'v') - return 0; - - if (arr->nelem == 0) - return 0; - - return arr->type; -} - - -int toml_array_nelem(const toml_array_t* arr) -{ - return arr->nelem; -} - -const char* toml_array_key(const toml_array_t* arr) -{ - return arr ? arr->key : (const char*) NULL; -} - -int toml_table_nkval(const toml_table_t* tab) -{ - return tab->nkval; -} - -int toml_table_narr(const toml_table_t* tab) -{ - return tab->narr; -} - -int toml_table_ntab(const toml_table_t* tab) -{ - return tab->ntab; -} - -const char* toml_table_key(const toml_table_t* tab) -{ - return tab ? tab->key : (const char*) NULL; -} - -toml_array_t* toml_array_at(const toml_array_t* arr, int idx) -{ - if (arr->kind != 'a') - return 0; - if (! (0 <= idx && idx < arr->nelem)) - return 0; - return arr->u.arr[idx]; -} - -toml_table_t* toml_table_at(const toml_array_t* arr, int idx) -{ - if (arr->kind != 't') - return 0; - if (! (0 <= idx && idx < arr->nelem)) - return 0; - return arr->u.tab[idx]; -} - - -int toml_rtots(toml_raw_t src_, toml_timestamp_t* ret) -{ - if (! 
src_) return -1; - - const char* p = src_; - int must_parse_time = 0; - - memset(ret, 0, sizeof(*ret)); - - int* year = &ret->__buffer.year; - int* month = &ret->__buffer.month; - int* day = &ret->__buffer.day; - int* hour = &ret->__buffer.hour; - int* minute = &ret->__buffer.minute; - int* second = &ret->__buffer.second; - int* millisec = &ret->__buffer.millisec; - - /* parse date YYYY-MM-DD */ - if (0 == scan_date(p, year, month, day)) { - ret->year = year; - ret->month = month; - ret->day = day; - - p += 10; - if (*p) { - // parse the T or space separator - if (*p != 'T' && *p != ' ') return -1; - must_parse_time = 1; - p++; - } - } - - /* parse time HH:MM:SS */ - if (0 == scan_time(p, hour, minute, second)) { - ret->hour = hour; - ret->minute = minute; - ret->second = second; - - /* optionally, parse millisec */ - p += 8; - if (*p == '.') { - char* qq; - p++; - errno = 0; - *millisec = strtol(p, &qq, 0); - if (errno) { - return -1; - } - while (*millisec > 999) { - *millisec /= 10; - } - - ret->millisec = millisec; - p = qq; - } - - if (*p) { - /* parse and copy Z */ - char* z = ret->__buffer.z; - ret->z = z; - if (*p == 'Z' || *p == 'z') { - *z++ = 'Z'; p++; - *z = 0; - - } else if (*p == '+' || *p == '-') { - *z++ = *p++; - - if (! (isdigit(p[0]) && isdigit(p[1]))) return -1; - *z++ = *p++; - *z++ = *p++; - - if (*p == ':') { - *z++ = *p++; - - if (! (isdigit(p[0]) && isdigit(p[1]))) return -1; - *z++ = *p++; - *z++ = *p++; - } - - *z = 0; - } - } - } - if (*p != 0) - return -1; - - if (must_parse_time && !ret->hour) - return -1; - - return 0; -} - - -/* Raw to boolean */ -int toml_rtob(toml_raw_t src, int* ret_) -{ - if (!src) return -1; - int dummy; - int* ret = ret_ ? ret_ : &dummy; - - if (0 == strcmp(src, "true")) { - *ret = 1; - return 0; - } - if (0 == strcmp(src, "false")) { - *ret = 0; - return 0; - } - return -1; -} - - -/* Raw to integer */ -int toml_rtoi(toml_raw_t src, int64_t* ret_) -{ - if (!src) return -1; - - char buf[100]; - char* p = buf; - char* q = p + sizeof(buf); - const char* s = src; - int base = 0; - int64_t dummy; - int64_t* ret = ret_ ? ret_ : &dummy; - - - /* allow +/- */ - if (s[0] == '+' || s[0] == '-') - *p++ = *s++; - - /* disallow +_100 */ - if (s[0] == '_') - return -1; - - /* if 0 ... */ - if ('0' == s[0]) { - switch (s[1]) { - case 'x': base = 16; s += 2; break; - case 'o': base = 8; s += 2; break; - case 'b': base = 2; s += 2; break; - case '\0': return *ret = 0, 0; - default: - /* ensure no other digits after it */ - if (s[1]) return -1; - } - } - - /* just strip underscores and pass to strtoll */ - while (*s && p < q) { - int ch = *s++; - switch (ch) { - case '_': - // disallow '__' - if (s[0] == '_') return -1; - continue; /* skip _ */ - default: - break; - } - *p++ = ch; - } - if (*s || p == q) return -1; - - /* last char cannot be '_' */ - if (s[-1] == '_') return -1; - - /* cap with NUL */ - *p = 0; - - /* Run strtoll on buf to get the integer */ - char* endp; - errno = 0; - *ret = strtoll(buf, &endp, base); - return (errno || *endp) ? -1 : 0; -} - - -int toml_rtod_ex(toml_raw_t src, double* ret_, char* buf, int buflen) -{ - if (!src) return -1; - - char* p = buf; - char* q = p + buflen; - const char* s = src; - double dummy; - double* ret = ret_ ? ret_ : &dummy; - - - /* allow +/- */ - if (s[0] == '+' || s[0] == '-') - *p++ = *s++; - - /* disallow +_1.00 */ - if (s[0] == '_') - return -1; - - /* disallow +.99 */ - if (s[0] == '.') - return -1; - - /* zero must be followed by . 
or 'e', or NUL */ - if (s[0] == '0' && s[1] && !strchr("eE.", s[1])) - return -1; - - /* just strip underscores and pass to strtod */ - while (*s && p < q) { - int ch = *s++; - switch (ch) { - case '.': - if (s[-2] == '_') return -1; - if (s[0] == '_') return -1; - break; - case '_': - // disallow '__' - if (s[0] == '_') return -1; - continue; /* skip _ */ - default: - break; - } - *p++ = ch; - } - if (*s || p == q) return -1; /* reached end of string or buffer is full? */ - - /* last char cannot be '_' */ - if (s[-1] == '_') return -1; - - if (p != buf && p[-1] == '.') - return -1; /* no trailing zero */ - - /* cap with NUL */ - *p = 0; - - /* Run strtod on buf to get the value */ - char* endp; - errno = 0; - *ret = strtod(buf, &endp); - return (errno || *endp) ? -1 : 0; -} - -int toml_rtod(toml_raw_t src, double* ret_) -{ - char buf[100]; - return toml_rtod_ex(src, ret_, buf, sizeof(buf)); -} - - - - -int toml_rtos(toml_raw_t src, char** ret) -{ - int multiline = 0; - const char* sp; - const char* sq; - - *ret = 0; - if (!src) return -1; - - int qchar = src[0]; - int srclen = strlen(src); - if (! (qchar == '\'' || qchar == '"')) { - return -1; - } - - // triple quotes? - if (qchar == src[1] && qchar == src[2]) { - multiline = 1; - sp = src + 3; - sq = src + srclen - 3; - /* last 3 chars in src must be qchar */ - if (! (sp <= sq && sq[0] == qchar && sq[1] == qchar && sq[2] == qchar)) - return -1; - - /* skip new line immediate after qchar */ - if (sp[0] == '\n') - sp++; - else if (sp[0] == '\r' && sp[1] == '\n') - sp += 2; - - } else { - sp = src + 1; - sq = src + srclen - 1; - /* last char in src must be qchar */ - if (! (sp <= sq && *sq == qchar)) - return -1; - } - - if (qchar == '\'') { - *ret = norm_lit_str(sp, sq - sp, - multiline, - 0, 0); - } else { - *ret = norm_basic_str(sp, sq - sp, - multiline, - 0, 0); - } - - return *ret ? 
0 : -1; -} - - -toml_datum_t toml_string_at(const toml_array_t* arr, int idx) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtos(toml_raw_at(arr, idx), &ret.u.s)); - return ret; -} - -toml_datum_t toml_bool_at(const toml_array_t* arr, int idx) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtob(toml_raw_at(arr, idx), &ret.u.b)); - return ret; -} - -toml_datum_t toml_int_at(const toml_array_t* arr, int idx) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtoi(toml_raw_at(arr, idx), &ret.u.i)); - return ret; -} - -toml_datum_t toml_double_at(const toml_array_t* arr, int idx) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtod(toml_raw_at(arr, idx), &ret.u.d)); - return ret; -} - -toml_datum_t toml_timestamp_at(const toml_array_t* arr, int idx) -{ - toml_timestamp_t ts; - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtots(toml_raw_at(arr, idx), &ts)); - if (ret.ok) { - ret.ok = !!(ret.u.ts = malloc(sizeof(*ret.u.ts))); - if (ret.ok) { - *ret.u.ts = ts; - } - } - return ret; -} - -toml_datum_t toml_string_in(const toml_table_t* arr, const char* key) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - toml_raw_t raw = toml_raw_in(arr, key); - if (raw) { - ret.ok = (0 == toml_rtos(raw, &ret.u.s)); - } - return ret; -} - -toml_datum_t toml_bool_in(const toml_table_t* arr, const char* key) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtob(toml_raw_in(arr, key), &ret.u.b)); - return ret; -} - -toml_datum_t toml_int_in(const toml_table_t* arr, const char* key) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtoi(toml_raw_in(arr, key), &ret.u.i)); - return ret; -} - -toml_datum_t toml_double_in(const toml_table_t* arr, const char* key) -{ - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtod(toml_raw_in(arr, key), &ret.u.d)); - return ret; -} - -toml_datum_t toml_timestamp_in(const toml_table_t* arr, const char* key) -{ - toml_timestamp_t ts; - toml_datum_t ret; - memset(&ret, 0, sizeof(ret)); - ret.ok = (0 == toml_rtots(toml_raw_in(arr, key), &ts)); - if (ret.ok) { - ret.ok = !!(ret.u.ts = malloc(sizeof(*ret.u.ts))); - if (ret.ok) { - *ret.u.ts = ts; - } - } - return ret; -} diff --git a/instrumentation/nginx/src/toml.h b/instrumentation/nginx/src/toml.h deleted file mode 100644 index 19f6f6469..000000000 --- a/instrumentation/nginx/src/toml.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - MIT License - - Copyright (c) 2017 - 2019 CK Tan - https://github.com/cktan/tomlc99 - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. -*/ -#ifndef TOML_H -#define TOML_H - - -#include -#include - - -#ifdef __cplusplus -#define TOML_EXTERN extern "C" -#else -#define TOML_EXTERN extern -#endif - -typedef struct toml_timestamp_t toml_timestamp_t; -typedef struct toml_table_t toml_table_t; -typedef struct toml_array_t toml_array_t; -typedef struct toml_datum_t toml_datum_t; - -/* Parse a file. Return a table on success, or 0 otherwise. - * Caller must toml_free(the-return-value) after use. - */ -TOML_EXTERN toml_table_t* toml_parse_file(FILE* fp, - char* errbuf, - int errbufsz); - -/* Parse a string containing the full config. - * Return a table on success, or 0 otherwise. - * Caller must toml_free(the-return-value) after use. - */ -TOML_EXTERN toml_table_t* toml_parse(char* conf, /* NUL terminated, please. */ - char* errbuf, - int errbufsz); - -/* Free the table returned by toml_parse() or toml_parse_file(). Once - * this function is called, any handles accessed through this tab - * directly or indirectly are no longer valid. - */ -TOML_EXTERN void toml_free(toml_table_t* tab); - - -/* Timestamp types. The year, month, day, hour, minute, second, z - * fields may be NULL if they are not relevant. e.g. In a DATE - * type, the hour, minute, second and z fields will be NULLs. - */ -struct toml_timestamp_t { - struct { /* internal. do not use. */ - int year, month, day; - int hour, minute, second, millisec; - char z[10]; - } __buffer; - int *year, *month, *day; - int *hour, *minute, *second, *millisec; - char* z; -}; - - -/*----------------------------------------------------------------- - * Enhanced access methods - */ -struct toml_datum_t { - int ok; - union { - toml_timestamp_t* ts; /* ts must be freed after use */ - char* s; /* string value. s must be freed after use */ - int b; /* bool value */ - int64_t i; /* int value */ - double d; /* double value */ - } u; -}; - -/* on arrays: */ -/* ... retrieve size of array. */ -TOML_EXTERN int toml_array_nelem(const toml_array_t* arr); -/* ... retrieve values using index. */ -TOML_EXTERN toml_datum_t toml_string_at(const toml_array_t* arr, int idx); -TOML_EXTERN toml_datum_t toml_bool_at(const toml_array_t* arr, int idx); -TOML_EXTERN toml_datum_t toml_int_at(const toml_array_t* arr, int idx); -TOML_EXTERN toml_datum_t toml_double_at(const toml_array_t* arr, int idx); -TOML_EXTERN toml_datum_t toml_timestamp_at(const toml_array_t* arr, int idx); -/* ... retrieve array or table using index. */ -TOML_EXTERN toml_array_t* toml_array_at(const toml_array_t* arr, int idx); -TOML_EXTERN toml_table_t* toml_table_at(const toml_array_t* arr, int idx); - -/* on tables: */ -/* ... retrieve the key in table at keyidx. Return 0 if out of range. */ -TOML_EXTERN const char* toml_key_in(const toml_table_t* tab, int keyidx); -/* ... retrieve values using key. */ -TOML_EXTERN toml_datum_t toml_string_in(const toml_table_t* arr, const char* key); -TOML_EXTERN toml_datum_t toml_bool_in(const toml_table_t* arr, const char* key); -TOML_EXTERN toml_datum_t toml_int_in(const toml_table_t* arr, const char* key); -TOML_EXTERN toml_datum_t toml_double_in(const toml_table_t* arr, const char* key); -TOML_EXTERN toml_datum_t toml_timestamp_in(const toml_table_t* arr, const char* key); -/* .. retrieve array or table using key. 
*/ -TOML_EXTERN toml_array_t* toml_array_in(const toml_table_t* tab, - const char* key); -TOML_EXTERN toml_table_t* toml_table_in(const toml_table_t* tab, - const char* key); - -/*----------------------------------------------------------------- - * lesser used - */ -/* Return the array kind: 't'able, 'a'rray, 'v'alue */ -TOML_EXTERN char toml_array_kind(const toml_array_t* arr); - -/* For array kind 'v'alue, return the type of values - i:int, d:double, b:bool, s:string, t:time, D:date, T:timestamp - 0 if unknown -*/ -TOML_EXTERN char toml_array_type(const toml_array_t* arr); - -/* Return the key of an array */ -TOML_EXTERN const char* toml_array_key(const toml_array_t* arr); - -/* Return the number of key-values in a table */ -TOML_EXTERN int toml_table_nkval(const toml_table_t* tab); - -/* Return the number of arrays in a table */ -TOML_EXTERN int toml_table_narr(const toml_table_t* tab); - -/* Return the number of sub-tables in a table */ -TOML_EXTERN int toml_table_ntab(const toml_table_t* tab); - -/* Return the key of a table*/ -TOML_EXTERN const char* toml_table_key(const toml_table_t* tab); - -/*-------------------------------------------------------------- - * misc - */ -TOML_EXTERN int toml_utf8_to_ucs(const char* orig, int len, int64_t* ret); -TOML_EXTERN int toml_ucs_to_utf8(int64_t code, char buf[6]); -TOML_EXTERN void toml_set_memutil(void* (*xxmalloc)(size_t), - void (*xxfree)(void*)); - - -/*-------------------------------------------------------------- - * deprecated - */ -/* A raw value, must be processed by toml_rto* before using. */ -typedef const char* toml_raw_t; -TOML_EXTERN toml_raw_t toml_raw_in(const toml_table_t* tab, const char* key); -TOML_EXTERN toml_raw_t toml_raw_at(const toml_array_t* arr, int idx); -TOML_EXTERN int toml_rtos(toml_raw_t s, char** ret); -TOML_EXTERN int toml_rtob(toml_raw_t s, int* ret); -TOML_EXTERN int toml_rtoi(toml_raw_t s, int64_t* ret); -TOML_EXTERN int toml_rtod(toml_raw_t s, double* ret); -TOML_EXTERN int toml_rtod_ex(toml_raw_t s, double* ret, char* buf, int buflen); -TOML_EXTERN int toml_rtots(toml_raw_t s, toml_timestamp_t* ret); - - -#endif /* TOML_H */ diff --git a/instrumentation/nginx/src/trace_context.cpp b/instrumentation/nginx/src/trace_context.cpp index 6b935695b..2491e69dc 100644 --- a/instrumentation/nginx/src/trace_context.cpp +++ b/instrumentation/nginx/src/trace_context.cpp @@ -1,5 +1,4 @@ #include "trace_context.h" -#include "nginx_utils.h" static TraceHeader* FindEmptyOrExistingSlot(TraceContext* context, opentelemetry::nostd::string_view traceType) { diff --git a/instrumentation/nginx/src/trace_context.h b/instrumentation/nginx/src/trace_context.h index de128e024..2d4aeca56 100644 --- a/instrumentation/nginx/src/trace_context.h +++ b/instrumentation/nginx/src/trace_context.h @@ -1,8 +1,6 @@ #pragma once #include -#include -#include "script.h" extern "C" { #include diff --git a/instrumentation/nginx/test/Dockerfile b/instrumentation/nginx/test/Dockerfile new file mode 100644 index 000000000..c466eb5f4 --- /dev/null +++ b/instrumentation/nginx/test/Dockerfile @@ -0,0 +1,70 @@ +ARG image=ubuntu:24.04 +FROM $image AS build + +ARG otel_cpp_version=1.17.0 +ARG nginx_version=1.27.3 + +RUN apt-get update && \ + apt-get install --no-install-recommends --no-install-suggests -y \ + build-essential \ + autoconf \ + libtool \ + pkg-config \ + ca-certificates \ + gcc \ + g++ \ + git \ + cmake \ + libcurl4-openssl-dev \ + libpcre3-dev \ + curl \ + apt-transport-https \ + software-properties-common \ + zlib1g-dev \ + 
libprotobuf-dev \
+    protobuf-compiler
+
+RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b v$otel_cpp_version \
+    https://github.com/open-telemetry/opentelemetry-cpp.git \
+    && cd opentelemetry-cpp \
+    && mkdir build \
+    && cd build \
+    && cmake -DCMAKE_BUILD_TYPE=Release \
+      -DWITH_OTLP_HTTP=ON \
+      -DWITH_OTLP_GRPC=OFF \
+      -DWITH_PROMETHEUS=OFF \
+      -DWITH_ELASTICSEARCH=OFF \
+      -DBUILD_TESTING=OFF \
+      -DWITH_EXAMPLES=OFF \
+      -DWITH_FUNC_TESTS=OFF \
+      -DOPENTELEMETRY_INSTALL=ON \
+      -DCMAKE_CXX_STANDARD=17 \
+      -DBUILD_SHARED_LIBS=OFF \
+      -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+      .. \
+    && cmake --build . --target all -j $(nproc) \
+    && cmake --install . --prefix /opentelemetry-cpp/install
+
+RUN mkdir -p otel-nginx/build && mkdir -p otel-nginx/src
+COPY src otel-nginx/src/
+COPY CMakeLists.txt nginx.cmake otel-nginx/
+RUN cd otel-nginx/build \
+  && cmake -DCMAKE_BUILD_TYPE=Release \
+    -DNGINX_VERSION=$nginx_version \
+    -DCMAKE_PREFIX_PATH=/opentelemetry-cpp/install \
+    -DCMAKE_INSTALL_PREFIX=/otel-nginx/install \
+    .. \
+  && make -j$(nproc) \
+  && make install
+
+FROM scratch AS export
+COPY --from=build /otel-nginx/install/otel_ngx_module.so .
+
+FROM build AS run
+
+COPY test/conf/nginx.conf /otel-nginx/nginx.conf
+RUN curl -O https://nginx.org/download/nginx-$nginx_version.tar.gz && tar xf nginx-$nginx_version.tar.gz
+RUN cd nginx-$nginx_version && ./configure --with-compat --prefix=/nginx && make -j$(nproc) && make install
+
+
+CMD ["/nginx/sbin/nginx", "-c", "/otel-nginx/nginx.conf"]
diff --git a/instrumentation/nginx/test/Dockerfile_alpine b/instrumentation/nginx/test/Dockerfile_alpine
new file mode 100644
index 000000000..ab5dba2fe
--- /dev/null
+++ b/instrumentation/nginx/test/Dockerfile_alpine
@@ -0,0 +1,54 @@
+ARG image=alpine:3.20
+FROM $image AS build
+
+ARG otel_cpp_version=1.17.0
+ARG nginx_version=1.27.3
+
+RUN apk update && \
+    apk add git cmake build-base curl curl-dev protobuf-dev pcre2-dev
+
+RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b v$otel_cpp_version \
+    https://github.com/open-telemetry/opentelemetry-cpp.git \
+    && cd opentelemetry-cpp \
+    && mkdir build \
+    && cd build \
+    && cmake -DCMAKE_BUILD_TYPE=Release \
+      -DWITH_OTLP_HTTP=ON \
+      -DWITH_OTLP_GRPC=OFF \
+      -DWITH_PROMETHEUS=OFF \
+      -DWITH_ELASTICSEARCH=OFF \
+      -DBUILD_TESTING=OFF \
+      -DWITH_EXAMPLES=OFF \
+      -DWITH_FUNC_TESTS=OFF \
+      -DOPENTELEMETRY_INSTALL=ON \
+      -DCMAKE_CXX_STANDARD=17 \
+      -DBUILD_SHARED_LIBS=OFF \
+      -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+      -DWITH_ABSEIL=ON \
+      .. \
+    && cmake --build . --target all -j $(nproc) \
+    && cmake --install . --prefix /opentelemetry-cpp/install
+
+RUN mkdir -p otel-nginx/build && mkdir -p otel-nginx/src
+COPY src otel-nginx/src/
+COPY CMakeLists.txt nginx.cmake otel-nginx/
+RUN cd otel-nginx/build \
+  && cmake -DCMAKE_BUILD_TYPE=Release \
+    -DNGINX_VERSION=$nginx_version \
+    -DWITH_ABSEIL=ON \
+    -DCMAKE_PREFIX_PATH=/opentelemetry-cpp/install \
+    -DCMAKE_INSTALL_PREFIX=/otel-nginx/install \
+    .. \
+  && make -j$(nproc) \
+  && make install
+
+FROM scratch AS export
+COPY --from=build /otel-nginx/install/otel_ngx_module.so .
+
+FROM build AS run
+
+COPY test/conf/nginx.conf /otel-nginx/nginx.conf
+RUN curl -O https://nginx.org/download/nginx-$nginx_version.tar.gz && tar xf nginx-$nginx_version.tar.gz
+RUN cd nginx-$nginx_version && ./configure --with-compat --prefix=/nginx && make -j$(nproc) && make install
+
+CMD ["/nginx/sbin/nginx", "-c", "/otel-nginx/nginx.conf"]
diff --git a/instrumentation/nginx/test/Dockerfile_amazonlinux b/instrumentation/nginx/test/Dockerfile_amazonlinux
new file mode 100644
index 000000000..79e03bbec
--- /dev/null
+++ b/instrumentation/nginx/test/Dockerfile_amazonlinux
@@ -0,0 +1,67 @@
+ARG image=amazonlinux:2
+FROM $image AS build
+
+ARG otel_cpp_version=1.17.0
+ARG nginx_version=1.27.3
+
+RUN yum check-update && \
+    yum install -y git gcc-c++ make tar curl-devel unzip zlib-devel pcre2-devel
+
+RUN curl -LO https://github.com/Kitware/CMake/releases/download/v3.31.1/cmake-3.31.1-linux-x86_64.tar.gz && tar xf cmake-3.31.1-linux-x86_64.tar.gz && mv cmake-3.31.1-linux-x86_64 cmake
+
+RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b v21.12 https://github.com/protocolbuffers/protobuf.git
+RUN cd protobuf \
+  && mkdir build \
+  && cd build \
+  && /cmake/bin/cmake \
+    -DCMAKE_INSTALL_PREFIX=/protobuf/install \
+    -Dprotobuf_BUILD_TESTS=OFF \
+    -DCMAKE_POSITION_INDEPENDENT_CODE=ON .. \
+    .. \
+  && /cmake/bin/cmake --build . --config Release -j$(nproc) \
+  && /cmake/bin/cmake --install .
+
+RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b v$otel_cpp_version \
+    https://github.com/open-telemetry/opentelemetry-cpp.git \
+    && cd opentelemetry-cpp \
+    && mkdir build \
+    && cd build \
+    && /cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release \
+      -DWITH_OTLP_HTTP=ON \
+      -DWITH_OTLP_GRPC=OFF \
+      -DWITH_PROMETHEUS=OFF \
+      -DWITH_ELASTICSEARCH=OFF \
+      -DBUILD_TESTING=OFF \
+      -DWITH_EXAMPLES=OFF \
+      -DWITH_FUNC_TESTS=OFF \
+      -DOPENTELEMETRY_INSTALL=ON \
+      -DCMAKE_CXX_STANDARD=14 \
+      -DBUILD_SHARED_LIBS=OFF \
+      -DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+      -DCMAKE_PREFIX_PATH=/protobuf/install \
+      .. \
+    && /cmake/bin/cmake --build . --target all -j$(nproc) \
+    && /cmake/bin/cmake --install . --prefix /opentelemetry-cpp/install
+
+RUN mkdir -p otel-nginx/build && mkdir -p otel-nginx/src
+COPY src otel-nginx/src/
+COPY CMakeLists.txt nginx.cmake otel-nginx/
+RUN cd otel-nginx/build \
+  && /cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release \
+    -DNGINX_VERSION=$nginx_version \
+    -DCMAKE_PREFIX_PATH="/opentelemetry-cpp/install;/protobuf/install" \
+    -DCMAKE_INSTALL_PREFIX=/otel-nginx/install \
+    .. \
+  && make -j$(nproc) \
+  && make install
+
+FROM scratch AS export
+COPY --from=build /otel-nginx/install/otel_ngx_module.so .
+
+FROM build AS run
+
+COPY test/conf/nginx.conf /otel-nginx/nginx.conf
+RUN curl -O https://nginx.org/download/nginx-$nginx_version.tar.gz && tar xf nginx-$nginx_version.tar.gz
+RUN cd nginx-$nginx_version && ./configure --with-compat --prefix=/nginx && make -j$(nproc) && make install
+
+CMD ["/nginx/sbin/nginx", "-c", "/otel-nginx/nginx.conf"]
diff --git a/instrumentation/nginx/test/backend/simple_express/Dockerfile b/instrumentation/nginx/test/backend/simple_express/Dockerfile
index e76dd6b73..9b1d68217 100644
--- a/instrumentation/nginx/test/backend/simple_express/Dockerfile
+++ b/instrumentation/nginx/test/backend/simple_express/Dockerfile
@@ -1,4 +1,4 @@
-FROM node:14-alpine
+FROM node:20-alpine
 
 COPY package.json package-lock.json index.js /
 RUN npm install --production
diff --git a/instrumentation/nginx/test/conf/collector.yml b/instrumentation/nginx/test/conf/collector.yml
index 9580a6a93..caaf00f6f 100644
--- a/instrumentation/nginx/test/conf/collector.yml
+++ b/instrumentation/nginx/test/conf/collector.yml
@@ -1,17 +1,21 @@
 receivers:
   otlp:
     protocols:
-      grpc:
+      http:
+        endpoint: 0.0.0.0:4318
 exporters:
-  logging:
-    logLevel: debug
+  debug:
+    verbosity: detailed
   file:
     path: /trace.json
+extensions:
+  health_check:
+    endpoint: 0.0.0.0:13133
+
 processors:
   batch:
 service:
+  extensions: [health_check]
   pipelines:
     traces:
       receivers: [otlp]
-      processors: [batch]
-      exporters: [logging, file]
+      exporters: [debug, file]
diff --git a/instrumentation/nginx/test/conf/nginx.conf b/instrumentation/nginx/test/conf/nginx.conf
index 8da508660..f77f38005 100644
--- a/instrumentation/nginx/test/conf/nginx.conf
+++ b/instrumentation/nginx/test/conf/nginx.conf
@@ -1,9 +1,12 @@
-load_module /usr/share/nginx/modules/otel_ngx_module.so;
+load_module /otel-nginx/install/otel_ngx_module.so;
+daemon off;
 
 events {}
 
 http {
-  opentelemetry_config /conf/otel-nginx.toml;
+  opentelemetry_service_name "nginx-proxy";
+  opentelemetry_otlp_traces_endpoint "http://collector:4318/v1/traces";
+  opentelemetry_span_processor "simple";
   opentelemetry_operation_name otel_test;
   opentelemetry_ignore_paths ignored.php;
   access_log stderr;
@@ -14,7 +17,7 @@ http {
   }
 
   server {
-    listen 8000;
+    listen 8080;
     server_name otel_test;
 
     root /var/www/html;
diff --git a/instrumentation/nginx/test/conf/otel-nginx.toml b/instrumentation/nginx/test/conf/otel-nginx.toml
deleted file mode 100644
index 2493539a9..000000000
--- a/instrumentation/nginx/test/conf/otel-nginx.toml
+++ /dev/null
@@ -1,19 +0,0 @@
-exporter = "otlp"
-processor = "simple"
-
-[exporters.otlp]
-host = "collector"
-port = 4317
-
-[exporters.jaeger]
-host = "localhost"
-port = 9090
-transport = "thrift_udp"
-
-[processors.batch]
-max_queue_size = 2048
-schedule_delay_millis = 5000
-max_export_batch_size = 512
-
-[service]
-name = "nginx-proxy"
diff --git a/instrumentation/nginx/test/docker-compose.yml b/instrumentation/nginx/test/docker-compose.yml
index 6bfd3fb52..57a9bff05 100644
--- a/instrumentation/nginx/test/docker-compose.yml
+++ b/instrumentation/nginx/test/docker-compose.yml
@@ -1,28 +1,38 @@
-version: "3"
 services:
   collector:
-    image: otel/opentelemetry-collector:0.26.0
+    image: otel/opentelemetry-collector:0.113.0
+    command: ["--config=/etc/otel/config.yml"]
     volumes:
-      - ${TEST_ROOT:-.}/conf/collector.yml:/etc/otel/config.yaml
+      - ${TEST_ROOT:-.}/conf/collector.yml:/etc/otel/config.yml
       - ${TEST_ROOT:-.}/data/trace.json:/trace.json
+    ports:
+      - "4318:4318"
+      - "13133:13133" # health check
   nginx:
     image: otel-nginx-test/nginx:latest
     volumes:
-      - 
${TEST_ROOT:-.}/conf/nginx.conf:/etc/nginx/nginx.conf + - ${TEST_ROOT:-.}/conf/nginx.conf:/otel-nginx/nginx.conf - ${TEST_ROOT:-.}/conf/fastcgi_params:/etc/nginx/fastcgi_params - - ${TEST_ROOT:-.}/conf/otel-nginx.toml:/conf/otel-nginx.toml - ${TEST_ROOT:-.}/backend/files:/var/www/html/files + environment: + - OTEL_EXPORTER_OTLP_ENDPOINT=collector:4318/v1/traces ports: - - "8000:8000" + - "8080:8080" command: - - /usr/sbin/nginx - - -g - - daemon off; + - /nginx/sbin/nginx + - -c + - /otel-nginx/nginx.conf + depends_on: + - node-backend + - php-backend + - collector node-backend: image: otel-nginx-test/express-backend:latest command: node index.js volumes: - ${TEST_ROOT:-.}/backend/simple_express:/app + ports: + - "3500:3500" php-backend: image: bitnami/php-fpm:7.4-prod volumes: diff --git a/instrumentation/nginx/test/instrumentation/lib/TraceProto.ex b/instrumentation/nginx/test/instrumentation/lib/TraceProto.ex new file mode 100644 index 000000000..7d164556c --- /dev/null +++ b/instrumentation/nginx/test/instrumentation/lib/TraceProto.ex @@ -0,0 +1,8 @@ +defmodule TraceProto do + defmodule SpanKind do + def unspecified, do: 0 + def internal, do: 1 + def server, do: 2 + def client, do: 3 + end +end diff --git a/instrumentation/nginx/test/instrumentation/lib/mix/tasks/dockerfiles.ex b/instrumentation/nginx/test/instrumentation/lib/mix/tasks/dockerfiles.ex deleted file mode 100644 index c3300137c..000000000 --- a/instrumentation/nginx/test/instrumentation/lib/mix/tasks/dockerfiles.ex +++ /dev/null @@ -1,223 +0,0 @@ -defmodule Mix.Tasks.Dockerfiles do - use Mix.Task - - @grpc_version "v1.49.2" - @otel_cpp_version "v1.8.1" - - def run([out_dir | combos]) do - out_dir_abs = Path.expand(out_dir) - - Enum.map(combos, fn v -> gen_dockerfile(v) end) - |> Enum.each(fn {job, content} -> - out_path = Path.join(out_dir_abs, filename(job)) - File.write!(out_path, content) - end) - end - - defp filename(%{os: os, version: version, nginx: nginx}) do - "Dockerfile.#{os}-#{version}.#{nginx}" - end - - defp parse_job(identifier) do - [distro, nginx] = String.split(identifier, ":") - [os, version] = String.split(distro, "-") - [ver_major, ver_minor] = String.split(version, ".") |> Enum.map(&String.to_integer/1) - %{os: os, version: version, nginx: nginx, version_major: ver_major, version_minor: ver_minor} - end - - defp gen_dockerfile(identifier) do - job = parse_job(identifier) - - {job, - Enum.join( - [ - header(job), - apt_install_base_pkgs(job), - custom_cmake(job), - custom_nginx(job), - apt_install_custom_pkgs(job), - build_steps(job) - ], - "\n" - )} - end - - defp header(%{os: os, version: version}) do - """ - ARG image=#{os}:#{version} - FROM $image AS build - """ - end - - defp default_packages() do - [ - "build-essential", - "autoconf", - "libtool", - "pkg-config", - "ca-certificates", - "gcc", - "g++", - "git", - "libcurl4-openssl-dev", - "libpcre3-dev", - "gnupg2", - "lsb-release", - "curl", - "apt-transport-https", - "software-properties-common", - "zlib1g-dev" - ] - end - - defp base_packages(%{nginx: "mainline"}), do: default_packages() - defp base_packages(%{nginx: "stable", version_major: ver_maj}) when ver_maj >= 20 do - ["nginx" | default_packages()] - end - defp base_packages(_), do: default_packages() - - defp base_packages_for_version(%{version_major: major}) when major >= 20, do: ["cmake"] - defp base_packages_for_version(_), do: [] - - defp custom_cmake(%{version_major: major}) when major >= 20, do: "" - - defp custom_cmake(%{os: "debian"}), do: "" - - defp custom_cmake(%{os: 
"ubuntu"}) do - """ - RUN curl -o /etc/apt/trusted.gpg.d/kitware.asc https://apt.kitware.com/keys/kitware-archive-latest.asc \\ - && apt-add-repository "deb https://apt.kitware.com/ubuntu/ `lsb_release -cs` main" - """ - end - - defp mainline_apt(%{os: "debian"}), do: "http://nginx.org/packages/mainline/debian" - defp mainline_apt(%{os: "ubuntu"}), do: "http://nginx.org/packages/mainline/ubuntu" - defp stable_apt(%{os: "debian"}), do: "http://nginx.org/packages/debian" - defp stable_apt(%{os: "ubuntu"}), do: "http://nginx.org/packages/ubuntu" - - defp custom_nginx_step(apt_url) do - """ - RUN curl -o /etc/apt/trusted.gpg.d/nginx_signing.asc https://nginx.org/keys/nginx_signing.key \\ - && apt-add-repository "deb #{apt_url} `lsb_release -cs` nginx" \\ - && /bin/bash -c 'echo -e "Package: *\\nPin: origin nginx.org\\nPin: release o=nginx\\nPin-Priority: 900"' | tee /etc/apt/preferences.d/99nginx - """ - end - - defp custom_nginx(%{nginx: "stable", os: "debian"} = job) do - custom_nginx_step(stable_apt(job)) - end - defp custom_nginx(%{nginx: "mainline", os: "debian"} = job) do - custom_nginx_step(mainline_apt(job)) - end - - defp custom_nginx(%{nginx: "mainline"} = job) do - custom_nginx_step(mainline_apt(job)) - end - - defp custom_nginx(%{nginx: "stable", os: "ubuntu", version_major: 18} = job) do - custom_nginx_step(stable_apt(job)) - end - - defp custom_nginx(_), do: "" - - defp custom_packages_for_version(%{os: "ubuntu", nginx: "stable", version_major: 18}) do - ["cmake", "nginx"] - end - defp custom_packages_for_version(%{version_major: ver_major}) when ver_major < 20, do: ["cmake"] - defp custom_packages_for_version(_), do: [] - - defp custom_packages(%{os: "debian"}) do - ["cmake", "nginx"] - end - defp custom_packages(%{nginx: "mainline"} = job) do - ["nginx" | custom_packages_for_version(job)] - end - - defp custom_packages(job), do: custom_packages_for_version(job) - - defp apt_install_base_pkgs(job) do - packages = base_packages(job) ++ base_packages_for_version(job) - package_install(packages) - end - - defp apt_install_custom_pkgs(job) do - custom_packages(job) - |> package_install() - end - - defp package_install([]), do: "" - - defp package_install(packages) do - """ - RUN apt-get update \\ - && DEBIAN_FRONTEND=noninteractive TZ="Europe/London" \\ - apt-get install --no-install-recommends --no-install-suggests -y \\ - #{combine(packages, " ")} - """ - end - - defp combine(lines, sep) do - Enum.join(lines, sep) - end - - defp build_steps(_) do - """ - RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b #{@grpc_version} \\ - https://github.com/grpc/grpc \\ - && cd grpc \\ - && mkdir -p cmake/build \\ - && cd cmake/build \\ - && cmake \\ - -DgRPC_INSTALL=ON \\ - -DgRPC_BUILD_TESTS=OFF \\ - -DCMAKE_INSTALL_PREFIX=/install \\ - -DCMAKE_BUILD_TYPE=Release \\ - -DgRPC_BUILD_GRPC_NODE_PLUGIN=OFF \\ - -DgRPC_BUILD_GRPC_OBJECTIVE_C_PLUGIN=OFF \\ - -DgRPC_BUILD_GRPC_PHP_PLUGIN=OFF \\ - -DgRPC_BUILD_GRPC_PHP_PLUGIN=OFF \\ - -DgRPC_BUILD_GRPC_PYTHON_PLUGIN=OFF \\ - -DgRPC_BUILD_GRPC_RUBY_PLUGIN=OFF \\ - -DCMAKE_CXX_STANDARD=17 \\ - ../.. 
\\ - && make -j2 \\ - && make install - - RUN git clone --shallow-submodules --depth 1 --recurse-submodules -b #{@otel_cpp_version} \\ - https://github.com/open-telemetry/opentelemetry-cpp.git \\ - && cd opentelemetry-cpp \\ - && mkdir build \\ - && cd build \\ - && cmake -DCMAKE_BUILD_TYPE=Release \\ - -DCMAKE_INSTALL_PREFIX=/install \\ - -DCMAKE_PREFIX_PATH=/install \\ - -DWITH_OTLP=ON \\ - -DWITH_OTLP_GRPC=ON \\ - -DWITH_OTLP_HTTP=OFF \\ - -DBUILD_TESTING=OFF \\ - -DWITH_EXAMPLES=OFF \\ - -DCMAKE_CXX_STANDARD=17 \\ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \\ - .. \\ - && make -j2 \\ - && make install - - RUN mkdir -p otel-nginx/build && mkdir -p otel-nginx/src - COPY src otel-nginx/src/ - COPY CMakeLists.txt nginx.cmake otel-nginx/ - RUN cd otel-nginx/build \\ - && cmake -DCMAKE_BUILD_TYPE=Release \\ - -DCMAKE_PREFIX_PATH=/install \\ - -DCMAKE_INSTALL_PREFIX=/usr/share/nginx/modules \\ - .. \\ - && make -j2 \\ - && make install - - FROM scratch AS export - COPY --from=build /otel-nginx/build/otel_ngx_module.so . - - FROM build AS run - CMD ["/usr/sbin/nginx", "-g", "daemon off;"] - """ - end -end \ No newline at end of file diff --git a/instrumentation/nginx/test/instrumentation/mix.exs b/instrumentation/nginx/test/instrumentation/mix.exs index f03e033c7..2ca6e43a0 100644 --- a/instrumentation/nginx/test/instrumentation/mix.exs +++ b/instrumentation/nginx/test/instrumentation/mix.exs @@ -8,13 +8,17 @@ defmodule Instrumentation.MixProject do elixir: "~> 1.11", start_permanent: Mix.env() == :prod, deps: deps() + # elixirc_paths: elixirc_paths(Mix.env()) ] end + # defp elixirc_paths(:test), do: ["lib", "test/helpers"] + # defp elixirc_paths + defp deps do [ - {:httpoison, "1.8.0"}, - {:jason, "1.2.2"} + {:httpoison, "2.2.1"}, + {:jason, "1.4.4"} ] end end diff --git a/instrumentation/nginx/test/instrumentation/mix.lock b/instrumentation/nginx/test/instrumentation/mix.lock index 857e573ca..ed617ba46 100644 --- a/instrumentation/nginx/test/instrumentation/mix.lock +++ b/instrumentation/nginx/test/instrumentation/mix.lock @@ -1,12 +1,12 @@ %{ - "certifi": {:hex, :certifi, "2.5.3", "70bdd7e7188c804f3a30ee0e7c99655bc35d8ac41c23e12325f36ab449b70651", [:rebar3], [{:parse_trans, "~>3.3", [hex: :parse_trans, repo: "hexpm", optional: false]}], "hexpm", "ed516acb3929b101208a9d700062d520f3953da3b6b918d866106ffa980e1c10"}, - "hackney": {:hex, :hackney, "1.17.0", "717ea195fd2f898d9fe9f1ce0afcc2621a41ecfe137fae57e7fe6e9484b9aa99", [:rebar3], [{:certifi, "~>2.5", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~>6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~>1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~>1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "~>3.3", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~>1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "64c22225f1ea8855f584720c0e5b3cd14095703af1c9fbc845ba042811dc671c"}, - "httpoison": {:hex, :httpoison, "1.8.0", "6b85dea15820b7804ef607ff78406ab449dd78bed923a49c7160e1886e987a3d", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "28089eaa98cf90c66265b6b5ad87c59a3729bea2e74e9d08f9b51eb9729b3c3a"}, + "certifi": {:hex, :certifi, "2.12.0", "2d1cca2ec95f59643862af91f001478c9863c2ac9cb6e2f89780bfd8de987329", [:rebar3], [], "hexpm", 
"ee68d85df22e554040cdb4be100f33873ac6051387baf6a8f6ce82272340ff1c"}, + "hackney": {:hex, :hackney, "1.20.1", "8d97aec62ddddd757d128bfd1df6c5861093419f8f7a4223823537bad5d064e2", [:rebar3], [{:certifi, "~> 2.12.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.4.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "fe9094e5f1a2a2c0a7d10918fee36bfec0ec2a979994cff8cfe8058cd9af38e3"}, + "httpoison": {:hex, :httpoison, "2.2.1", "87b7ed6d95db0389f7df02779644171d7319d319178f6680438167d7b69b1f3d", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "51364e6d2f429d80e14fe4b5f8e39719cacd03eb3f9a9286e61e216feac2d2df"}, "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~>0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, - "jason": {:hex, :jason, "1.2.2", "ba43e3f2709fd1aa1dce90aaabfd039d000469c05c56f0b8e31978e03fa39052", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "18a228f5f0058ee183f29f9eae0805c6e59d61c3b006760668d8d18ff0d12179"}, + "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, - "mimerl": {:hex, :mimerl, "1.2.0", "67e2d3f571088d5cfd3e550c383094b47159f3eee8ffa08e64106cdf5e981be3", [:rebar3], [], "hexpm", "f278585650aa581986264638ebf698f8bb19df297f66ad91b18910dfc6e19323"}, - "parse_trans": {:hex, :parse_trans, "3.3.1", "16328ab840cc09919bd10dab29e431da3af9e9e7e7e6f0089dd5a2d2820011d8", [:rebar3], [], "hexpm", "07cd9577885f56362d414e8c4c4e6bdf10d43a8767abb92d24cbe8b24c54888b"}, - "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.6", "cf344f5692c82d2cd7554f5ec8fd961548d4fd09e7d22f5b62482e5aeaebd4b0", [:make, :mix, :rebar3], [], "hexpm", "bdb0d2471f453c88ff3908e7686f86f9be327d065cc1ec16fa4540197ea04680"}, + "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, + "parse_trans": {:hex, :parse_trans, "3.4.1", "6e6aa8167cb44cc8f39441d05193be6e6f4e7c2946cb2759f015f8c56b76e5ff", [:rebar3], [], "hexpm", "620a406ce75dada827b82e453c19cf06776be266f5a67cff34e1ef2cbb60e49a"}, + "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", 
"25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, } diff --git a/instrumentation/nginx/test/instrumentation/test/instrumentation_test.exs b/instrumentation/nginx/test/instrumentation/test/instrumentation_test.exs index 706b360bb..122b8b21d 100644 --- a/instrumentation/nginx/test/instrumentation/test/instrumentation_test.exs +++ b/instrumentation/nginx/test/instrumentation/test/instrumentation_test.exs @@ -1,7 +1,8 @@ defmodule InstrumentationTest do use ExUnit.Case - @host "localhost:8000" + @host "localhost:8080" + @collector_healthcheck "localhost:13133" @traces_path "../data/trace.json" def has_line(lines, re) do @@ -21,39 +22,31 @@ defmodule InstrumentationTest do end end - def wait_nginx() do - poll_nginx(30) + def poll_collector(0), do: raise("Timed out waiting for collector") + + def poll_collector(attempts_remaining) do + case HTTPoison.get(@collector_healthcheck) do + {:ok, %HTTPoison.Response{status_code: 200}} -> + :ready + + _ -> + Process.sleep(200) + poll_collector(attempts_remaining - 1) + end end - def wait_until_ready(_, %{:collector => true, :express => true, :nginx => false}) do - wait_nginx() - :ready + def wait_nginx() do + poll_nginx(30) end - def wait_until_ready(port, ctx) do - receive do - {_, {:data, output}} -> - IO.puts(output) - lines = String.split(output, "\n", trim: true) - - has_collector = ctx[:collector] || has_line(lines, ~r/everything is ready/i) - has_express = ctx[:express] || has_line(lines, ~r/simple_express ready/i) - - wait_until_ready( - port, - Map.merge(ctx, %{ - collector: has_collector, - express: has_express, - nginx: false - }) - ) - after - 30_000 -> raise "Timed out waiting for docker containers" - end + def wait_collector() do + poll_collector(30) end - def wait_until_ready(port) do - wait_until_ready(port, %{}) + def wait_until_ready(_port) do + wait_nginx() + wait_collector() + :ready end def read_traces(_file, num_traces, _timeout, traces) when num_traces <= 0, do: traces @@ -72,7 +65,7 @@ defmodule InstrumentationTest do end end - def read_traces(file, num_traces, timeout \\ 1_000) do + def read_traces(file, num_traces, timeout \\ 5_000) do read_traces(file, num_traces, timeout, []) end @@ -82,8 +75,8 @@ defmodule InstrumentationTest do def collect_spans(trace) do [resource_spans] = collect_resource_spans(trace) - [il_spans] = resource_spans["instrumentationLibrarySpans"] - il_spans["spans"] + [scope_spans] = resource_spans["scopeSpans"] + scope_spans["spans"] end def values(map) do @@ -129,9 +122,11 @@ defmodule InstrumentationTest do setup_all do File.chmod!(@traces_path, 0o666) - port = Port.open({:spawn, "docker-compose up"}, [:binary]) + port = Port.open({:spawn, "docker compose up"}, [:binary]) - on_exit(fn -> System.cmd("docker-compose", ["down"]) end) + on_exit(fn -> + System.cmd("docker", ["compose", "down"]) + end) wait_until_ready(port) @@ -162,14 +157,18 @@ defmodule InstrumentationTest do end test "HTTP upstream | span attributes", %{trace_file: trace_file} do - %HTTPoison.Response{status_code: status} = HTTPoison.get!("#{@host}/?foo=bar&x=42", [{"User-Agent", "otel-test"}, {"My-Header", "My-Value"}]) + %HTTPoison.Response{status_code: status} = + HTTPoison.get!("#{@host}/?foo=bar&x=42", [ + {"User-Agent", "otel-test"}, + {"My-Header", "My-Value"} + ]) [trace] = read_traces(trace_file, 1) [span] = collect_spans(trace) assert status == 200 - assert attrib(span, "net.host.port") == 8000 + assert attrib(span, "net.host.port") == 8080 assert attrib(span, "net.peer.ip") =~ 
~r/\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/ assert attrib(span, "net.peer.port") > 0 @@ -185,12 +184,15 @@ defmodule InstrumentationTest do assert attrib(span, "http.request.header.user_agent") == nil assert attrib(span, "http.request.header.my_header") == nil - assert span["kind"] == "SPAN_KIND_SERVER" + assert span["kind"] == TraceProto.SpanKind.server() assert span["name"] == "simple_backend" end - test "location with opentelemetry_capture_headers on should capture headers", %{trace_file: trace_file} do - %HTTPoison.Response{status_code: status} = HTTPoison.get!("#{@host}/capture_headers", [{"Request-Header", "Request-Value"}]) + test "location with opentelemetry_capture_headers on should capture headers", %{ + trace_file: trace_file + } do + %HTTPoison.Response{status_code: status} = + HTTPoison.get!("#{@host}/capture_headers", [{"Request-Header", "Request-Value"}]) [trace] = read_traces(trace_file, 1) [span] = collect_spans(trace) @@ -203,8 +205,12 @@ defmodule InstrumentationTest do assert attrib(span, "http.response.header.response_header") == ["Response-Value"] end - test "location with opentelemetry_capture_headers and sensitive header name should redact header value", %{trace_file: trace_file} do - %HTTPoison.Response{status_code: status} = HTTPoison.get!("#{@host}/capture_headers_with_sensitive_header_name", [{"Request-Header", "Foo"}]) + test "location with opentelemetry_capture_headers and sensitive header name should redact header value", + %{trace_file: trace_file} do + %HTTPoison.Response{status_code: status} = + HTTPoison.get!("#{@host}/capture_headers_with_sensitive_header_name", [ + {"Request-Header", "Foo"} + ]) [trace] = read_traces(trace_file, 1) [span] = collect_spans(trace) @@ -217,8 +223,12 @@ defmodule InstrumentationTest do assert attrib(span, "http.response.header.response_header") == ["[REDACTED]"] end - test "location with opentelemetry_capture_headers and sensitive header value should redact header value", %{trace_file: trace_file} do - %HTTPoison.Response{status_code: status} = HTTPoison.get!("#{@host}/capture_headers_with_sensitive_header_value", [{"Bar", "Request-Value"}]) + test "location with opentelemetry_capture_headers and sensitive header value should redact header value", + %{trace_file: trace_file} do + %HTTPoison.Response{status_code: status} = + HTTPoison.get!("#{@host}/capture_headers_with_sensitive_header_value", [ + {"Bar", "Request-Value"} + ]) [trace] = read_traces(trace_file, 1) [span] = collect_spans(trace) @@ -231,7 +241,9 @@ defmodule InstrumentationTest do assert attrib(span, "http.response.header.bar") == ["[REDACTED]"] end - test "location without operation name should use operation name from server", %{trace_file: trace_file} do + test "location without operation name should use operation name from server", %{ + trace_file: trace_file + } do %HTTPoison.Response{status_code: status} = HTTPoison.get!("#{@host}/no_operation_name") [trace] = read_traces(trace_file, 1) @@ -247,7 +259,7 @@ defmodule InstrumentationTest do %HTTPoison.Response{status_code: status, body: body} = HTTPoison.get!(url, [ - {"traceparent", "00-#{input_trace_id}-#{parent_span_id}-00"} + {"traceparent", "00-#{input_trace_id}-#{parent_span_id}-01"} ]) %{"traceparent" => traceparent} = Jason.decode!(body) @@ -296,7 +308,7 @@ defmodule InstrumentationTest do assert attrib(span, "http.status_code") == 200 assert span["parentSpanId"] == "" - assert span["kind"] == "SPAN_KIND_SERVER" + assert span["kind"] == TraceProto.SpanKind.server() assert span["name"] == "php_fpm_backend" 
end @@ -392,12 +404,12 @@ defmodule InstrumentationTest do assert attrib(span, "http.status_code") == 200 assert span["parentSpanId"] == "" - assert span["kind"] == "SPAN_KIND_SERVER" + assert span["kind"] == TraceProto.SpanKind.server() assert span["name"] == "file_access" end test "Accessing a excluded uri produces no span", %{trace_file: trace_file} do - %HTTPoison.Response{status_code: status, body: body} = + %HTTPoison.Response{status_code: status, body: _body} = HTTPoison.get!("#{@host}/ignored.php") assert_raise RuntimeError, "timed out waiting for traces", fn -> @@ -426,7 +438,7 @@ defmodule InstrumentationTest do %HTTPoison.Response{status_code: status} = HTTPoison.get!("#{@host}/distrust_incoming_spans", [ - {"traceparent", "00-#{input_trace_id}-2a9d49c3e3b7c461-00"} + {"traceparent", "00-#{input_trace_id}-2a9d49c3e3b7c461-01"} ]) [trace] = read_traces(trace_file, 1) @@ -492,7 +504,7 @@ defmodule InstrumentationTest do end test "Accessing 301 redirect does not crash", %{ - trace_file: trace_file + trace_file: _trace_file } do %HTTPoison.Response{status_code: status, headers: headers} = HTTPoison.get!(