Compare commits


No commits in common. "main" and "version-0.1.0" have entirely different histories.

19 changed files with 214 additions and 1245 deletions

View file

@ -1,6 +1,6 @@
### Proposed changes
Describe the use case and detail of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue using the `fix` [keyword](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) here in this description and in the corresponding commit message.
Describe the use case and detail of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue using one of the [supported keywords](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue) here in this description (not in the title of the PR).
### Checklist
@ -9,3 +9,4 @@ Before creating a PR, run through this checklist and mark each as complete.
- [ ] I have read the [`CONTRIBUTING`](https://github.com/nginxinc/nginx-otel/blob/main/CONTRIBUTING.md) document
- [ ] If applicable, I have added tests that prove my fix is effective or that my feature works
- [ ] If applicable, I have checked that any relevant tests pass after adding my changes
- [ ] I have updated any relevant documentation ([`README.md`](https://github.com/nginxinc/nginx-otel/blob/main/README.md) and [`CHANGELOG.md`](https://github.com/nginxinc/nginx-otel/blob/main/CHANGELOG.md))

View file

@ -1,49 +0,0 @@
name: Ubuntu build
on:
push:
branches:
- main
pull_request:
jobs:
build-module:
runs-on: ubuntu-22.04
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y cmake libc-ares-dev
- name: Checkout nginx
uses: actions/checkout@v4
with:
repository: nginx/nginx
path: nginx
- name: Build nginx
working-directory: nginx
run: |
auto/configure --with-compat --with-debug --with-http_ssl_module \
--with-http_v2_module --with-http_v3_module
make -j $(nproc)
- name: Build module
run: |
mkdir build
cd build
cmake -DNGX_OTEL_NGINX_BUILD_DIR=${PWD}/../nginx/objs \
-DNGX_OTEL_DEV=ON ..
make -j $(nproc)
- name: Download otelcol
run: |
LATEST=open-telemetry/opentelemetry-collector-releases/releases/latest
TAG=$(curl -s https://api.github.com/repos/${LATEST} |
jq -r .tag_name)
curl -sLo - https://github.com/${LATEST}/download/\
otelcol_${TAG:1}_linux_amd64.tar.gz | tar -xzv
- name: Install test dependencies
run: pip install -r tests/requirements.txt
- name: Run tests
run: |
pytest tests --maxfail=10 --nginx=nginx/objs/nginx \
--module=build/ngx_otel_module.so --otelcol=./otelcol

View file

@ -6,12 +6,8 @@ set(NGX_OTEL_NGINX_BUILD_DIR ""
set(NGX_OTEL_NGINX_DIR "${NGX_OTEL_NGINX_BUILD_DIR}/.."
CACHE PATH "Nginx source dir")
set(NGX_OTEL_GRPC e241f37befe7ba4688effd84bfbf99b0f681a2f7 # v1.49.4
CACHE STRING "gRPC tag to download or 'package' to use preinstalled")
set(NGX_OTEL_SDK 11d5d9e0d8fd8ba876c8994714cc2647479b6574 # v1.11.0
CACHE STRING "OTel SDK tag to download or 'package' to use preinstalled")
set(NGX_OTEL_PROTO_DIR "" CACHE PATH "OTel proto files root")
set(NGX_OTEL_DEV OFF CACHE BOOL "Enforce compiler warnings")
set(NGX_OTEL_FETCH_DEPS ON CACHE BOOL "Download dependencies")
set(NGX_OTEL_PROTO_DIR "" CACHE PATH "OTel proto files root")
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE RelWithDebInfo)
@ -19,52 +15,27 @@ endif()
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
if(NGX_OTEL_GRPC STREQUAL "package")
find_package(protobuf REQUIRED)
find_package(gRPC REQUIRED)
else()
if(NGX_OTEL_FETCH_DEPS)
include(FetchContent)
FetchContent_Declare(
grpc
GIT_REPOSITORY https://github.com/grpc/grpc
GIT_TAG ${NGX_OTEL_GRPC}
GIT_SUBMODULES third_party/protobuf third_party/abseil-cpp third_party/re2
GIT_TAG 18dda3c586b2607d8daead6b97922e59d867bb7d # v1.46.6
GIT_SUBMODULES third_party/protobuf third_party/abseil-cpp
GIT_SHALLOW ON)
set(gRPC_USE_PROTO_LITE ON CACHE INTERNAL "")
set(gRPC_INSTALL OFF CACHE INTERNAL "")
set(gRPC_USE_SYSTEMD OFF CACHE INTERNAL "")
set(gRPC_DOWNLOAD_ARCHIVES OFF CACHE INTERNAL "")
set(gRPC_CARES_PROVIDER package CACHE INTERNAL "")
set(gRPC_RE2_PROVIDER package CACHE INTERNAL "")
set(gRPC_SSL_PROVIDER package CACHE INTERNAL "")
set(gRPC_ZLIB_PROVIDER package CACHE INTERNAL "")
set(protobuf_INSTALL OFF CACHE INTERNAL "")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
FetchContent_MakeAvailable(grpc)
# reconsider once https://github.com/grpc/grpc/issues/36023 is done
target_compile_definitions(grpc PRIVATE GRPC_NO_XDS GRPC_NO_RLS)
set_property(DIRECTORY ${grpc_SOURCE_DIR}
PROPERTY EXCLUDE_FROM_ALL YES)
add_library(gRPC::grpc++ ALIAS grpc++)
add_executable(gRPC::grpc_cpp_plugin ALIAS grpc_cpp_plugin)
endif()
if(NGX_OTEL_SDK STREQUAL "package")
find_package(opentelemetry-cpp REQUIRED)
else()
include(FetchContent)
FetchContent_Declare(
otelcpp
GIT_REPOSITORY https://github.com/open-telemetry/opentelemetry-cpp
GIT_TAG ${NGX_OTEL_SDK}
GIT_TAG 57bf8c2b0e85215a61602f559522d08caa4d2fb8 # v1.8.1
GIT_SUBMODULES third_party/opentelemetry-proto
GIT_SHALLOW ON)
@ -74,8 +45,10 @@ else()
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_POLICY_DEFAULT_CMP0063 NEW)
FetchContent_MakeAvailable(otelcpp)
FetchContent_MakeAvailable(grpc otelcpp)
set_property(DIRECTORY ${grpc_SOURCE_DIR}
PROPERTY EXCLUDE_FROM_ALL YES)
set_property(DIRECTORY ${otelcpp_SOURCE_DIR}
PROPERTY EXCLUDE_FROM_ALL YES)
@ -85,6 +58,12 @@ else()
endif()
add_library(opentelemetry-cpp::trace ALIAS opentelemetry_trace)
add_library(gRPC::grpc++ ALIAS grpc++)
add_executable(gRPC::grpc_cpp_plugin ALIAS grpc_cpp_plugin)
else()
find_package(opentelemetry-cpp REQUIRED)
find_package(protobuf REQUIRED)
find_package(gRPC REQUIRED)
endif()
set(PROTO_DIR ${NGX_OTEL_PROTO_DIR})
@ -112,22 +91,19 @@ add_custom_command(
--plugin protoc-gen-grpc=$<TARGET_FILE:gRPC::grpc_cpp_plugin>
${PROTOS}
# remove inconsequential UTF8 check during serialization to aid performance
COMMAND sed -i.bak -E
-e [[/ ::(PROTOBUF_NAMESPACE_ID|google::protobuf)::internal::WireFormatLite::VerifyUtf8String\(/,/\);/d]]
COMMAND sed -i.bak
-e [[/ ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(/,/);/d]]
${PROTO_SOURCES}
DEPENDS ${PROTOS} protobuf::protoc gRPC::grpc_cpp_plugin
VERBATIM)
if (NGX_OTEL_DEV)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_EXTENSIONS OFF)
add_compile_options(-Wall -Wtype-limits -Werror)
endif()
add_compile_options(-Wall -Wtype-limits -Werror)
add_library(ngx_otel_module MODULE
src/http_module.cpp
src/grpc_log.cpp
src/modules.c
${PROTO_SOURCES})
@ -137,21 +113,14 @@ set_target_properties(ngx_otel_module PROPERTIES PREFIX "")
# can't use OTel's WITH_ABSEIL until cmake 3.24, as it triggers find_package()
target_compile_definitions(ngx_otel_module PRIVATE HAVE_ABSEIL)
if (APPLE)
target_link_options(ngx_otel_module PRIVATE -undefined dynamic_lookup)
endif()
target_include_directories(ngx_otel_module PRIVATE
${NGX_OTEL_NGINX_BUILD_DIR}
${NGX_OTEL_NGINX_DIR}/src/core
${NGX_OTEL_NGINX_DIR}/src/event
${NGX_OTEL_NGINX_DIR}/src/event/modules
${NGX_OTEL_NGINX_DIR}/src/event/quic
${NGX_OTEL_NGINX_DIR}/src/os/unix
${NGX_OTEL_NGINX_DIR}/src/http
${NGX_OTEL_NGINX_DIR}/src/http/modules
${NGX_OTEL_NGINX_DIR}/src/http/v2
${NGX_OTEL_NGINX_DIR}/src/http/v3
${PROTO_OUT_DIR})
target_link_libraries(ngx_otel_module

View file

@ -1,74 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or
advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the moderation team at nginx-oss-community@f5.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>
For answers to common questions about this code of conduct, see
<https://www.contributor-covenant.org/faq>

View file

@ -1,51 +0,0 @@
# Contributing Guidelines
The following is a set of guidelines for contributing to this project. We really appreciate that you are considering contributing!
#### Table Of Contents
[Getting Started](#getting-started)
[Contributing](#contributing)
[Code Guidelines](#code-guidelines)
[Code of Conduct](https://github.com/nginxinc/nginx-otel/blob/main/CODE_OF_CONDUCT.md)
## Getting Started
Follow our [Getting Started Guide](https://github.com/nginxinc/nginx-otel/blob/main/README.md) to get this project up and running.
<!-- ### Project Structure (OPTIONAL) -->
## Contributing
### Report a Bug
To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please ensure the bug has not already been reported. **If the bug is a potential security vulnerability, please report it using our [security policy](https://github.com/nginxinc/nginx-otel/blob/main/SECURITY.md).**
### Suggest a Feature or Enhancement
To suggest a new feature or other improvement, create an issue on GitHub and choose the type 'Feature request'. Please fill in the template as provided.
### Open a Pull Request
- Fork the repo, create a branch, implement your changes, add any relevant tests, submit a PR when your changes are **tested** and ready for review.
- Fill in [our pull request template](https://github.com/nginxinc/nginx-otel/blob/main/.github/pull_request_template.md).
## Code Guidelines
### NGINX Code Guidelines
Before diving into the NGINX codebase or contributing, it's important to understand the fundamental principles and techniques outlined in the [NGINX Development Guide](http://nginx.org/en/docs/dev/development_guide.html).
### Git Guidelines
- Keep a clean, concise and meaningful git commit history on your branch (within reason), rebasing locally and squashing before submitting a PR.
- Follow the guidelines below for writing commit messages:
- In the subject line, use the present tense ("Add feature" not "Added feature").
- In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...").
- End subject line with a period.
- Limit the subject line to 72 characters or less.
- Reference issues in the subject line and/or body.
- Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`).

NOTICE (new file, 16 additions)
View file

@ -0,0 +1,16 @@
NGINX OTel.
Copyright 2017-2023 NGINX, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md (216 changed lines)
View file

@ -1,77 +1,51 @@
# NGINX Native OpenTelemetry (OTel) Module
# nginx_otel
## What is OpenTelemetry
OpenTelemetry (OTel) is an observability framework for monitoring, tracing, troubleshooting, and optimizing applications. OTel enables the collection of telemetry data from a deployed application stack.
This project provides support for OpenTelemetry distributed tracing in Nginx, offering:
## What is the NGINX Native OTel Module
The `ngx_otel_module` dynamic module enables NGINX Open Source or NGINX Plus to send telemetry data to an OTel collector. It provides support for [W3C trace context](https://www.w3.org/TR/trace-context/) propagation and OpenTelemetry Protocol (OTLP)/gRPC trace export, and offers several benefits over existing OTel modules, including:
- Lightweight and high-performance incoming HTTP request tracing
- [W3C trace context](https://www.w3.org/TR/trace-context/) propagation
- OTLP/gRPC trace export
- Fully Dynamic Variable-Based Sampling
### Better Performance ###
Third-party OTel implementations can reduce request-processing performance by as much as 50% when tracing is enabled. The NGINX Native module limits this impact to approximately 10-15%.
## Building
### Easy Provisioning ###
Setup and configuration can be done right in NGINX configuration files.
### Dynamic, Variable-Based Control ###
Trace parameters can be controlled dynamically using cookies, tokens, and variables. Please see our [Ratio-based Tracing](#ratio-based-tracing) example for more details.
Additionally, [NGINX Plus](https://www.nginx.com/products/nginx/), available as part of a [commercial subscription](https://www.nginx.com/products/), enables dynamic control of sampling parameters via the [NGINX Plus API](http://nginx.org/en/docs/http/ngx_http_api_module.html) and [key-value store](http://nginx.org/en/docs/http/ngx_http_keyval_module.html) modules.
## Installing
Prebuilt packages of the module are available for easy installation. Follow these steps to install NGINX Open Source with the OTel module. See the list of [compatible operating systems](https://nginx.org/en/linux_packages.html#distributions).
### Adding Package Repositories and Installing NGINX Open Source
Follow the official NGINX Open Source [installation steps](https://nginx.org/en/linux_packages.html#instructions) to set up package repositories for your specific operating system and install NGINX.
**Important:** To ensure module compatibility, you must use officially distributed NGINX binaries. Compatibility with community distributed binaries, commonly available through various operating system vendors, is not guaranteed.
### Installing the OTel Module from Packages
Once remote package repositories have been added and local package records have been updated, you may install the OTel module (`nginx-module-otel`) for your specific operating system. As an example, run the following commands to install on:
#### RedHat, RHEL and Derivatives
Install build tools and dependencies:
```bash
sudo yum install nginx-module-otel
$ sudo apt install cmake build-essential libssl-dev zlib1g-dev libpcre3-dev
$ sudo apt install pkg-config libc-ares-dev libre2-dev # for gRPC
```
#### Debian, Ubuntu and derivatives
Configure Nginx:
```bash
sudo apt install nginx-module-otel
$ ./configure --with-compat
```
### Enabling the OTel Module
Following the installation steps above will install the module into `/etc/nginx/modules` by default. Load the module by adding the following line to the top of the main NGINX configuration file, located at `/etc/nginx/nginx.conf`.
```nginx
load_module modules/ngx_otel_module.so;
Configure and build Nginx OTel module:
```bash
$ mkdir build
$ cd build
$ cmake -DNGX_OTEL_NGINX_BUILD_DIR=/path/to/configured/nginx/objs ..
$ make
```
## Configuring the Module
For a complete list of directives, embedded variables, default span attributes and sample configurations, please refer to the [`ngx_otel_module` documentation](https://nginx.org/en/docs/ngx_otel_module.html).
## Examples
Use these examples to configure some common use cases for OTel tracing.
## Getting Started
### Simple Tracing
This example sends telemetry data for all http requests.
Tracing all requests can be useful even in a non-distributed environment.
```nginx
http {
otel_exporter {
endpoint localhost:4317;
}
otel_trace on;
server {
location / {
proxy_pass http://backend;
}
}
}
http {
otel_trace on;
server {
location / {
proxy_pass http://backend;
}
}
}
```
### Parent-based Tracing
In this example, we inherit trace contexts from incoming requests and record spans only if a parent span is sampled. We also propagate trace contexts and sampling decisions to upstream servers.
```nginx
http {
@ -87,7 +61,6 @@ http {
```
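A rough sketch of one such setup, assuming a hypothetical `backend` upstream (the exporter endpoint is also illustrative):
```nginx
http {
    otel_exporter {
        endpoint localhost:4317;
    }

    server {
        location / {
            # record a span only when the incoming parent span was sampled
            otel_trace $otel_parent_sampled;
            # reuse the incoming trace context and pass the updated context upstream
            otel_trace_context propagate;
            proxy_pass http://backend;
        }
    }
}
```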
### Ratio-based Tracing
In this ratio-based example, tracing is enabled for a percentage of traffic (in this case, 10%):
```nginx
http {
@ -114,80 +87,105 @@ http {
}
```
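A rough sketch of one way to express this, using `split_clients` to derive the `otel_trace` argument (the hash key, variable name, and `backend` upstream are placeholders):
```nginx
http {
    otel_exporter {
        endpoint localhost:4317;
    }

    # hash each request so roughly 10% of requests enable tracing
    split_clients "$request_id" $otel_sample {
        10%     on;
        *       off;
    }

    server {
        location / {
            otel_trace         $otel_sample;
            otel_trace_context propagate;
            proxy_pass http://backend;
        }
    }
}
```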
## Collecting and Viewing Traces
There are several methods and available software packages for viewing traces. For a quick start, [Jaeger](https://www.jaegertracing.io/) provides an all-in-one container to collect, process and view OTel trace data. Follow [these steps](https://www.jaegertracing.io/docs/next-release/deployment/#all-in-one) to download, install, launch and use Jaeger's OTel services.
## How to Use
## Building
Follow these steps to build the `ngx_otel_module` dynamic module on Ubuntu or Debian-based systems:
### Directives
Install build tools and dependencies.
```bash
sudo apt install cmake build-essential libssl-dev zlib1g-dev libpcre3-dev
sudo apt install pkg-config libc-ares-dev libre2-dev # for gRPC
```
#### Available in `http/server/location` contexts
For the next step, you will need the `configure` script that is packaged with the NGINX source code. There are several methods for obtaining NGINX sources. You may choose to [download](http://hg.nginx.org/nginx/archive/tip.tar.gz) them or clone them directly from the [NGINX GitHub repository](https://github.com/nginx/nginx).
**`otel_trace`** `on | off | "$var";`
**Important:** To ensure compatibility, the `ngx_otel_module` and the NGINX binary it will be used with must be built from the same NGINX source code on the same operating system. We will build and install NGINX from the obtained sources in a later step. When obtaining NGINX sources from GitHub, please ensure that you switch to the branch that you intend to use with the module binary. For simplicity, we will assume that the `main` branch will be used for the remainder of this tutorial.
The argument is a “complex value”, which should result in `on`/`off` or `1`/`0`. Default is `off`.
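For example, a hypothetical `map` on a cookie can turn tracing on per request (the cookie and variable names are illustrative):
```nginx
http {
    # trace only requests that carry a "trace=on" cookie
    map $cookie_trace $trace_enabled {
        default  0;
        "on"     1;
    }

    otel_trace $trace_enabled;
}
```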
```bash
git clone https://github.com/nginx/nginx.git
```
**`otel_trace_context`** `ignore | extract | inject | propagate;`
Configure NGINX to generate files necessary for dynamic module compilation. These files will be placed into the `nginx/objs` directory.
Defines how to propagate `traceparent`/`tracestate` headers. `extract` uses the existing trace context from the request. `inject` adds a new context to the request, rewriting existing headers if any. `propagate` updates the existing context (i.e. combines `extract` and `inject`). `ignore` skips context header processing. Default is `ignore`.
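For example, to continue a caller's trace and forward the updated context upstream (the `/api/` location and `backend` upstream are placeholders):
```nginx
location /api/ {
    otel_trace on;
    # extract the incoming traceparent/tracestate and inject the updated context upstream
    otel_trace_context propagate;
    proxy_pass http://backend;
}
```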
**Important:** If you did not obtain NGINX source code via the clone method in the previous step, you will need to adjust paths in the following commands to conform to your specific directory structure.
```bash
cd nginx
auto/configure --with-compat
```
**`otel_span_name`** `name;`
Exit the NGINX directory and clone the `ngx_otel_module` repository.
```bash
cd ..
git clone https://github.com/nginxinc/nginx-otel.git
```
Default is the request's location name.
Configure and build the NGINX OTel module.
**`otel_span_attr`** `name "$var";`
**Important**: replace the path in the `cmake` command with the path to the `nginx/objs` directory from above.
```bash
cd nginx-otel
mkdir build
cd build
cmake -DNGX_OTEL_NGINX_BUILD_DIR=/path/to/configured/nginx/objs ..
make
```
If the name starts with `http.(request|response).header.`, the type of the added attribute will be `string[]` to match semantic conventions (i.e. the header value will be represented as a single-element array). Otherwise, the attribute type will be `string`.
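For example (the attribute names and location are chosen for illustration):
```nginx
location /download {
    # plain string attribute
    otel_span_attr my.request.completion $request_completion;
    # header-derived attribute, exported as a single-element string[] array
    otel_span_attr http.response.header.content.type $sent_http_content_type;
    return 200 "OK";
}
```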
Compilation will produce a binary named `ngx_otel_module.so`.
#### Available in `http` context
## Installing from Built Binaries
***Important:*** The built `ngx_otel_module.so` dynamic module binary will ONLY be compatible with the same version of NGINX source code that was used to build it. To guarantee proper operation, you will need to build and install NGINX from sources obtained in previous steps on the same operating system.
**`otel_exporter`**`;`
Follow [instructions](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-open-source/#compiling-and-installing-from-source) related to compiling and installing NGINX. Skip procedures for downloading source code.
By default, this will install NGINX into `/usr/local/nginx`. The following steps assume this directory structure.
Copy the `ngx_otel_module.so` dynamic module binary to `/usr/local/nginx/modules`.
Load the module by adding the following line to the top of the main NGINX configuration file, located at `/usr/local/nginx/conf/nginx.conf`.
Defines how to export tracing data. There can only be one `otel_exporter` directive in a given `http` context.
```nginx
load_module modules/ngx_otel_module.so;
otel_exporter {
endpoint "host:port";
interval 5s; # max interval between two exports
batch_size 512; # max number of spans to be sent in one batch per worker
batch_count 4; # max number of pending batches per worker, over the limit spans are dropped
}
```
# Community
- Our Slack channel [#nginx-opentelemetry-module](https://nginxcommunity.slack.com/archives/C05NMNAQDU6) is the go-to place to start asking questions and sharing your thoughts.
**`otel_service_name`** `name;`
- Our [GitHub issues page](https://github.com/nginxinc/nginx-otel/issues) offers space for a more technical discussion at your own pace.
Sets `service.name` attribute of OTel resource. By default, it is set to `unknown_service:nginx`.
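For example, with a hypothetical service name:
```nginx
http {
    # exported as the service.name resource attribute on every span batch
    otel_service_name frontend-nginx;
    otel_trace on;
}
```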
# Contributing
Get involved with the project by contributing! Please see our [contributing guide](CONTRIBUTING.md) for details.
### Available in `otel_exporter` context
# Change Log
See our [release page](https://github.com/nginxinc/nginx-otel/releases) to keep track of updates.
**`endpoint`** `"host:port";`
Defines exporter endpoint `host` and `port`. Only one endpoint per `otel_exporter` can be specified.
**`interval`** `5s;`
Maximum interval between two exports. Default is `5s`.
**`batch_size`** `512;`
Maximum number of spans to be sent in one batch per worker. Default is 512.
**`batch_count`** `4;`
Maximum number of pending batches per worker; spans over the limit are dropped. Default is 4.
### Variables
`$otel_trace_id` - trace id.
`$otel_span_id` - current span id.
`$otel_parent_id` - parent span id.
`$otel_parent_sampled` - `sampled` flag of parent span, `1`/`0`.
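These variables can be used anywhere NGINX variables are accepted; for example, a hypothetical `log_format` that correlates access-log entries with exported spans (the format name and log path are placeholders):
```nginx
http {
    log_format traced '$remote_addr "$request" $status '
                      'trace_id=$otel_trace_id span_id=$otel_span_id '
                      'parent=$otel_parent_id sampled=$otel_parent_sampled';

    access_log /var/log/nginx/access.log traced;
}
```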
### Default span [attributes](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md)
`http.method`
`http.target`
`http.route`
`http.scheme`
`http.flavor`
`http.user_agent`
`http.request_content_length`
`http.response_content_length`
`http.status_code`
`net.host.name`
`net.host.port`
`net.sock.peer.addr`
`net.sock.peer.port`
## License
# License
[Apache License, Version 2.0](https://github.com/nginxinc/nginx-otel/blob/main/LICENSE)
&copy; [F5, Inc.](https://www.f5.com/) 2023

View file

@ -1,14 +0,0 @@
# Security Policy
## Latest Versions
We advise users to run or update to the most recent release of this project. Older versions of this project may not have all enhancements and/or bug fixes applied to them.
## Reporting a Vulnerability
The F5 Security Incident Response Team (F5 SIRT) has an email alias that makes it easy to report potential security vulnerabilities.
- If you're an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/services/support).
- If you aren't an F5 customer, please report any potential or current instances of security vulnerabilities with any F5 product to the F5 Security Incident Response Team at F5SIRT@f5.com.
For more information, visit [https://www.f5.com/services/support/report-a-vulnerability](https://www.f5.com/services/support/report-a-vulnerability).

config (3 changed lines)
View file

@ -1,9 +1,10 @@
ngx_addon_name=ngx_otel_module
cmake -D NGX_OTEL_NGINX_BUILD_DIR=$NGX_OBJS \
-D NGX_OTEL_FETCH_DEPS=OFF \
-D NGX_OTEL_PROTO_DIR=$NGX_OTEL_PROTO_DIR \
-D CMAKE_LIBRARY_OUTPUT_DIRECTORY=$PWD/$NGX_OBJS \
-D "CMAKE_C_FLAGS=$NGX_CC_OPT" \
-D "CMAKE_CXX_FLAGS=$NGX_CC_OPT" \
-D "CMAKE_MODULE_LINKER_FLAGS=$NGX_LD_OPT" \
$NGX_OTEL_CMAKE_OPTS \
-S $ngx_addon_dir -B $NGX_OBJS/otel || exit 1

View file

@ -111,21 +111,18 @@ public:
int attrSize{0};
};
BatchExporter(const Target& target,
size_t batchSize, size_t batchCount,
const std::map<StrView, StrView>& resourceAttrs) :
batchSize(batchSize), client(target)
BatchExporter(StrView target,
size_t batchSize, size_t batchCount, StrView serviceName) :
batchSize(batchSize), client(std::string(target))
{
free.reserve(batchCount);
while (batchCount-- > 0) {
free.emplace_back();
auto resourceSpans = free.back().add_resource_spans();
for (auto& attr : resourceAttrs) {
auto kv = resourceSpans->mutable_resource()->add_attributes();
kv->set_key(std::string(attr.first));
kv->mutable_value()->set_string_value(std::string(attr.second));
}
auto attr = resourceSpans->mutable_resource()->add_attributes();
attr->set_key("service.name");
attr->mutable_value()->set_string_value(std::string(serviceName));
auto scopeSpans = resourceSpans->add_scope_spans();
scopeSpans->mutable_scope()->set_name("nginx");

View file

@ -1,107 +0,0 @@
#include "ngx.hpp"
#include "grpc_log.hpp"
#include <google/protobuf/stubs/common.h>
#include <grpcpp/grpcpp.h>
#if GOOGLE_PROTOBUF_VERSION < 4022000
#include <google/protobuf/stubs/logging.h>
class ProtobufLog {
public:
ProtobufLog() { google::protobuf::SetLogHandler(protobufLogHandler); }
~ProtobufLog() { google::protobuf::SetLogHandler(NULL); }
private:
static void protobufLogHandler(google::protobuf::LogLevel logLevel,
const char* filename, int line, const std::string& msg)
{
using namespace google::protobuf;
ngx_uint_t level = logLevel == LOGLEVEL_FATAL ? NGX_LOG_EMERG :
logLevel == LOGLEVEL_ERROR ? NGX_LOG_ERR :
logLevel == LOGLEVEL_WARNING ? NGX_LOG_WARN :
/*LOGLEVEL_INFO*/ NGX_LOG_INFO;
ngx_log_error(level, ngx_cycle->log, 0, "OTel/protobuf: %s",
msg.c_str());
}
};
#else
#include <absl/log/globals.h>
#include <absl/log/initialize.h>
#include <absl/log/log_sink_registry.h>
class NgxLogSink : absl::LogSink {
public:
NgxLogSink()
{
absl::InitializeLog();
absl::AddLogSink(this);
// Disable logging to stderr
absl::SetStderrThreshold(static_cast<absl::LogSeverity>(100));
}
~NgxLogSink() override { absl::RemoveLogSink(this); }
void Send(const absl::LogEntry& entry) override
{
auto severity = entry.log_severity();
ngx_uint_t level =
severity == absl::LogSeverity::kFatal ? NGX_LOG_EMERG :
severity == absl::LogSeverity::kError ? NGX_LOG_ERR :
severity == absl::LogSeverity::kWarning ? NGX_LOG_WARN :
/*absl::LogSeverity::kInfo*/ NGX_LOG_INFO;
ngx_str_t message { entry.text_message().size(),
(u_char*)entry.text_message().data() };
ngx_log_error(level, ngx_cycle->log, 0, "OTel/grpc: %V", &message);
}
};
typedef NgxLogSink ProtobufLog;
#endif
#if (GRPC_CPP_VERSION_MAJOR < 1) || \
(GRPC_CPP_VERSION_MAJOR == 1 && GRPC_CPP_VERSION_MINOR < 65)
#include <grpc/support/log.h>
class GrpcLog {
public:
GrpcLog() { gpr_set_log_function(grpcLogHandler); }
~GrpcLog() { gpr_set_log_function(NULL); }
private:
static void grpcLogHandler(gpr_log_func_args* args)
{
ngx_uint_t level =
args->severity == GPR_LOG_SEVERITY_ERROR ? NGX_LOG_ERR :
args->severity == GPR_LOG_SEVERITY_INFO ? NGX_LOG_INFO :
/*GPR_LOG_SEVERITY_DEBUG*/ NGX_LOG_DEBUG;
ngx_log_error(level, ngx_cycle->log, 0, "OTel/grpc: %s",
args->message);
}
ProtobufLog protoLog;
};
#else
// newer gRPC implies newer protobuf, and both use Abseil for logging
typedef NgxLogSink GrpcLog;
#endif
void initGrpcLog()
{
static GrpcLog init;
}

View file

@ -1,3 +0,0 @@
#pragma once
void initGrpcLog();

View file

@ -1,13 +1,16 @@
#include "ngx.hpp"
extern "C" {
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
}
#include "grpc_log.hpp"
#include <grpc/support/log.h>
#include <google/protobuf/stubs/logging.h>
#include "str_view.hpp"
#include "trace_context.hpp"
#include "batch_exporter.hpp"
#include <fstream>
extern ngx_module_t gHttpModule;
namespace {
@ -17,7 +20,7 @@ struct OtelCtx {
TraceContext current;
};
struct MainConfBase {
struct MainConf {
ngx_str_t endpoint;
ngx_msec_t interval;
size_t batchSize;
@ -26,13 +29,6 @@ struct MainConfBase {
ngx_str_t serviceName;
};
struct MainConf : MainConfBase {
std::map<StrView, StrView> resourceAttrs;
bool ssl;
std::string trustedCert;
Target::HeaderVec headers;
};
struct SpanAttr {
ngx_str_t name;
ngx_http_complex_value_t value;
@ -47,10 +43,7 @@ struct LocationConf {
};
char* setExporter(ngx_conf_t* cf, ngx_command_t* cmd, void* conf);
char* addResourceAttr(ngx_conf_t* cf, ngx_command_t* cmd, void* conf);
char* addSpanAttr(ngx_conf_t* cf, ngx_command_t* cmd, void* conf);
char* setTrustedCertificate(ngx_conf_t* cf, ngx_command_t* cmd, void* conf);
char* addExporterHeader(ngx_conf_t* cf, ngx_command_t* cmd, void* conf);
namespace Propagation {
@ -71,17 +64,14 @@ ngx_command_t gCommands[] = {
{ ngx_string("otel_exporter"),
NGX_HTTP_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS,
setExporter },
{ ngx_string("otel_resource_attr"),
NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE2,
addResourceAttr },
setExporter,
NGX_HTTP_MAIN_CONF_OFFSET },
{ ngx_string("otel_service_name"),
NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1,
ngx_conf_set_str_slot,
NGX_HTTP_MAIN_CONF_OFFSET,
offsetof(MainConfBase, serviceName) },
offsetof(MainConf, serviceName) },
{ ngx_string("otel_trace"),
NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
@ -116,33 +106,25 @@ ngx_command_t gExporterCommands[] = {
NGX_CONF_TAKE1,
ngx_conf_set_str_slot,
0,
offsetof(MainConfBase, endpoint) },
{ ngx_string("trusted_certificate"),
NGX_CONF_TAKE1,
setTrustedCertificate },
{ ngx_string("header"),
NGX_CONF_TAKE2,
addExporterHeader },
offsetof(MainConf, endpoint) },
{ ngx_string("interval"),
NGX_CONF_TAKE1,
ngx_conf_set_msec_slot,
0,
offsetof(MainConfBase, interval) },
offsetof(MainConf, interval) },
{ ngx_string("batch_size"),
NGX_CONF_TAKE1,
ngx_conf_set_size_slot,
0,
offsetof(MainConfBase, batchSize) },
offsetof(MainConf, batchSize) },
{ ngx_string("batch_count"),
NGX_CONF_TAKE1,
ngx_conf_set_size_slot,
0,
offsetof(MainConfBase, batchCount) },
offsetof(MainConf, batchCount) },
ngx_null_command
};
@ -159,30 +141,6 @@ ngx_str_t toNgxStr(StrView str)
return ngx_str_t{str.size(), (u_char*)str.data()};
}
bool iremovePrefix(ngx_str_t* str, StrView p)
{
if (str->len >= p.size() &&
ngx_strncasecmp(str->data, (u_char*)p.data(), p.size()) == 0) {
str->data += p.size();
str->len -= p.size();
return true;
}
return false;
}
MainConf* getMainConf(ngx_conf_t* cf)
{
return static_cast<MainConf*>(
(MainConfBase*)ngx_http_conf_get_module_main_conf(cf, gHttpModule));
}
MainConf* getMainConf(ngx_cycle_t* cycle)
{
return static_cast<MainConf*>(
(MainConfBase*)ngx_http_cycle_get_module_main_conf(cycle, gHttpModule));
}
LocationConf* getLocationConf(ngx_http_request_t* r)
{
return (LocationConf*)ngx_http_get_module_loc_conf(r, gHttpModule);
@ -285,21 +243,15 @@ ngx_int_t setHeader(ngx_http_request_t* r, StrView name, StrView value)
return NGX_OK;
}
auto headers = &r->headers_in.headers;
if (!headers->pool && ngx_list_init(headers, r->pool, 2,
sizeof(ngx_table_elt_t)) != NGX_OK) {
return NGX_ERROR;
}
header = (ngx_table_elt_t*)ngx_list_push(headers);
header = (ngx_table_elt_t*)ngx_list_push(&r->headers_in.headers);
if (header == NULL) {
return NGX_ERROR;
}
*header = {};
header->hash = hash;
header->key = toNgxStr(name);
header->lowcase_key = header->key.data;
header->next = NULL;
}
header->value = toNgxStr(value);
@ -546,6 +498,28 @@ ngx_int_t onRequestEnd(ngx_http_request_t* r)
return NGX_DECLINED;
}
void grpcLogHandler(gpr_log_func_args* args)
{
ngx_uint_t level = args->severity == GPR_LOG_SEVERITY_ERROR ? NGX_LOG_ERR :
args->severity == GPR_LOG_SEVERITY_INFO ? NGX_LOG_INFO :
/*GPR_LOG_SEVERITY_DEBUG*/ NGX_LOG_DEBUG;
ngx_log_error(level, ngx_cycle->log, 0, "OTel/grpc: %s", args->message);
}
void protobufLogHandler(google::protobuf::LogLevel logLevel,
const char* filename, int line, const std::string& msg)
{
using namespace google::protobuf;
ngx_uint_t level = logLevel == LOGLEVEL_FATAL ? NGX_LOG_EMERG :
logLevel == LOGLEVEL_ERROR ? NGX_LOG_ERR :
logLevel == LOGLEVEL_WARNING ? NGX_LOG_WARN :
/*LOGLEVEL_INFO*/ NGX_LOG_INFO;
ngx_log_error(level, ngx_cycle->log, 0, "OTel/protobuf: %s", msg.c_str());
}
ngx_int_t initModule(ngx_conf_t* cf)
{
auto cmcf = (ngx_http_core_main_conf_t*)ngx_http_conf_get_module_main_conf(
@ -567,14 +541,16 @@ ngx_int_t initModule(ngx_conf_t* cf)
*h = onRequestEnd;
initGrpcLog();
gpr_set_log_function(grpcLogHandler);
google::protobuf::SetLogHandler(protobufLogHandler);
return NGX_OK;
}
ngx_int_t initWorkerProcess(ngx_cycle_t* cycle)
{
auto mcf = getMainConf(cycle);
auto mcf = (MainConf*)ngx_http_cycle_get_module_main_conf(
cycle, gHttpModule);
// no 'http' or 'otel_exporter' blocks
if (mcf == NULL || mcf->endpoint.len == 0) {
@ -582,17 +558,11 @@ ngx_int_t initWorkerProcess(ngx_cycle_t* cycle)
}
try {
Target target;
target.endpoint = std::string(toStrView(mcf->endpoint));
target.ssl = mcf->ssl;
target.trustedCert = mcf->trustedCert;
target.headers = mcf->headers;
gExporter.reset(new BatchExporter(
target,
toStrView(mcf->endpoint),
mcf->batchSize,
mcf->batchCount,
mcf->resourceAttrs));
toStrView(mcf->serviceName)));
} catch (const std::exception& e) {
ngx_log_error(NGX_LOG_CRIT, cycle->log, 0,
"OTel worker init error: %s", e.what());
@ -613,7 +583,8 @@ ngx_int_t initWorkerProcess(ngx_cycle_t* cycle)
"OTel flush error: %s", e.what());
}
auto mcf = getMainConf((ngx_cycle_t*)ngx_cycle);
auto mcf = (MainConf*)ngx_http_cycle_get_module_main_conf(
ngx_cycle, gHttpModule);
ngx_add_timer(ev, mcf->interval);
};
@ -641,7 +612,7 @@ void exitWorkerProcess(ngx_cycle_t* cycle)
char* setExporter(ngx_conf_t* cf, ngx_command_t* cmd, void* conf)
{
auto mcf = getMainConf(cf);
auto mcf = (MainConf*)conf;
if (mcf->endpoint.len) {
return (char*)"is duplicate";
@ -658,7 +629,7 @@ char* setExporter(ngx_conf_t* cf, ngx_command_t* cmd, void* conf)
continue;
}
if (cf->args->nelts != static_cast<unsigned>(ffs(cmd->type))) {
if (cf->args->nelts != 2) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
"invalid number of arguments in \"%V\" "
"directive of \"otel_exporter\"", name);
@ -691,12 +662,6 @@ char* setExporter(ngx_conf_t* cf, ngx_command_t* cmd, void* conf)
return rv;
}
if (iremovePrefix(&mcf->endpoint, "https://")) {
mcf->ssl = true;
} else {
iremovePrefix(&mcf->endpoint, "http://");
}
if (mcf->endpoint.len == 0) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
"\"otel_exporter\" requires \"endpoint\"");
@ -706,124 +671,31 @@ char* setExporter(ngx_conf_t* cf, ngx_command_t* cmd, void* conf)
return NGX_CONF_OK;
}
char* addResourceAttr(ngx_conf_t* cf, ngx_command_t* cmd, void* conf)
{
auto mcf = getMainConf(cf);
try {
auto args = (ngx_str_t*)cf->args->elts;
mcf->resourceAttrs[toStrView(args[1])] = toStrView(args[2]);
} catch (const std::exception& e) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "OTel: %s", e.what());
return (char*)NGX_CONF_ERROR;
}
return NGX_CONF_OK;
}
char* setTrustedCertificate(ngx_conf_t* cf, ngx_command_t* cmd, void* conf)
{
auto path = ((ngx_str_t*)cf->args->elts)[1];
auto mcf = getMainConf(cf);
if (ngx_get_full_name(cf->pool, &cf->cycle->conf_prefix, &path) != NGX_OK) {
return (char*)NGX_CONF_ERROR;
}
try {
std::ifstream file{(const char*)path.data, std::ios::binary};
if (!file.is_open()) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, ngx_errno,
"failed to open \"%V\"", &path);
return (char*)NGX_CONF_ERROR;
}
file.exceptions(std::ios::failbit | std::ios::badbit);
file.peek(); // trigger early error for dirs
size_t size = file.seekg(0, std::ios::end).tellg();
file.seekg(0);
mcf->trustedCert.resize(size);
file.read(&mcf->trustedCert[0], size);
} catch (const std::exception& e) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
"failed to read \"%V\": %s", &path, e.what());
return (char*)NGX_CONF_ERROR;
}
return NGX_CONF_OK;
}
char* addExporterHeader(ngx_conf_t* cf, ngx_command_t* cmd, void* conf)
{
auto args = (ngx_str_t*)cf->args->elts;
// don't force on users lower case name requirement of gRPC
ngx_strlow(args[1].data, args[1].data, args[1].len);
try {
// validate header here to avoid runtime assert failure in gRPC
auto name = toStrView(args[1]);
if (!Target::validateHeaderName(name)) {
return (char*)"has invalid header name";
}
auto value = toStrView(args[2]);
if (!Target::validateHeaderValue(value)) {
return (char*)"has invalid header value";
}
getMainConf(cf)->headers.emplace_back(name, value);
} catch (const std::exception& e) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "OTel: %s", e.what());
return (char*)NGX_CONF_ERROR;
}
return NGX_CONF_OK;
}
void* createMainConf(ngx_conf_t* cf)
{
auto cln = ngx_pool_cleanup_add(cf->pool, sizeof(MainConf));
if (cln == NULL) {
auto mcf = (MainConf*)ngx_pcalloc(cf->pool, sizeof(MainConf));
if (mcf == NULL) {
return NULL;
}
MainConf* mcf;
try {
mcf = new (cln->data) MainConf{};
} catch (const std::exception& e) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "OTel: %s", e.what());
return NULL;
}
cln->handler = [](void* data) {
((MainConf*)data)->~MainConf();
};
mcf->interval = NGX_CONF_UNSET_MSEC;
mcf->batchSize = NGX_CONF_UNSET_SIZE;
mcf->batchCount = NGX_CONF_UNSET_SIZE;
return static_cast<MainConfBase*>(mcf);
return mcf;
}
char* initMainConf(ngx_conf_t* cf, void* conf)
{
auto mcf = getMainConf(cf);
auto mcf = (MainConf*)conf;
ngx_conf_init_msec_value(mcf->interval, 5000);
ngx_conf_init_size_value(mcf->batchSize, 512);
ngx_conf_init_size_value(mcf->batchCount, 4);
try {
if (mcf->serviceName.data == NULL) {
mcf->resourceAttrs.emplace("service.name", "unknown_service:nginx");
} else {
mcf->resourceAttrs["service.name"] = toStrView(mcf->serviceName);
}
} catch (const std::exception& e) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "OTel: %s", e.what());
return (char*)NGX_CONF_ERROR;
if (mcf->serviceName.data == NULL) {
mcf->serviceName = ngx_string("unknown_service:nginx");
}
return NGX_CONF_OK;
@ -961,7 +833,7 @@ char* mergeLocationConf(ngx_conf_t* cf, void* parent, void* child)
conf->spanAttrs = prev->spanAttrs;
}
auto mcf = getMainConf(cf);
auto mcf = (MainConf*)ngx_http_conf_get_module_main_conf(cf, gHttpModule);
if (mcf->endpoint.len == 0 && conf->trace) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,

View file

@ -1,7 +0,0 @@
#pragma once
extern "C" {
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
}

View file

@ -8,27 +8,6 @@
namespace otel_proto_trace = opentelemetry::proto::collector::trace::v1;
struct Target {
typedef std::vector<std::pair<std::string, std::string>> HeaderVec;
std::string endpoint;
bool ssl;
std::string trustedCert;
HeaderVec headers;
static bool validateHeaderName(StrView name)
{
return grpc_header_key_is_legal(
grpc_slice_from_static_buffer(name.data(), name.size()));
}
static bool validateHeaderValue(StrView value)
{
return grpc_header_nonbin_value_is_legal(
grpc_slice_from_static_buffer(value.data(), value.size()));
}
};
class TraceServiceClient {
public:
typedef otel_proto_trace::ExportTraceServiceRequest Request;
@ -38,18 +17,10 @@ public:
typedef std::function<void (Request, Response, grpc::Status)>
ResponseCb;
TraceServiceClient(const Target& target) : headers(target.headers)
TraceServiceClient(const std::string& target)
{
std::shared_ptr<grpc::ChannelCredentials> creds;
if (target.ssl) {
grpc::SslCredentialsOptions options;
options.pem_root_certs = target.trustedCert;
creds = grpc::SslCredentials(options);
} else {
creds = grpc::InsecureChannelCredentials();
}
auto channel = grpc::CreateChannel(target.endpoint, creds);
auto channel = grpc::CreateChannel(
target, grpc::InsecureChannelCredentials());
channel->GetState(true); // trigger 'connecting' state
stub = TraceService::NewStub(channel);
@ -59,10 +30,6 @@ public:
{
std::unique_ptr<ActiveCall> call{new ActiveCall{}};
for (auto& header : headers) {
call->context.AddMetadata(header.first, header.second);
}
call->request = std::move(req);
call->cb = std::move(cb);
@ -132,8 +99,6 @@ private:
ResponseCb cb;
};
Target::HeaderVec headers;
std::unique_ptr<TraceService::Stub> stub;
grpc::CompletionQueue queue;

View file

@ -1,101 +0,0 @@
import jinja2
import logging
from OpenSSL import crypto
import os
import pytest
import subprocess
import time
pytest_plugins = [
"trace_service",
]
def pytest_addoption(parser):
parser.addoption("--nginx", required=True)
parser.addoption("--module", required=True)
parser.addoption("--otelcol")
parser.addoption("--globals", default="")
def self_signed_cert(name):
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 2048)
cert = crypto.X509()
cert.get_subject().CN = name
cert.set_issuer(cert.get_subject())
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(365 * 86400) # 365 days
cert.set_pubkey(k)
cert.sign(k, "sha512")
return (
crypto.dump_privatekey(crypto.FILETYPE_PEM, k),
crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
)
@pytest.fixture(scope="session")
def logger():
logging.basicConfig(level=logging.INFO)
return logging.getLogger(__name__)
@pytest.fixture(scope="module")
def testdir(tmp_path_factory):
return tmp_path_factory.mktemp("nginx")
@pytest.fixture(scope="module")
def nginx_config(request, pytestconfig, testdir, logger):
tmpl = jinja2.Environment().from_string(request.module.NGINX_CONFIG)
params = getattr(request, "param", {})
params["globals"] = (
f"pid {testdir}/nginx.pid;\n"
+ "error_log stderr info;\n"
+ f"error_log {testdir}/error.log info;\n"
+ f"load_module {os.path.abspath(pytestconfig.option.module)};\n"
+ pytestconfig.option.globals
)
params["http_globals"] = f"root {testdir};\n" + "access_log off;\n"
conf = tmpl.render(params)
logger.debug(conf)
return conf
@pytest.fixture(scope="module")
def nginx(testdir, pytestconfig, nginx_config, cert, logger, otelcol):
(testdir / "nginx.conf").write_text(nginx_config)
logger.info("Starting nginx...")
proc = subprocess.Popen(
[
pytestconfig.option.nginx,
"-p",
str(testdir),
"-c",
"nginx.conf",
"-e",
"error.log",
]
)
logger.debug(f"args={' '.join(proc.args)}")
logger.debug(f"pid={proc.pid}")
while not (testdir / "nginx.pid").exists():
time.sleep(0.1)
assert proc.poll() is None, "Can't start nginx"
yield proc
logger.info("Stopping nginx...")
proc.terminate()
try:
proc.wait(timeout=5)
except subprocess.TimeoutExpired:
proc.kill()
assert "[alert]" not in (testdir / "error.log").read_text()
@pytest.fixture(scope="module")
def cert(testdir):
key, cert = self_signed_cert("localhost")
(testdir / "localhost.key").write_text(key.decode("utf-8"))
(testdir / "localhost.crt").write_text(cert.decode("utf-8"))
yield (key, cert)

View file

@ -1,6 +0,0 @@
pytest~=8.3
jinja2~=3.1
pyopenssl~=24.3
niquests~=3.11
grpcio~=1.68
opentelemetry-proto~=1.28

View file

@ -1,331 +0,0 @@
from collections import namedtuple
import niquests
import pytest
import socket
import time
import urllib3
NGINX_CONFIG = """
{{ globals }}
daemon off;
events {
}
http {
{{ http_globals }}
ssl_certificate localhost.crt;
ssl_certificate_key localhost.key;
otel_exporter {
endpoint {{ endpoint or "127.0.0.1:14317" }};
interval {{ interval or "1ms" }};
batch_size 3;
batch_count 3;
{{ exporter_opts }}
}
otel_trace on;
{{ resource_attrs }}
server {
listen 127.0.0.1:18443 ssl;
listen 127.0.0.1:18443 quic;
listen 127.0.0.1:18080;
http2 on;
server_name localhost;
location /ok {
return 200 "OK";
}
location /err {
return 500 "ERR";
}
location /custom {
otel_span_name custom_location;
otel_span_attr http.request.completion
$request_completion;
otel_span_attr http.response.header.content.type
$sent_http_content_type;
otel_span_attr http.request $request;
return 200 "OK";
}
location /vars {
otel_trace_context extract;
add_header "X-Otel-Trace-Id" $otel_trace_id;
add_header "X-Otel-Span-Id" $otel_span_id;
add_header "X-Otel-Parent-Id" $otel_parent_id;
add_header "X-Otel-Parent-Sampled" $otel_parent_sampled;
return 204;
}
location /ignore {
proxy_pass http://127.0.0.1:18080/notrace;
}
location /extract {
otel_trace_context extract;
proxy_pass http://127.0.0.1:18080/notrace;
}
location /inject {
otel_trace_context inject;
proxy_pass http://127.0.0.1:18080/notrace;
}
location /propagate {
otel_trace_context propagate;
proxy_pass http://127.0.0.1:18080/notrace;
}
location /notrace {
otel_trace off;
add_header "X-Otel-Traceparent" $http_traceparent;
add_header "X-Otel-Tracestate" $http_tracestate;
return 204;
}
}
}
"""
TraceContext = namedtuple("TraceContext", ["trace_id", "span_id", "state"])
parent_ctx = TraceContext(
trace_id="0af7651916cd43dd8448eb211c80319c",
span_id="b9c7c989f97918e1",
state="congo=ucfJifl5GOE,rojo=00f067aa0ba902b7",
)
def trace_headers(ctx):
return (
{
"Traceparent": f"00-{ctx.trace_id}-{ctx.span_id}-01",
"Tracestate": ctx.state,
}
if ctx
else {"Traceparent": None, "Tracestate": None}
)
def get_attr(span, name):
for value in (a.value for a in span.attributes if a.key == name):
return getattr(value, value.WhichOneof("value"))
@pytest.fixture
def client(nginx):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
with niquests.Session(multiplexed=True) as s:
yield s
def test_http09(trace_service, nginx):
def get_http09(host, port, path):
with socket.create_connection((host, port)) as sock:
sock.sendall(f"GET {path}\n".encode())
resp = sock.recv(1024).decode("utf-8")
return resp
assert get_http09("127.0.0.1", 18080, "/ok") == "OK"
span = trace_service.get_span()
assert span.name == "/ok"
@pytest.mark.parametrize("http_ver", ["1.1", "2.0", "3.0"])
@pytest.mark.parametrize(
("path", "status"),
[("/ok", 200), ("/err", 500)],
)
def test_default_attributes(client, trace_service, http_ver, path, status):
scheme, port = ("http", 18080) if http_ver == "1.1" else ("https", 18443)
if http_ver == "3.0":
client.quic_cache_layer.add_domain("127.0.0.1", port)
r = client.get(f"{scheme}://127.0.0.1:{port}{path}", verify=False)
span = trace_service.get_span()
assert span.name == path
assert get_attr(span, "http.method") == "GET"
assert get_attr(span, "http.target") == path
assert get_attr(span, "http.route") == path
assert get_attr(span, "http.scheme") == scheme
assert get_attr(span, "http.flavor") == http_ver
assert get_attr(span, "http.user_agent") == (
f"niquests/{niquests.__version__}"
)
assert get_attr(span, "http.request_content_length") == 0
assert get_attr(span, "http.response_content_length") == len(r.text)
assert get_attr(span, "http.status_code") == status
assert get_attr(span, "net.host.name") == "localhost"
assert get_attr(span, "net.host.port") == port
assert get_attr(span, "net.sock.peer.addr") == "127.0.0.1"
assert get_attr(span, "net.sock.peer.port") in range(1024, 65536)
def test_custom_attributes(client, trace_service):
assert client.get("http://127.0.0.1:18080/custom").status_code == 200
span = trace_service.get_span()
assert span.name == "custom_location"
assert get_attr(span, "http.request.completion") == "OK"
value = get_attr(span, "http.response.header.content.type")
assert value.values[0].string_value == "text/plain"
assert get_attr(span, "http.request") == "GET /custom HTTP/1.1"
def test_trace_off(client, trace_service):
assert client.get("http://127.0.0.1:18080/notrace").status_code == 204
time.sleep(0.01) # wait for spans
assert len(trace_service.batches) == 0
@pytest.mark.parametrize("parent", [None, parent_ctx])
def test_variables(client, trace_service, parent):
r = client.get("http://127.0.0.1:18080/vars", headers=trace_headers(parent))
span = trace_service.get_span()
if parent:
assert span.trace_id.hex() == parent.trace_id
assert span.parent_span_id.hex() == parent.span_id
assert span.trace_state == parent.state
assert r.headers.get("X-Otel-Trace-Id") == span.trace_id.hex()
assert r.headers.get("X-Otel-Span-Id") == span.span_id.hex()
assert r.headers.get("X-Otel-Parent-Id") or "" == span.parent_span_id.hex()
assert r.headers.get("X-Otel-Parent-Sampled") == ("1" if parent else "0")
@pytest.mark.parametrize("parent", [None, parent_ctx])
@pytest.mark.parametrize(
"path", ["/ignore", "/extract", "/inject", "/propagate"]
)
def test_context(client, trace_service, parent, path):
headers = trace_headers(parent)
r = client.get(f"http://127.0.0.1:18080{path}", headers=headers)
span = trace_service.get_span()
if path in ["/extract", "/propagate"] and parent:
assert span.trace_id.hex() == parent.trace_id
assert span.parent_span_id.hex() == parent.span_id
assert span.trace_state == parent.state
if path in ["/inject", "/propagate"]:
headers = trace_headers(
TraceContext(
span.trace_id.hex(),
span.span_id.hex(),
span.trace_state or None,
)
)
assert r.headers.get("X-Otel-Traceparent") == headers["Traceparent"]
assert r.headers.get("X-Otel-Tracestate") == headers["Tracestate"]
@pytest.mark.parametrize(
"nginx_config",
[{"interval": "200ms", "endpoint": "http://127.0.0.1:14317"}],
indirect=True,
)
@pytest.mark.parametrize("batch_count", [1, 3])
def test_batches(client, trace_service, batch_count):
batch_size = 3
for _ in range(
batch_count * batch_size + 1
): # +1 request to trigger batch sending
assert client.get("http://127.0.0.1:18080/ok").status_code == 200
time.sleep(0.01)
assert len(trace_service.batches) == batch_count
for batch in trace_service.batches:
assert (
get_attr(batch[0].resource, "service.name")
== "unknown_service:nginx"
)
assert len(batch[0].scope_spans[0].spans) == batch_size
time.sleep(0.3) # wait for +1 request to be flushed
trace_service.batches.clear()
@pytest.mark.parametrize(
"nginx_config",
[
{
"resource_attrs": """
otel_service_name "test_service";
otel_resource_attr my.name "my name";
otel_resource_attr my.service "my service";
""",
}
],
indirect=True,
)
def test_custom_resource_attributes(client, trace_service):
assert client.get("http://127.0.0.1:18080/ok").status_code == 200
batch = trace_service.get_batch()
assert get_attr(batch.resource, "service.name") == "test_service"
assert get_attr(batch.resource, "my.name") == "my name"
assert get_attr(batch.resource, "my.service") == "my service"
@pytest.mark.parametrize(
"nginx_config",
[
{
"exporter_opts": """
header X-API-TOKEN api.value;
header Authorization "Basic value";
""",
}
],
indirect=True,
)
@pytest.mark.parametrize("trace_service", ["skip_otelcol"], indirect=True)
def test_exporter_headers(client, trace_service):
assert client.get("http://127.0.0.1:18080/ok").status_code == 200
assert trace_service.get_span().name == "/ok"
headers = dict(trace_service.last_metadata)
assert headers["x-api-token"] == "api.value"
assert headers["authorization"] == "Basic value"
@pytest.mark.parametrize(
"nginx_config",
[
{
"endpoint": "https://localhost:14318",
"exporter_opts": "trusted_certificate localhost.crt;",
}
],
indirect=True,
)
def test_tls_export(client, trace_service):
assert client.get("http://127.0.0.1:18080/ok").status_code == 200
assert trace_service.get_span().name == "/ok"

View file

@ -1,107 +0,0 @@
import concurrent
import grpc
from opentelemetry.proto.collector.trace.v1 import trace_service_pb2
from opentelemetry.proto.collector.trace.v1 import trace_service_pb2_grpc
import pytest
import subprocess
import time
class TraceService(trace_service_pb2_grpc.TraceServiceServicer):
batches = []
def Export(self, request, context):
self.batches.append(request.resource_spans)
self.last_metadata = context.invocation_metadata()
return trace_service_pb2.ExportTraceServiceResponse()
def get_batch(self):
for _ in range(10):
if len(self.batches):
break
time.sleep(0.001)
assert len(self.batches) == 1
assert len(self.batches[0]) == 1
return self.batches.pop()[0]
def get_span(self):
batch = self.get_batch()
assert len(batch.scope_spans) == 1
assert len(batch.scope_spans[0].spans) == 1
return batch.scope_spans[0].spans.pop()
@pytest.fixture(scope="module")
def trace_service(request, pytestconfig, logger, cert):
server = grpc.server(concurrent.futures.ThreadPoolExecutor())
trace_service = TraceService()
trace_service_pb2_grpc.add_TraceServiceServicer_to_server(
trace_service, server
)
trace_service.use_otelcol = (
pytestconfig.option.otelcol
and getattr(request, "param", "") != "skip_otelcol"
)
listen_addr = f"127.0.0.1:{24317 if trace_service.use_otelcol else 14317}"
server.add_insecure_port(listen_addr)
if not trace_service.use_otelcol:
creds = grpc.ssl_server_credentials([cert])
server.add_secure_port("127.0.0.1:14318", creds)
listen_addr += " and 127.0.0.1:14318"
logger.info(f"Starting trace service at {listen_addr}...")
server.start()
yield trace_service
logger.info("Stopping trace service...")
server.stop(grace=None)
@pytest.fixture(scope="module")
def otelcol(pytestconfig, testdir, logger, trace_service, cert):
if not trace_service.use_otelcol:
yield
return
(testdir / "otel-config.yaml").write_text(
f"""receivers:
otlp:
protocols:
grpc:
endpoint: 127.0.0.1:14317
otlp/tls:
protocols:
grpc:
endpoint: 127.0.0.1:14318
tls:
cert_file: {testdir}/localhost.crt
key_file: {testdir}/localhost.key
exporters:
otlp:
endpoint: 127.0.0.1:24317
tls:
insecure: true
service:
pipelines:
traces:
receivers: [otlp, otlp/tls]
exporters: [otlp]
telemetry:
metrics:
# prevent otelcol from opening 8888 port
level: none"""
)
logger.info("Starting otelcol at 127.0.0.1:14317...")
proc = subprocess.Popen(
[pytestconfig.option.otelcol, "--config", testdir / "otel-config.yaml"]
)
time.sleep(1) # give some time to get ready
assert proc.poll() is None, "Can't start otelcol"
yield
logger.info("Stopping otelcol...")
proc.terminate()
try:
proc.wait(timeout=5)
except subprocess.TimeoutExpired:
proc.kill()