Nginx OTel module.
parent a0389d8296
commit 3430e85c34
8 changed files with 1042 additions and 0 deletions
src/trace_service_client.hpp (new file, 108 lines)
#pragma once

#include <atomic>
#include <cassert>
#include <functional>
#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>
#include <grpcpp/alarm.h>
#include <opentelemetry/proto/collector/trace/v1/trace_service.grpc.pb.h>

namespace otel_proto_trace = opentelemetry::proto::collector::trace::v1;

class TraceServiceClient {
public:
    typedef otel_proto_trace::ExportTraceServiceRequest Request;
    typedef otel_proto_trace::ExportTraceServiceResponse Response;
    typedef otel_proto_trace::TraceService TraceService;

    typedef std::function<void (Request, Response, grpc::Status)>
        ResponseCb;

    TraceServiceClient(const std::string& target)
    {
        auto channel = grpc::CreateChannel(
            target, grpc::InsecureChannelCredentials());
        channel->GetState(true); // trigger 'connecting' state

        stub = TraceService::NewStub(channel);
    }

    void send(Request& req, ResponseCb cb)
    {
        std::unique_ptr<ActiveCall> call{new ActiveCall{}};

        call->request = std::move(req);
        call->cb = std::move(cb);

        ++pending;

        // post actual RPC to worker thread to minimize load on caller;
        // an already expired alarm fires on the completion queue immediately
        gpr_timespec past{};
        call->sendAlarm.Set(&queue, past, call.release());
    }

    // blocking completion queue loop, meant to run on a dedicated worker thread
    void run()
    {
        void* tag = NULL;
        bool ok = false;

        while (queue.Next(&tag, &ok)) {
            assert(ok);

            if (tag == &shutdownAlarm) {
                shutdown = true;
            } else {
                std::unique_ptr<ActiveCall> call{(ActiveCall*)tag};

                if (!call->sent) {
                    // send alarm fired: start the actual export RPC
                    --pending;

                    call->responseReader = stub->AsyncExport(
                        &call->context, call->request, &queue);
                    call->sent = true;

                    call->responseReader->Finish(
                        &call->response, &call->status, call.get());
                    call.release();
                } else {
                    // RPC finished: deliver the result to the caller
                    call->cb(std::move(call->request),
                        std::move(call->response), std::move(call->status));
                }
            }

            // It's not clear if gRPC guarantees any order for expired alarms,
            // so we use 'pending' counter to ensure CQ shutdown happens last.
            // https://github.com/grpc/grpc/issues/31398
            if (shutdown && pending == 0) {
                queue.Shutdown();
            }
        }
    }

    void stop()
    {
        // posts an immediate alarm; run() shuts the completion queue down
        // once no sends are pending, and then returns
        gpr_timespec past{};
        shutdownAlarm.Set(&queue, past, &shutdownAlarm);
    }

private:
    struct ActiveCall {
        grpc::Alarm sendAlarm;
        bool sent = false;

        grpc::ClientContext context;
        Request request;
        Response response;
        grpc::Status status;
        std::unique_ptr<grpc::ClientAsyncResponseReader<Response>>
            responseReader;

        ResponseCb cb;
    };

    std::unique_ptr<TraceService::Stub> stub;
    grpc::CompletionQueue queue;

    grpc::Alarm shutdownAlarm;
    std::atomic<int> pending{0};
    bool shutdown{false};
};
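For context, and not part of the commit: a minimal sketch of how this client might be driven. It assumes a dedicated std::thread for the completion-queue loop; the collector endpoint ("localhost:4317"), the empty batch, and the main() scaffolding are placeholders for illustration only.

// usage sketch (illustrative, not part of the commit)
#include <cstdio>
#include <future>
#include <thread>

#include "trace_service_client.hpp"

int main()
{
    TraceServiceClient client("localhost:4317"); // placeholder endpoint

    // run() blocks draining the completion queue, so give it its own thread
    std::thread worker([&client] { client.run(); });

    TraceServiceClient::Request req;
    req.add_resource_spans(); // empty batch, demo only

    std::promise<void> done;
    client.send(req, [&done](TraceServiceClient::Request,
                             TraceServiceClient::Response,
                             grpc::Status status) {
        // invoked on the worker thread once the export RPC completes
        if (!status.ok()) {
            fprintf(stderr, "export failed: %s\n",
                status.error_message().c_str());
        }
        done.set_value();
    });

    done.get_future().wait(); // wait for the in-flight export to finish
    client.stop();            // then post the shutdown alarm; run() returns
    worker.join();
    return 0;
}

The send()/run() split mirrors the comment in the code: send() only moves the request into an ActiveCall and arms an already-expired grpc::Alarm, so the RPC itself is started by whichever thread runs run(), keeping the cost on the calling thread minimal.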