Reputation: 1328
I can only find old and incomplete examples of using OpenTracing/Jaeger with Kafka. I want to run an example locally as a proof of concept: OpenTracing spans sent to Kafka.
I managed to get some of this working, but the jaeger-query service keeps logging:
"msg":"Failed to init storage factory","error":"kafka: client has run out of available brokers to talk to (Is your cluster reachable?)"
I'm not sure whether I also need some kind of storage backend, e.g. Cassandra?
version: '3.8'
services:
zookeeper:
image: confluentinc/cp-zookeeper
networks:
- kafka-net
container_name: zookeeper
environment:
- ZOOKEEPER_CLIENT_PORT=2181
ports:
- 2181:2181
kafka:
image: confluentinc/cp-kafka
networks:
- kafka-net
container_name: kafka
environment:
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_LISTENERS-INTERNAL://kafka:29092,EXTERNAL://localhost:9092
- KAFKA_ADVERTISED=INTERNAL://kafka:29092,EXTERNAL://localhost:9092
- KAFKA_ADVERTISED_LISTENERS=INTERNAL://kafka:29092,EXTERNAL://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME=INTERNAL
ports:
- 9092:9092
depends_on:
- zookeeper
restart: on-failure
jaeger-collector:
image: jaegertracing/jaeger-collector
container_name: jaeger-collector
networks:
- kafka-net
ports:
- "14250:14250"
# - "14267:14267"
- "14268:14268" # HTTP collector port to receive spans
- "14269:14269" # HTTP health check port
restart: on-failure
environment:
LOG_LEVEL: "debug"
SPAN_STORAGE_TYPE: "kafka"
KAFKA_TOPIC: "somekafkatag"
KAFKA_BROKERS: "kafka:9092"
KAFKA_PRODUCER_BROKERS: "kafka:29092"
jaeger-agent:
image: jaegertracing/jaeger-agent
container_name: jaeger-agent
networks:
- kafka-net
command: ["--reporter.grpc.host-port=jaeger-collector:14250"]
ports:
- "5775:5775/udp"
- "6831:6831/udp"
- "6832:6832/udp"
- "5778:5778"
environment:
LOG_LEVEL: "debug"
SPAN_STORAGE_TYPE: "kafka"
restart: on-failure
depends_on:
- jaeger-collector
jaeger-ingester:
image: jaegertracing/jaeger-ingester
container_name: jaeger-ingester
networks:
- kafka-net
ports:
- "14270:14270" # HTTP health check port: http://localhost:14270/
- "14271:14271" # Metrics port: http://localhost:14271/metrics
restart: on-failure
command: ["--kafka.producer.brokers=kafka:9092"]
environment:
LOG_LEVEL: "debug"
INGESTER_PARALLELISM: "1"
INGESTER_DEADLOCKINTERVAL: "0ms"
SPAN_STORAGE_TYPE: "kafka"
KAFKA_CONSUMER_BROKERS: "kafka:9092"
METRICS_BACKEND: "expvar"
KAFKA_BROKERS: "kafka:29092"
jaeger-query:
image: jaegertracing/jaeger-query
container_name: jaeger-query
networks:
- kafka-net
ports:
- "16686:16686" # Jaeger UI port
- "16687:16687" # HTTP health check port: http://localhost:16687/
restart: on-failure
environment:
LOG_LEVEL: "debug"
SPAN_STORAGE_TYPE: "kafka"
KAFKA_CONSUMER_BROKERS: "kafka:9092"
KAFKA_BROKERS: "kafka:29092"
networks:
kafka-net:
driver: bridge
Upvotes: 2
Views: 1960
Reputation: 191
Jaeger all-in-one with Kafka, plus tracing interceptors for Kafka Connect. A sketch of the custom interceptor classes referenced in the connect service follows the compose file:
version: '3'
services:
jaeger:
image: jaegertracing/all-in-one:latest
hostname: jaeger
container_name: jaeger
restart: always
environment:
- COLLECTOR_ZIPKIN_HTTP_PORT=9411
- POSTGRES_PASSWORD=password
- POSTGRES_DB=nextcloud
ports:
- 5775:5775/udp
- 6831:6831/udp
- 6832:6832/udp
- 5778:5778
- 16686:16686
- 14268:14268
- 14250:14250
- 9411:9411
zookeeper:
image: confluentinc/cp-zookeeper:6.1.0
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
broker:
image: confluentinc/cp-server:6.1.0
hostname: broker
container_name: broker
depends_on:
- zookeeper
- jaeger
ports:
- "9092:9092"
- "9101:9101"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9101
KAFKA_JMX_HOSTNAME: localhost
KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081
CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092
CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
CONFLUENT_METRICS_ENABLE: 'true'
CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
schema-registry:
image: confluentinc/cp-schema-registry:6.1.0
hostname: schema-registry
container_name: schema-registry
depends_on:
- broker
ports:
- "8081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092'
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
connect:
image: cnfldemos/kafka-connect-datagen:0.4.0-6.1.0
hostname: connect
container_name: connect
depends_on:
- broker
- schema-registry
- jaeger
ports:
- "8083:8083"
environment:
CONNECT_BOOTSTRAP_SERVERS: 'broker:29092'
CONNECT_REST_ADVERTISED_HOST_NAME: connect
CONNECT_REST_PORT: 8083
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
# CLASSPATH required due to CC-2422
CLASSPATH: /usr/share/java/*
CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "globaltracer.CustomProducerInterceptor"
CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "globaltracer.CustomConsumerInterceptor"
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components,/usr/share/java/kafka"
CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR
JAEGER_SERVICE_NAME: kafka-connect
JAEGER_AGENT_HOST: jaeger
JAEGER_AGENT_PORT: 6831
JAEGER_SAMPLER_TYPE: const
JAEGER_SAMPLER_PARAM: 1
JAEGER_REPORTER_LOG_SPANS: 'true'
control-center:
image: confluentinc/cp-enterprise-control-center:6.1.0
hostname: control-center
container_name: control-center
depends_on:
- broker
- schema-registry
- connect
- ksqldb-server
ports:
- "9021:9021"
environment:
CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092'
CONTROL_CENTER_CONNECT_CLUSTER: 'connect:8083'
CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088"
CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088"
CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
CONTROL_CENTER_REPLICATION_FACTOR: 1
CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1
CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1
CONFLUENT_METRICS_TOPIC_REPLICATION: 1
PORT: 9021
ksqldb-server:
image: confluentinc/cp-ksqldb-server:6.1.0
hostname: ksqldb-server
container_name: ksqldb-server
depends_on:
- broker
- connect
ports:
- "8088:8088"
environment:
KSQL_CONFIG_DIR: "/etc/ksql"
KSQL_BOOTSTRAP_SERVERS: "broker:29092"
KSQL_HOST_NAME: ksqldb-server
KSQL_LISTENERS: "http://0.0.0.0:8088"
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081"
KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
KSQL_KSQL_CONNECT_URL: "http://connect:8083"
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1
KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true'
KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true'
ksqldb-cli:
image: confluentinc/cp-ksqldb-cli:6.1.0
container_name: ksqldb-cli
depends_on:
- broker
- connect
- ksqldb-server
entrypoint: /bin/sh
tty: true
ksql-datagen:
image: confluentinc/ksqldb-examples:6.1.0
hostname: ksql-datagen
container_name: ksql-datagen
depends_on:
- ksqldb-server
- broker
- schema-registry
- connect
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b broker:29092 1 40 && \
echo Waiting for Confluent Schema Registry to be ready... && \
cub sr-ready schema-registry 8081 40 && \
echo Waiting a few seconds for topic creation to finish... && \
sleep 11 && \
tail -f /dev/null'"
environment:
KSQL_CONFIG_DIR: "/etc/ksql"
STREAMS_BOOTSTRAP_SERVERS: broker:29092
STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
STREAMS_SCHEMA_REGISTRY_PORT: 8081
rest-proxy:
image: confluentinc/cp-kafka-rest:6.1.0
depends_on:
- broker
- schema-registry
ports:
- 8082:8082
hostname: rest-proxy
container_name: rest-proxy
environment:
KAFKA_REST_HOST_NAME: rest-proxy
KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
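The connect service above points CONNECT_PRODUCER_INTERCEPTOR_CLASSES and CONNECT_CONSUMER_INTERCEPTOR_CLASSES at globaltracer.CustomProducerInterceptor and globaltracer.CustomConsumerInterceptor, which are not shown in this answer. As a rough sketch only (the span name, tags, and everything inside the methods are my assumptions, not the actual implementation), a producer-side interceptor built on the OpenTracing GlobalTracer could look like the class below; it has to be packaged as a jar and made visible on the worker's CLASSPATH:

package globaltracer;

import java.util.Map;

import io.opentracing.Span;
import io.opentracing.Tracer;
import io.opentracing.util.GlobalTracer;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

// Hypothetical sketch of the producer interceptor referenced by
// CONNECT_PRODUCER_INTERCEPTOR_CLASSES; records one span per record sent.
public class CustomProducerInterceptor implements ProducerInterceptor<Object, Object> {

    @Override
    public ProducerRecord<Object, Object> onSend(ProducerRecord<Object, Object> record) {
        // GlobalTracer.get() returns whatever tracer was registered globally
        // (e.g. a Jaeger tracer built from the JAEGER_* env vars), or a no-op tracer.
        Tracer tracer = GlobalTracer.get();
        Span span = tracer.buildSpan("kafka-produce")
                .withTag("kafka.topic", record.topic())
                .start();
        // A fuller implementation would inject the span context into the record
        // headers so consumers can continue the trace; here the span ends immediately.
        span.finish();
        return record;
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // No-op in this sketch; delivery failures could be tagged on a span here.
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}

The consumer side would implement org.apache.kafka.clients.consumer.ConsumerInterceptor in the same way. Alternatively, the opentracing-contrib Kafka client library ships ready-made TracingProducerInterceptor/TracingConsumerInterceptor classes that can be referenced in these settings instead of custom code.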
Upvotes: 0
Reputation: 1328
I was missing a lot of information. I managed to get it working. The key changes: Cassandra (plus the jaeger-cassandra-schema job) was added as the backing storage, the ingester now consumes spans from Kafka and writes them to Cassandra, jaeger-query reads from Cassandra, and the broker's internal listener port 29092 is published:
version: '3.8'
services:
zookeeper:
image: confluentinc/cp-zookeeper
networks:
- kafka-net
container_name: zookeeper
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ports:
- 2181:2181
cassandra:
hostname: cassandra
image: cassandra
networks:
- kafka-net
environment:
MAX_HEAP_SIZE: 1G
HEAP_NEWSIZE: 256M
ports:
- "9042:9042"
cassandra-schema:
image: jaegertracing/jaeger-cassandra-schema
networks:
- kafka-net
depends_on:
- cassandra
kafka:
image: confluentinc/cp-kafka
networks:
- kafka-net
container_name: kafka
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
ALLOW_PLAINTEXT_LISTENER: "yes"
KAFKA_LISTENERS: INTERNAL://kafka:29092,EXTERNAL://localhost:9092
KAFKA_ADVERTISED: INTERNAL://kafka:29092,EXTERNAL://localhost:9092
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:29092,EXTERNAL://localhost:9092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
ports:
- 9092:9092
- 29092:29092
depends_on:
- zookeeper
restart: on-failure
jaeger-collector:
image: jaegertracing/jaeger-collector
container_name: jaeger-collector
networks:
- kafka-net
ports:
- "14250:14250"
- "14267:14267"
- "14268:14268" # HTTP collector port to receive spans
- "14269:14269" # HTTP health check port
restart: on-failure
environment:
LOG_LEVEL: "debug"
SPAN_STORAGE_TYPE: "kafka"
KAFKA_BROKERS: "kafka:9092"
KAFKA_PRODUCER_BROKERS: "kafka:29092"
jaeger-agent:
image: jaegertracing/jaeger-agent
container_name: jaeger-agent
networks:
- kafka-net
command: ["--reporter.grpc.host-port=jaeger-collector:14250"]
ports:
- "5775:5775/udp"
- "6831:6831/udp"
- "6832:6832/udp"
- "5778:5778"
environment:
LOG_LEVEL: "debug"
SPAN_STORAGE_TYPE: "kafka"
restart: on-failure
depends_on:
- jaeger-collector
jaeger-ingester:
image: jaegertracing/jaeger-ingester
container_name: jaeger-ingester
networks:
- kafka-net
ports:
- "14270:14270" # HTTP health check port: http://localhost:14270/
- "14271:14271" # Metrics port: http://localhost:14270/metrics
restart: on-failure
environment:
LOG_LEVEL: debug
INGESTER_PARALLELISM: 1
INGESTER_DEADLOCKINTERVAL: 0ms
SPAN_STORAGE_TYPE: cassandra
CASSANDRA_SERVERS: cassandra
CASSANDRA_KEYSPACE: jaeger_v1_dc1
METRICS_BACKEND: expvar
KAFKA_CONSUMER_BROKERS: kafka:29092
KAFKA_CONSUMER_TOPIC: jaeger-spans
depends_on:
- cassandra-schema
jaeger-query:
image: jaegertracing/jaeger-query
container_name: jaeger-query
networks:
- kafka-net
ports:
- "16686:16686" # Jaeger UI port
- "16687:16687" # HTTP health check port: http://localhost:16687/
restart: on-failure
depends_on:
- cassandra-schema
environment:
LOG_LEVEL: debug
SPAN_STORAGE_TYPE: cassandra
CASSANDRA_SERVERS: cassandra
CASSANDRA_KEYSPACE: jaeger_v1_dc1
JAEGER_ENDPOINT: http://jaeger-collector:14268/api/traces
networks:
kafka-net:
driver: bridge
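With this stack up, one way to verify the whole pipeline (agent -> collector -> Kafka -> ingester -> Cassandra -> query) is to emit a single test span from the host. Below is a minimal sketch using the jaeger-client Java library; the class name, the test-service service name, and the span name are arbitrary choices of mine, and it assumes jaeger-client (and its OpenTracing dependencies) is on the classpath:

import io.jaegertracing.Configuration;
import io.jaegertracing.Configuration.ReporterConfiguration;
import io.jaegertracing.Configuration.SamplerConfiguration;
import io.jaegertracing.internal.JaegerTracer;
import io.opentracing.Span;

public class TestSpan {
    public static void main(String[] args) {
        // Sample every span and log reported spans. The reporter defaults to the agent
        // on localhost:6831/udp, which is the port published by jaeger-agent above.
        SamplerConfiguration sampler = SamplerConfiguration.fromEnv()
                .withType("const")
                .withParam(1);
        ReporterConfiguration reporter = ReporterConfiguration.fromEnv()
                .withLogSpans(true);
        JaegerTracer tracer = Configuration.fromEnv("test-service") // service name is arbitrary
                .withSampler(sampler)
                .withReporter(reporter)
                .getTracer();

        Span span = tracer.buildSpan("proof-of-concept-span").start();
        span.setTag("hello", "kafka");
        span.finish();

        // Close the tracer so the span is flushed over UDP before the JVM exits.
        tracer.close();
    }
}

If everything is wired up, the span shows up in the Jaeger UI at http://localhost:16686 under test-service once the ingester has written it to Cassandra.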
Upvotes: 1
Reputation: 5158
You can see that the jaeger-query configuration includes SPAN_STORAGE_TYPE: "kafka".
The error indicates that the Kafka client used by jaeger-query cannot in fact reach Kafka, and therefore the Jaeger storage factory fails to initialize.
This can happen either because Kafka failed to start (did you check?), or because of a network misconfiguration in your Docker setup.
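One way to rule out the first case is to connect to the broker with a plain Kafka client and list topics. The sketch below uses the Java AdminClient (kafka-clients on the classpath is assumed, and the class name is arbitrary); run it from the host against the EXTERNAL listener, or change the bootstrap address to kafka:29092 when running inside the kafka-net network:

import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class KafkaReachabilityCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // From the host, use the EXTERNAL listener published in the compose file.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "5000");

        try (AdminClient admin = AdminClient.create(props)) {
            // listTopics() times out if no broker answers, which is the same situation
            // behind "client has run out of available brokers to talk to".
            Set<String> topics = admin.listTopics().names().get();
            System.out.println("Broker reachable, topics: " + topics);
        }
    }
}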
Upvotes: 1