-
Notifications
You must be signed in to change notification settings - Fork 26
/
Copy pathkafka-config.yml.sample
224 lines (196 loc) · 10.6 KB
/
kafka-config.yml.sample
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
integrations:
# This instance gives an example of autodiscovery of brokers with a bootstrap broker, which is the recommended configuration.
# Collecting both metrics and inventory.
- name: nri-kafka
  env:
    # A cluster name is required to uniquely identify this collection result in Insights.
    CLUSTER_NAME: "testcluster1"
    # Kafka API version to target. Defaults to 1.0.0, which will work for all post-1.0.0 versions.
    # It cannot be autodetected, so it must be set to the actual version in use in the cluster.
    # Some features and metrics are not available in versions older than 1.0.0.
    KAFKA_VERSION: "1.0.0"
    # Use bootstrap broker for discovery. The integration will connect to a fixed broker, described below, and retrieve
    # all other brokers in the cluster from it.
    AUTODISCOVER_STRATEGY: "bootstrap"
    # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster
    # will be discovered using that connection.
    BOOTSTRAP_BROKER_HOST: "localhost"
    BOOTSTRAP_BROKER_KAFKA_PORT: 9092
    BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT  # Currently supports PLAINTEXT and SSL
    # When bootstrap broker is used for discovery, JMX credentials and port must be the same for all brokers.
    BOOTSTRAP_BROKER_JMX_PORT: 9999
    BOOTSTRAP_BROKER_JMX_USER: admin
    BOOTSTRAP_BROKER_JMX_PASSWORD: password
    # Setting this to true will make the integration retrieve data only from the broker configured as bootstrap,
    # and no other brokers will be discovered from it.
    # Users will typically want to set this to false (default), unless a different discovery mechanism, such as Docker
    # or Kubernetes, is used on top of the integration.
    LOCAL_ONLY_COLLECTION: false
    # Below are the fields used to fine tune/toggle topic metric collection. TOPIC_MODE can be one of:
    # - "all", where all topics found in all brokers will be collected.
    # - "none", which will disable topic collection altogether.
    # - "list", which allows a fixed list of topics to be defined as TOPIC_LIST: '["topic1", "topic2"]'
    # - "regex", which allows defining a regex to select which topics will be monitored using TOPIC_REGEX: 'topic\d+'
    #
    # TOPIC_MODE: "all"
    # TOPIC_MODE: "none"
    TOPIC_MODE: "regex"
    TOPIC_REGEX: 'topic\d+'
    # TOPIC_MODE: "list"
    # TOPIC_LIST: '["topic1", "topic2", "topic3"]'
    # Topic size and offset can also be collected from brokers in the cluster. These operations are intensive and can take a while
    # to collect for a larger number of topics. It is recommended to only enable these features when collecting topics
    # selectively, i.e. TOPIC_MODE is not "all" or a very permissive regex.
    # If the fields are omitted they will default to false.
    COLLECT_TOPIC_SIZE: false
    COLLECT_TOPIC_OFFSET: false
    METRICS: "true"
  interval: 15s
  inventory_source: config/kafka
  # Additionally, custom labels can be added to further identify your data.
  labels:
    env: production
    role: kafka
# Example kafka-metrics-zookeeper-discovery. This gives an example of autodiscovery of brokers with Zookeeper.
# It collects only metrics.
- name: nri-kafka
  env:
    METRICS: "true"
    # A cluster name is required to uniquely identify this collection result in Insights.
    CLUSTER_NAME: "testcluster1"
    # Override the Kafka API version to target. Defaults to 1.0.0, which will work for all post-1.0.0 versions. Older versions of the API may be missing features.
    KAFKA_VERSION: "1.0.0"
    # How to find brokers. Either "bootstrap" or "zookeeper".
    AUTODISCOVER_STRATEGY: "zookeeper"
    # A list of Zookeeper hosts to discover brokers from.
    # Only required and used if AUTODISCOVER_STRATEGY is "zookeeper".
    #
    # The ZOOKEEPER_HOSTS field is a JSON array; each entry in the array holds the connection information for a Zookeeper node.
    # Each entry should have the following fields:
    # - host: The IP or hostname of a Zookeeper node. If the New Relic agent is installed on a Zookeeper node, "localhost" is an acceptable value.
    # - port: The port Zookeeper is listening on for incoming requests. If omitted, a default port of 2181 will be used.
    ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]'
    # The only supported value is digest. If empty, no authentication is used.
    # ZOOKEEPER_AUTH_SCHEME: "digest"
    # Only honored if ZOOKEEPER_AUTH_SCHEME is not empty.
    # If using "digest" authentication, the credentials must be specified as a string of the form "<user>:<password>"
    # Example: 'zookeeperuser:zookeeperpass'
    # ZOOKEEPER_AUTH_SECRET: "username:password"
    # If the Kafka configuration files are not in the root node of Zookeeper, an alternative root node can be specified.
    # The alternative root must have a leading slash.
    ZOOKEEPER_PATH: "/kafka-root"
    # It is common to use the same JMX configuration across a Kafka cluster.
    # The default username and password are the credentials that will be used to make
    # a JMX connection to each broker found by Zookeeper. These values will also
    # be used when connecting to a consumer and/or producer if the "username" or "password"
    # field is omitted.
    DEFAULT_JMX_USER: "username"
    DEFAULT_JMX_PASSWORD: "password"
    TOPIC_MODE: "all"
  interval: 15s
  labels:
    env: production
    role: kafka
# Example kafka-inventory. This gives an example of collecting only inventory with the integration.
- name: nri-kafka
  env:
    INVENTORY: "true"
    CLUSTER_NAME: "testcluster2"
    ZOOKEEPER_HOSTS: '[{"host": "localhost", "port": 2181}]'
    # The only supported value is digest. If empty, no authentication is used.
    # ZOOKEEPER_AUTH_SCHEME: "digest"
    # Only honored if ZOOKEEPER_AUTH_SCHEME is not empty.
    # If using "digest" authentication, the credentials must be specified as a string of the form "<user>:<password>"
    # Example: 'zookeeperuser:zookeeperpass'
    # ZOOKEEPER_AUTH_SECRET: "username:password"
    # Below are the fields used to fine tune/toggle topic inventory collection.
    # In order to collect topics, the TOPIC_MODE field must be set to "all", "list", or "regex".
    TOPIC_MODE: "all"
  interval: 15s
  labels:
    env: production
    role: kafka
  inventory_source: config/kafka
# Example kafka-consumer-offsets. This gives an example configuration for collecting consumer offsets for the cluster.
- name: nri-kafka
  env:
    METRICS: "false"
    INVENTORY: "false"
    CONSUMER_OFFSET: "true"
    CLUSTER_NAME: "testcluster3"
    AUTODISCOVER_STRATEGY: "bootstrap"
    BOOTSTRAP_BROKER_HOST: "localhost"
    BOOTSTRAP_BROKER_KAFKA_PORT: 9092
    BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT
    # A regex pattern that matches the consumer groups to collect metrics from.
    CONSUMER_GROUP_REGEX: '.*'
  interval: 15s
  labels:
    env: production
    role: kafka
# Example configuration for collecting JMX metrics from consumers and producers.
# Please note that we use JMX to collect these metrics, which means that only producers and consumers written in Java
# are supported for collection.
- name: nri-kafka
  env:
    METRICS: "true"
    CLUSTER_NAME: "testcluster3"
    # In order to collect Java producer and consumer metrics, the PRODUCERS and CONSUMERS fields should be filled out.
    # Both fields are JSON arrays, with each entry being a separate Java producer or consumer, in its respective field.
    # Each entry should have the following fields:
    # - name: The client.id of the producer/consumer as it appears in Kafka. If omitted, metrics from all clients in the JMX host:port will be reported.
    # - host: The IP or hostname of the producer/consumer. If omitted, will use the value of the DEFAULT_JMX_HOST field.
    # - port: The port on which JMX is set up on the producer/consumer. If omitted, will use the value of the DEFAULT_JMX_PORT field.
    # - username: The username used to connect to JMX. If omitted, will use the value of the DEFAULT_JMX_USER field.
    # - password: The password used to connect to JMX. If omitted, will use the value of the DEFAULT_JMX_PASSWORD field.
    # Example: {"name": "myProducer", "host": "localhost", "port": 24, "username": "me", "password": "secret"}
    PRODUCERS: '[{"host": "localhost", "port": 24, "username": "me", "password": "secret"}]'
    CONSUMERS: '[{"host": "localhost", "port": 24, "username": "me", "password": "secret"}]'
    # If several producers/consumers are on the same host, an agent can be installed on that host and the
    # DEFAULT_JMX_HOST and DEFAULT_JMX_PORT fields can be set once and used for all producers/consumers that
    # do not have the "host" or "port" field respectively.
    # When defaults are set it is also possible to use the string "default" to gather metrics from all producers /
    # consumers in the "DEFAULT_JMX_HOST:DEFAULT_JMX_PORT". Example:
    # PRODUCERS: default
    # These fields can be removed if each producer/consumer has its own "host" and/or "port" field filled out.
    DEFAULT_JMX_HOST: "localhost"
    DEFAULT_JMX_PORT: "9999"
  interval: 15s
  labels:
    env: production
    role: kafka
# Example kafka-kerberos-auth, collecting only metrics.
- name: nri-kafka
  env:
    METRICS: "true"
    # A cluster name is required to uniquely identify this collection result in Insights.
    CLUSTER_NAME: "testcluster1"
    AUTODISCOVER_STRATEGY: "bootstrap"
    # Bootstrap broker arguments. These configure a connection to a single broker. The rest of the brokers in the cluster
    # will be discovered using that connection.
    BOOTSTRAP_BROKER_HOST: "localhost"
    BOOTSTRAP_BROKER_KAFKA_PORT: 9092
    BOOTSTRAP_BROKER_KAFKA_PROTOCOL: PLAINTEXT  # Currently supports PLAINTEXT and SSL
    BOOTSTRAP_BROKER_JMX_PORT: 9999
    # JMX user and password default to DEFAULT_JMX_USER and DEFAULT_JMX_PASSWORD if unset.
    BOOTSTRAP_BROKER_JMX_USER: admin
    BOOTSTRAP_BROKER_JMX_PASSWORD: password
    # Kerberos authentication arguments.
    SASL_MECHANISM: GSSAPI
    SASL_GSSAPI_REALM: SOMECORP.COM
    SASL_GSSAPI_SERVICE_NAME: Kafka
    SASL_GSSAPI_USERNAME: kafka
    SASL_GSSAPI_KEY_TAB_PATH: /etc/newrelic-infra/kafka.keytab
    SASL_GSSAPI_KERBEROS_CONFIG_PATH: /etc/krb5.conf
    # Disables FAST negotiation, which causes issues with Active Directory.
    # SASL_GSSAPI_DISABLE_FAST_NEGOTIATION: false
    # Only collect metrics from the bootstrap broker configured. The integration will not attempt to collect metrics
    # for any other broker, nor will it collect cluster-level metrics like topic metrics. This is useful for things
    # like deployment to Kubernetes, where a single integration instance is desired per broker.
    LOCAL_ONLY_COLLECTION: false
    TOPIC_MODE: "all"
    COLLECT_TOPIC_SIZE: false
  interval: 15s
  labels:
    env: production
    role: kafka