# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  #dc = "kafka-tag"
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## Maximum number of unwritten metrics per output. Increasing this value
  ## allows for longer periods of output downtime without dropping metrics at the
  ## cost of higher maximum memory usage.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. Maximum flush_interval will be
  ## flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Log at debug level.
  debug = true
  ## Log only error level messages.
  # quiet = false

  ## Log target controls the destination for logs and can be one of "file",
  ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
  ## is determined by the "logfile" setting.
  # logtarget = "file"

  ## Name of the file to be logged to when using the "file" logtarget. If set to
  ## the empty string then logs are written to stderr.
  # logfile = ""

  ## The logfile will be rotated after the time interval specified. When set
  ## to 0 no time based rotation is performed. Logs are rotated only when
  ## written to, if there is no log activity rotation may be delayed.
  # logfile_rotation_interval = "0d"

  ## The logfile will be rotated when it becomes larger than the specified
  ## size. When set to 0 no size based rotation is performed.
  # logfile_rotation_max_size = "0MB"

  ## Maximum number of rotated archives to keep, any older logs are deleted.
  ## If set to -1, no archives are removed.
  # logfile_rotation_max_archives = 5

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false

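  ## Illustrative example only (the variable names below are hypothetical):
  ## any of the agent values above can be supplied from the environment, e.g.
  ##   interval = "${TELEGRAF_INTERVAL}"   # string values must stay quoted
  ##   debug = ${TELEGRAF_DEBUG}           # booleans and numbers stay unquoted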

###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################


# # Configuration for Elasticsearch to send metrics to.
[[outputs.elasticsearch]]
  # ## The full HTTP endpoint URL for your Elasticsearch instance
  # ## Multiple urls can be specified as part of the same cluster,
  # ## this means that only ONE of the urls will be written to each interval.
  urls = [ "http://dev-rhel6-v6integ.mycom-osi.com:9200" ] # required.
  # ## Elasticsearch client timeout, defaults to "5s" if not set.
  # timeout = "5s"
  # ## Set to true to ask Elasticsearch for a list of all cluster nodes,
  # ## thus it is not necessary to list all nodes in the urls config option.
  # enable_sniffer = false
  # ## Set the interval to check if the Elasticsearch nodes are available
  # ## Setting to "0s" will disable the health check (not recommended in production)
  # health_check_interval = "10s"
  # ## HTTP basic authentication details
  # # username = "telegraf"
  # # password = "mypassword"
  #
  # ## Index Config
  # ## The target index for metrics (Elasticsearch will create it if it does not exist).
  # ## You can use the date specifiers below to create indexes per time frame.
  # ## The metric timestamp will be used to decide the destination index name
  # # %Y - year (2016)
  # # %y - last two digits of year (00..99)
  # # %m - month (01..12)
  # # %d - day of month (e.g., 01)
  # # %H - hour (00..23)
  # # %V - week of the year (ISO week) (01..53)
  # ## Additionally, you can specify a tag name using the notation {{tag_name}}
  # ## which will be used as part of the index name. If the tag does not exist,
  # ## the default tag value will be used.
  # # index_name = "telegraf-{{host}}-%Y.%m.%d"
  # # default_tag_value = "none"
  index_name = "telegraf" # required.
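  ## Illustrative example (hypothetical tag value): with
  ## index_name = "telegraf-{{host}}-%Y.%m.%d", a metric tagged host=web01 and
  ## timestamped 2016-03-05 would be indexed into "telegraf-web01-2016.03.05".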
  #
  # ## Optional TLS Config
  # # tls_ca = "/etc/telegraf/ca.pem"
  # # tls_cert = "/etc/telegraf/cert.pem"
  # # tls_key = "/etc/telegraf/key.pem"
  # ## Use TLS but skip chain & host verification
  # # insecure_skip_verify = false
  #
  # ## Template Config
  # ## Set to true if you want telegraf to manage its index template.
  # ## If enabled it will create a recommended index template for telegraf indexes
  # manage_template = true
  # ## The template name used for telegraf indexes
  # template_name = "telegraf"
  # ## Set to true if you want telegraf to overwrite an existing template
  # overwrite_template = false

  # remove the tag we used for routing
  tagexclude = ["output_dest"]
  # only write items with the output_dest=elastic tag
  [outputs.elasticsearch.tagpass]
    output_dest = ["elastic"]


## kafka broker output plugin
[[outputs.kafka]]
  ## URLs of kafka brokers
  brokers = ["dev-rhel6-v6integ.mycom-osi.com:9092"]
  ## Kafka topic for producer messages
  topic = "sFlow5-topic"
  #namepass = ["sflow"]

  ## The value of this tag will be used as the topic. If not set the 'topic'
  ## option is used.
  # topic_tag = ""

  ## If true, the 'topic_tag' will be removed from the metric.
  # exclude_topic_tag = false

  ## Optional Client id
  # client_id = "sflow5-telegraf"

  ## Set the minimal supported Kafka version. Setting this enables the use of new
  ## Kafka features and APIs. Of particular interest, lz4 compression
  ## requires at least version 0.10.0.0.
  ##   ex: version = "1.1.0"
  version = "0.11.0.0"

  ## Optional topic suffix configuration.
  ## If the section is omitted, no suffix is used.
  ## The following topic suffix methods are supported:
  ##   measurement - suffix equals separator + measurement's name
  ##   tags        - suffix equals separator + specified tags' values
  ##                 interleaved with separator

  ## Suffix equals "_" + measurement name
  # [outputs.kafka.topic_suffix]
  #   method = "measurement"
  #   separator = "_"

  ## Suffix equals "__" + measurement's "foo" tag value.
  ## If there is no such tag, the suffix is an empty string.
  # [outputs.kafka.topic_suffix]
  #   method = "tags"
  #   keys = ["foo"]
  #   separator = "__"

  ## Suffix equals "_" + measurement's "foo" and "bar"
  ## tag values, separated by "_". If there are no such tags,
  ## their values are treated as empty strings.
  # [outputs.kafka.topic_suffix]
  #   method = "tags"
  #   keys = ["foo", "bar"]
  #   separator = "_"

  ## The routing tag specifies a tagkey on the metric whose value is used as
  ## the message key. The message key is used to determine which partition to
  ## send the message to. This tag is preferred over the routing_key option.
  routing_tag = "host"

  ## The routing key is set as the message key and used to determine which
  ## partition to send the message to. This value is only used when no
  ## routing_tag is set or as a fallback when the tag specified in routing_tag
  ## is not found.
  ##
  ## If set to "random", a random value will be generated for each message.
  ##
  ## When unset, no message key is added and each message is routed to a random
  ## partition.
  ##
  ##   ex: routing_key = "random"
  ##       routing_key = "telegraf"
  # routing_key = ""

  ## CompressionCodec represents the various compression codecs recognized by
  ## Kafka in messages.
  ##   0 : No compression
  ##   1 : Gzip compression
  ##   2 : Snappy compression
  ##   3 : LZ4 compression
  # compression_codec = 0

  ## RequiredAcks is used in Produce Requests to tell the broker how many
  ## replica acknowledgements it must see before responding
  ##    0 : the producer never waits for an acknowledgement from the broker.
  ##        This option provides the lowest latency but the weakest durability
  ##        guarantees (some data will be lost when a server fails).
  ##    1 : the producer gets an acknowledgement after the leader replica has
  ##        received the data. This option provides better durability as the
  ##        client waits until the server acknowledges the request as successful
  ##        (only messages that were written to the now-dead leader but not yet
  ##        replicated will be lost).
  ##   -1 : the producer gets an acknowledgement after all in-sync replicas have
  ##        received the data. This option provides the best durability, we
  ##        guarantee that no messages will be lost as long as at least one in
  ##        sync replica remains.
  # required_acks = -1

  ## The maximum number of times to retry sending a metric before failing
  ## until the next flush.
  # max_retry = 3

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## Optional SASL Config
  # sasl_username = "kafka"
  # sasl_password = "secret"
  ## SASL protocol version. When connecting to Azure EventHub set to 0.
  # sasl_version = 1

  ## Data format to output.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  # data_format = "json"

  # don't handle anything tagged with output_dest=elastic
  [outputs.kafka.tagdrop]
    output_dest = ["elastic"]
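  ## Note (descriptive only): the tagdrop above prevents a publish loop. Metrics
  ## read back from "sFlow5-topic" by the kafka_consumer input below are tagged
  ## output_dest=elastic, so this output skips them instead of re-publishing them.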

###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################


# # SFlow V5 Protocol Listener
[[inputs.sflow]]
  ## Address to listen for sFlow packets.
  ##   example: service_address = "udp://:6343"
  ##            service_address = "udp4://:6343"
  ##            service_address = "udp6://:6343"
  service_address = "udp://:6343"

  ## Set the size of the operating system's receive buffer.
  ##   example: read_buffer_size = "64KiB"
  # read_buffer_size = ""


[[inputs.kafka_consumer]]
  ## Kafka brokers.
  brokers = ["dev-rhel6-v6integ.mycom-osi.com:9092"]

  ## Topics to consume.
  topics = ["sFlow5-topic"]

  ## When set, this tag will be added to all metrics with the topic as the value.
  # topic_tag = ""

  ## Optional Client id
  # client_id = "sflow5-telegraf"

  ## Set the minimal supported Kafka version. Setting this enables the use of new
  ## Kafka features and APIs. Must be 0.10.2.0 or greater.
  ##   ex: version = "1.1.0"
  version = "0.11.0.0"

  ## Optional TLS Config
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## SASL authentication credentials. These settings should typically be used
  ## with TLS encryption enabled using the "enable_tls" option.
  # sasl_username = "kafka"
  # sasl_password = "secret"

  ## SASL protocol version. When connecting to Azure EventHub set to 0.
  # sasl_version = 1

  ## Name of the consumer group.
  consumer_group = "sflow_metrics_consumers"

  ## Initial offset position; one of "oldest" or "newest".
  # offset = "oldest"

  ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
  # balance_strategy = "range"

  ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  ## larger messages are dropped
  max_message_len = 1000000

  ## Maximum messages to read from the broker that have not been written by an
  ## output. For best throughput set based on the number of metrics within
  ## each message and the size of the output's metric_batch_size.
  ##
  ## For example, if each message from the queue contains 10 metrics and the
  ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  ## full batch is collected and the write is triggered immediately without
  ## waiting until the next flush_interval.
  # max_undelivered_messages = 1000

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  # data_format = "influx"

  # add the output_dest=elastic tag
  [inputs.kafka_consumer.tags]
    output_dest = "elastic"
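
# Routing summary (descriptive note only; the sections above are the actual config):
#   * [[inputs.sflow]] listens on udp://:6343 and emits untagged sFlow metrics.
#   * [[outputs.kafka]] publishes those metrics to the "sFlow5-topic" topic; its
#     tagdrop keeps metrics tagged output_dest=elastic from being re-published.
#   * [[inputs.kafka_consumer]] reads "sFlow5-topic" and tags every metric with
#     output_dest=elastic.
#   * [[outputs.elasticsearch]] accepts only output_dest=elastic metrics via
#     tagpass and strips the routing tag with tagexclude before indexing.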