Commit f71415de authored by Mikaël Mourcia

Modify influxdb conf template to fit influxdb 1.0.x needs

parent 670a4a7e
### Welcome to the InfluxDB configuration file.
# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
# We don't track ip addresses of servers reporting. This is only used
# to track the number of instances running and the versions, which
# is very helpful for us.
# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
# The data includes a random ID, os, arch, version, the number of series and other
# usage data. No data from user databases is ever transmitted.
# Change this option to true to disable reporting.
reporting-disabled = false
@@ -21,19 +19,17 @@ reporting-disabled = false
###
[meta]
# Controls if this node should run the metaservice and participate in the Raft group
enabled = true
# Where the metadata/raft database is stored
dir = "/var/lib/influxdb/meta"
bind-address = ":8088"
retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s"
leader-lease-timeout = "500ms"
commit-timeout = "50ms"
cluster-tracing = false
# If log messages are printed for the meta service
logging-enabled = true
pprof-enabled = false
# The default duration for leases.
lease-duration = "1m0s"
###
### [data]
@@ -50,36 +46,13 @@ reporting-disabled = false
dir = "/var/lib/influxdb/data"
# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
# apply to any new shards created after upgrading to a version > 0.9.3.
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.
# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/var/lib/influxdb/wal"
wal-logging-enabled = true
data-logging-enabled = true
# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
# flush to the index
# wal-ready-series-size = 25600
# Flush and compact a partition once this ratio of series are over the ready size
# wal-compaction-threshold = 0.6
# Force a flush and compaction if any series in a partition gets above this size in bytes
# wal-max-series-size = 2097152
# Force a flush of all series and full compaction if there have been no writes in this
# amount of time. This is useful for ensuring that shards that are cold for writes don't
# keep a bunch of data cached in memory and in the WAL.
# wal-flush-cold-interval = "10m"
# Force a partition to flush its largest series if it reaches this approximate size in
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
# The more memory you have, the bigger this can be.
# wal-partition-size-threshold = 20971520
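# As a rough worked example (my arithmetic, not an official sizing guide): the default of
# 20971520 bytes is ~20 MB per partition, so with 5 partitions you should plan for at
# least 5 x 20 MB = ~100 MB of memory held by the WAL cache at this threshold.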
# Trace logging provides more verbose output around the tsm engine. Turning
# this on can provide more useful output for debugging tsm engine issues.
# trace-logging-enabled = false
# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
@@ -111,47 +84,23 @@ reporting-disabled = false
# MaxPointsPerBlock is the maximum number of points in an encoded
# block in a TSM file. Larger numbers may yield better compression
# but could incur a performance peanalty when querying
# but could incur a performance penalty when querying
# max-points-per-block = 1000
###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###
[hinted-handoff]
enabled = true
dir = "/var/lib/influxdb/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0
# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
# If any error occurs, it will backoff in an exponential manner, until the interval
# reaches retry-max-interval. Once writes to all nodes are successfully completed the
# interval will reset to retry-interval.
retry-interval = "1s"
retry-max-interval = "1m"
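# Illustrative retry sequence (assuming a simple doubling backoff, which the comment above
# does not pin down): with retry-interval = "1s" and retry-max-interval = "1m", failed
# retries would happen after roughly 1s, 2s, 4s, 8s, ... and then once per minute until
# writes succeed, at which point the interval resets to 1s.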
# Interval between running checks for data that should be purged. Data is purged from
# hinted-handoff queues for two reasons. 1) The data is older than the max age, or
# 2) the target node has been dropped from the cluster. Either way, data is never dropped
# before it has reached max-age.
purge-interval = "1h"
###
### [cluster]
### [coordinator]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
### Controls the clustering service configuration.
###
[cluster]
shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request.
write-timeout = "10s" # The time within which a write request must complete on the cluster.
[coordinator]
write-timeout = "10s"
max-concurrent-queries = 0
query-timeout = "0"
log-queries-after = "0"
max-select-point = 0
max-select-series = 0
max-select-buckets = 0
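# Illustrative tuning (example values only, not recommendations): to log queries running
# longer than ten seconds, cap concurrency, and kill runaway queries, one might set
#   log-queries-after = "10s"
#   max-concurrent-queries = 20
#   query-timeout = "60s"
# A value of 0 (or "0") for these settings leaves the corresponding limit disabled.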
###
### [retention]
@@ -213,12 +162,28 @@ reporting-disabled = false
[http]
enabled = true
bind-address = ":{{ cps_idb_port }}"
auth-enabled = true
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
### Use a separate private key location.
# https-private-key = ""
max-row-limit = 10000
realm = "InfluxDB"
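# Example TLS setup (hypothetical file paths): to serve the HTTP API over HTTPS with a
# certificate and a separate private key file, one might set
#   https-enabled = true
#   https-certificate = "/etc/ssl/influxdb.crt"
#   https-private-key = "/etc/ssl/influxdb.key"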
###
### [subscriber]
###
### Controls the subscriptions, which can be used to fork a copy of all data
### received by the InfluxDB host.
###
[subscriber]
enabled = true
http-timeout = "30s"
###
### [[graphite]]
@@ -232,44 +197,40 @@ reporting-disabled = false
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"
# name-separator = "."
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-size = 5000 # will flush if this many points get buffered
# batch-pending = 10 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
## "name-schema" configures tag names for parsing the metric name from graphite protocol;
## separated by `name-separator`.
## The "measurement" tag is special and the corresponding field will become
## the name of the metric.
## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
## {
## measurement: "cpu",
## tags: {
## "type": "server",
## "host": "localhost,
## "device": "cpu0"
## }
## }
# name-schema = "type.host.measurement.device"
## If set to true, when the input metric name has more fields than `name-schema` specified,
## the extra fields will be ignored.
## Otherwise an error will be logged and the metric rejected.
# ignore-unnamed = true
### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
# separator = "."
### Default tags that will be added to all metrics. These can be overridden at the template level
### or by tags extracted from metric
# tags = ["region=us-east", "zone=1c"]
### Each template line requires a template pattern. It can have an optional
### filter before the template and separated by spaces. It can also have optional extra
### tags following the template. Multiple tags should be separated by commas and no spaces
### similar to the line protocol format. There can be only one default template.
# templates = [
# "*.app env.service.resource.measurement",
# # Default template
# "server.*",
# ]
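### Illustrative parse (my reading of the template syntax above, not taken from upstream
### docs): with the template "*.app env.service.resource.measurement", a metric named
### "prod.app.mysql.queries" would be stored as measurement "queries" with the tags
### env=prod, service=app, resource=mysql.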
###
### [collectd]
###
### Controls the listener for collectd data.
### Controls one or many listeners for collectd data.
###
[collectd]
[[collectd]]
enabled = false
# bind-address = ""
# database = ""
@@ -287,10 +248,10 @@ reporting-disabled = false
###
### [opentsdb]
###
### Controls the listener for OpenTSDB data.
### Controls one or many listeners for OpenTSDB data.
###
[opentsdb]
[[opentsdb]]
enabled = false
# bind-address = ":4242"
# database = "opentsdb"
@@ -324,10 +285,10 @@ reporting-disabled = false
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
batch-size = 1000 # will flush if this many points get buffered
batch-pending = 5 # number of batches that may be pending in memory
batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536
# udp-payload-size = 65536
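# Rough buffering estimate (an assumption, not a documented guarantee): with batch-size = 1000
# and batch-pending = 5, up to about 1000 x 5 = 5000 points can sit in memory for this
# listener before batches are flushed or incoming points risk being dropped.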