Raspberry Pi 2: delay between writing to InfluxDB and seeing data in Grafana

I’m seeing a 42-minute delay between writing data to InfluxDB and that data appearing in Grafana on a Raspberry Pi 2. I have tried different settings, but nothing has worked for me.
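
To narrow down where the time goes, here is a minimal sketch of the check I run (a rough script, assuming the defaults from the config below; "mydb" and "latency_test" are placeholders for my real database and measurement, and I'm assuming the query endpoint honors epoch=s on this 0.9-era build). It writes one point and immediately reads it back, so if the lag printed here is near zero while Grafana still trails by 42 minutes, the problem is on the Grafana or clock side rather than in InfluxDB:

# latency_check.py -- write one point, read it straight back, and compare
# the stored timestamp with the local clock. "mydb" and "latency_test"
# are placeholders, not my real database/measurement names.
import json
import time
import urllib.parse
import urllib.request

BASE = "http://localhost:8086"  # matches bind-address = ":8086" in [http]
DB = "mydb"

# Write one point; InfluxDB assigns the timestamp since none is supplied.
urllib.request.urlopen(BASE + "/write?db=" + DB, data=b"latency_test value=1")

# Read the newest point back, asking for epoch-second timestamps.
q = urllib.parse.quote("SELECT last(value) FROM latency_test")
resp = urllib.request.urlopen(BASE + "/query?db=" + DB + "&epoch=s&q=" + q)
stored = json.loads(resp.read())["results"][0]["series"][0]["values"][0][0]
print("write-to-query lag in seconds:", time.time() - stored)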

Thanks for the help.

This is what it looks like in Grafana:

This is my influxdb.conf:

### Welcome to the InfluxDB configuration file.

# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
# We don't track ip addresses of servers reporting. This is only used
# to track the number of instances running and the versions, which
# is very helpful for us.
# Change this option to true to disable reporting.
reporting-disabled = false

###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###

[meta]
  dir = "/var/lib/influxdb/meta"
  hostname = "localhost"
  bind-address = ":8088"
  retention-autocreate = true
  election-timeout = "1s"
  heartbeat-timeout = "1s"
  leader-lease-timeout = "500ms"
  commit-timeout = "50ms"
  cluster-tracing = false

  # If enabled, when a Raft cluster loses a peer due to a `DROP SERVER` command,
  # the leader will automatically ask a non-raft peer node to promote to a raft
  # peer. This only happens if there is a non-raft peer node available to promote.
  # This setting only affects the local node, so to ensure it operates correctly, be sure to set
  # it in the config of every node.
  # raft-promotion-enabled = true

###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###

[data]
  dir = "/var/lib/influxdb/data"

  # Controls the storage engine used for new shards. Engines available are b1,
  # bz1, and tsm1. b1 was the original default engine from 0.9.0 to 0.9.2. bz1
  # has been the default engine since 0.9.3. tsm1 was introduced in 0.9.5 and is
  # currently EXPERIMENTAL. Consequently, data written into the tsm1 engine may
  # need to be wiped between upgrades.
  engine = "tsm1"

  # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
  # apply to any new shards created after upgrading to a version > 0.9.3.
  # max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
  # wal-flush-interval = "10m0s" # Maximum time data can sit in WAL before a flush.
  # wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.

  # These are the WAL settings for the storage engine >= 0.9.3
  wal-dir = "/var/lib/influxdb/wal"
  wal-logging-enabled = true
  data-logging-enabled = false

  # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
  # flush to the index
  # wal-ready-series-size = 25600

  # Flush and compact a partition once this ratio of series are over the ready size
  # wal-compaction-threshold = 0.6

  # Force a flush and compaction if any series in a partition gets above this size in bytes
  # wal-max-series-size = 2097152

  # Force a flush of all series and full compaction if there have been no writes in this
  # amount of time. This is useful for ensuring that shards that are cold for writes don't
  # keep a bunch of data cached in memory and in the WAL.
  # wal-flush-cold-interval = "10m"

  # Force a partition to flush its largest series if it reaches this approximate size in
  # bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
  # The more memory you have, the bigger this can be.
  # wal-partition-size-threshold = 20971520

  # Whether queries should be logged before execution. Very useful for troubleshooting, but will
  # log any sensitive data contained within a query.
  query-log-enabled = false

  # Settings for the TSM engine

  # CacheMaxMemorySize is the maximum size a shard's cache can
  # reach before it starts rejecting writes.
  cache-max-memory-size = 524288000

  # CacheSnapshotMemorySize is the size at which the engine will
  # snapshot the cache and write it to a TSM file, freeing up memory
  cache-snapshot-memory-size = 26214400

  # CacheSnapshotWriteColdDuration is the length of time at
  # which the engine will snapshot the cache and write it to
  # a new TSM file if the shard hasn't received writes or deletes
  cache-snapshot-write-cold-duration = "1h"

  # MinCompactionFileCount is the minimum number of TSM files
  # that need to exist before a compaction cycle will run
  compact-min-file-count = 3

  # CompactFullWriteColdDuration is the duration at which the engine
  # will compact all TSM files in a shard if it hasn't received a
  # write or delete
  compact-full-write-cold-duration = "24h"

  # MaxPointsPerBlock is the maximum number of points in an encoded
  # block in a TSM file. Larger numbers may yield better compression
  # but could incur a performance penalty when querying
  max-points-per-block = 0
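
  # Note for scale: cache-max-memory-size = 524288000 bytes is 500 MiB,
  # half of the Pi 2's 1 GB of RAM, and cache-snapshot-memory-size =
  # 26214400 bytes is 25 MiB.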

###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###

[hinted-handoff]
  # enabled = true
  # dir = "/var/lib/influxdb/hh"
  # max-size = 1073741824
  # max-age = "168h"
  # retry-rate-limit = 0

  # Hinted handoff will start retrying writes to down nodes at a rate of once per second.
  # If any error occurs, it will backoff in an exponential manner, until the interval
  # reaches retry-max-interval. Once writes to all nodes are successfully completed the
  # interval will reset to retry-interval.
  # retry-interval = "1s"
  # retry-max-interval = "1m"

  # Interval between running checks for data that should be purged. Data is purged from
  # hinted-handoff queues for two reasons. 1) The data is older than the max age, or
  # 2) the target node has been dropped from the cluster. Note, however, that data is
  # never dropped until it has reached max-age, whether the target node was dropped or not.
  # purge-interval = "1h"

###
### [cluster]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
###

[cluster]
  # shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request. 
  # write-timeout = "10s" # The time within which a write request must complete on the cluster.

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###

[retention]
  enabled = true
  check-interval = "10m"
  advanced-period = "30m"

###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data arrives.
### Only shards that, after creation, will have both a start- and end-time in the
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.

[shard-precreation]
  enabled = true
  check-interval = "5m"
  advance-period = "10m"

###
### [monitor]
###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically
### if it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.

[monitor]
#  store-enabled = true # Whether to record statistics internally.
#  store-database = "_internal" # The destination database for recorded statistics
#  store-interval = "10s" # The interval at which to record statistics

###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###

[admin]
  enabled = true
  bind-address = ":8083"
  https-enabled = false
  https-certificate = "/etc/ssl/influxdb.pem"
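
  # Note: with the admin UI enabled, queries can be run by hand at
  # http://<pi-address>:8083 and compared against what Grafana shows.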

###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###

[http]
  enabled = true
  bind-address = ":8086"
  auth-enabled = false
  log-enabled = false
  write-tracing = false
  pprof-enabled = false
  https-enabled = false
  https-certificate = "/etc/ssl/influxdb.pem"
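
  # Note: Grafana's InfluxDB data source reads through this HTTP endpoint
  # (http://<pi-address>:8086), so every dashboard query goes through here.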

###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###

[[graphite]]
  enabled = false
  # database = "graphite"
  # bind-address = ":2003"
  # protocol = "tcp"
  # consistency-level = "one"
  # name-separator = "."

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.

  # batch-size = 1000 # will flush if this many points get buffered
  # batch-pending = 5 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
  # udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.

  ## "name-schema" configures tag names for parsing the metric name from graphite protocol;
  ## separated by `name-separator`.
  ## The "measurement" tag is special and the corresponding field will become
  ## the name of the metric.
  ## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as
  ## {
  ##     measurement: "cpu",
  ##     tags: {
  ##         "type": "server",
  ##         "host": "localhost",
  ##         "device": "cpu0"
  ##     }
  ## }
  # name-schema = "type.host.measurement.device"

  ## If set to true, when the input metric name has more fields than `name-schema` specified,
  ## the extra fields will be ignored.
  ## Otherwise an error will be logged and the metric rejected.
  # ignore-unnamed = true

###
### [collectd]
###
### Controls the listener for collectd data.
###

[collectd]
  enabled = false
  # bind-address = ""
  # database = ""
  # typesdb = ""

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.

  # batch-size = 1000 # will flush if this many points get buffered
  # batch-pending = 5 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
  # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.

###
### [opentsdb]
###
### Controls the listener for OpenTSDB data.
###

[opentsdb]
  enabled = false
  # bind-address = ":4242"
  # database = "opentsdb"
  # retention-policy = ""
  # consistency-level = "one"
  # tls-enabled = false
  # certificate= ""
  # log-point-errors = true # Log an error for every malformed point.

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Only
  # metrics received over the telnet protocol undergo batching.

  # batch-size = 1000 # will flush if this many points get buffered
  # batch-pending = 5 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###

[[udp]]
  enabled = false
  # bind-address = ""
  # database = "udp"
  # retention-policy = ""

  # These next lines control how batching works. You should have this enabled
  # otherwise you could get dropped metrics or poor performance. Batching
  # will buffer points in memory if you have many coming in.

  # batch-size = 1000 # will flush if this many points get buffered
  # batch-pending = 5 # number of batches that may be pending in memory
  # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
  # read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
 
  # set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536
  # udp-payload-size = 65536

###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###

[continuous_queries]
  log-enabled = false
  # enabled = true
  # run-interval = "1s" # interval for how often continuous queries will be checked if they need to run
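
Since Grafana panels query with a time range relative to now(), I also compare the newest point overall against what a "last 5 minutes" style query sees (same placeholder names as above; this is a sketch, not exactly what Grafana sends). If the filtered query comes back empty while the unfiltered one returns a fresh point, the data's timestamps and the server clock disagree, which would look exactly like a fixed delay in Grafana:

# filter_check.py -- does a now()-relative query see the newest point?
import json
import urllib.parse
import urllib.request

BASE = "http://localhost:8086"
DB = "mydb"  # placeholder

def run(q):
    url = BASE + "/query?db=" + DB + "&q=" + urllib.parse.quote(q)
    return json.loads(urllib.request.urlopen(url).read())

# Newest point regardless of any time range:
print(run("SELECT last(value) FROM latency_test"))

# Roughly the shape of a dashboard panel's relative-range query:
print(run("SELECT value FROM latency_test WHERE time > now() - 5m"))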