Hello
I’ll start by describing what I want to achieve: I want to write a Python script which calls the API of ethermine.org; that data must then be fed into InfluxDB via Telegraf and displayed in Grafana. I got the idea from Mining | LiteFoote, however I am now at a point where I am completely stuck, and even after hours of googling I do not know how to continue.
I have InfluxDB, Grafana and Telegraf installed on an AWS EC2 instance. I set up InfluxDB, Grafana and Telegraf; I will put my Telegraf config below. I have my config stored at the default location /etc/telegraf/telegraf.conf (I deleted what was inside the file and replaced it with my own text).
I tried testing the config like they did on the LiteFoote website, however I get permission errors.
I tried running telegraf --config /etc/telegraf/telegraf.conf --input-filter exec --debug
or with --test , and I get the error [inputs.exec] Error in plugin: exec: fork/exec /home/ubuntu/main.py: permission denied for command ‘/home/ubuntu/main.py’:
So I tried googling and thus I tried running
sudo chown telegraf:telegraf /home/ubuntu/main.py
and chown telegraf main.py
, but neither worked, so I am not sure what to attempt now.
Here is a screenshot of the litefoot website
I tried ignoring the permission error and went on to setting up Grafana, but I do not get any data displayed (I may have set up the "from" field wrong, as I think my setup is slightly different from the LiteFoote website, but I am not sure).
# Telegraf agent settings
[agent]
## Poll every input on this cadence (individual inputs may override it).
interval = "10s"
## Align collections to round interval boundaries (:00, :10, :20, ...).
round_interval = true
## Maximum metrics per write batch sent to each output plugin.
metric_batch_size = 1000
## Metrics cached per output while writes fail; oldest are dropped first
## once the buffer is full. Only fills when writes to an output fail.
metric_buffer_limit = 10000
## Random per-plugin delay before each collection, to spread load on
## shared resources such as sysfs.
collection_jitter = "0s"
## How often outputs are flushed; the effective maximum is
## flush_interval + flush_jitter.
flush_interval = "10s"
## Random jitter added to the flush interval, to avoid write spikes when
## many Telegraf instances flush at once.
flush_jitter = "0s"
## Timestamp precision; "" derives it from the collection interval
## (e.g. interval = "10s" -> "1s", "250ms" -> "1ms"), capped at 1s.
## Not applied to service inputs. Valid units: "ns", "us", "ms", "s".
precision = ""
## Logging: debug enables verbose messages, quiet restricts to errors,
## and an empty logfile means log to stderr.
debug = false
quiet = false
logfile = ""
## Hostname override; empty uses os.Hostname().
hostname = ""
## When true, the "host" tag is omitted from all metrics.
omit_hostname = false
[[outputs.influxdb_v2]]
## URLs of the InfluxDB v2 node(s). With multiple URLs, only ONE is
## written to per flush interval.
## NOTE(review): InfluxDB runs on this same EC2 instance, so
## "http://127.0.0.1:8086" would avoid routing out over the public IP
## (and surviving an IP change) — confirm before switching.
urls = ["http://13.244.97.239:8086"]
## API token for authentication. Telegraf only substitutes environment
## variables written as ${VAR}; a bare $INFLUX_TOKEN is sent literally
## and authentication fails. The variable must also be visible to the
## telegraf service itself (e.g. systemd unit Environment=, or
## /etc/default/telegraf), not just to your login shell.
token = "${INFLUX_TOKEN}"
## Destination organization; must already exist in InfluxDB.
organization = "altus"
## Destination bucket to write into.
bucket = "altus"
[[inputs.cpu]]
## Gather stats for each individual CPU core.
percpu = true
## Also gather the aggregate "cpu-total" stats.
totalcpu = true
## Keep raw CPU time counters out of the output; percentages only.
collect_cpu_time = false
## Do not report a summed non-idle "active" state.
report_active = false
[[inputs.disk]]
## mount_points is left unset, so stats are gathered for every mount
## point; uncomment to restrict collection.
# mount_points = ["/"]
## Pseudo/overlay filesystem types to skip.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
# Disk I/O counters per block device.
[[inputs.diskio]]
# Memory usage.
[[inputs.mem]]
# Network interface counters.
[[inputs.net]]
# Process counts by state (running, sleeping, zombie, ...).
[[inputs.processes]]
# Swap usage.
[[inputs.swap]]
# Load average, uptime, and user count.
[[inputs.system]]
[[inputs.exec]]
## Poll the ethermine.org API via the Python script. The exec plugin
## fork/execs the command directly, which is why the bare path produced
## "fork/exec /home/ubuntu/main.py: permission denied": a script run that
## way needs BOTH a shebang line (e.g. #!/usr/bin/env python3) and the
## execute bit (chmod +x /home/ubuntu/main.py) — chown alone grants
## neither. Invoking the interpreter explicitly needs only read access
## for the telegraf user.
commands = ["python3 /home/ubuntu/main.py"]
## Allow time for the remote API call; Telegraf's default exec timeout
## is only 5s.
timeout = "30s"
## The script must print a single JSON object on stdout.
data_format = "json"
## Query the API every two minutes (overrides the agent's 10s interval).
interval = "120s"
## Resulting measurement is named "exec-ethermine".
name_suffix = "-ethermine"