Hello!
I’ve been experiencing bugs with the monitor’s check and notification in a task. I created a task manually with a check of type threshold.
It doesn’t matter the threshold that I put in the trigger, it always triggers the notification. (I put if the numeric value surpasses 80 send a trigger, but it’s sending a notification even at value 0)
This is my current code:
import "strings"
import "regexp"
import "influxdata/influxdb/monitor"
import "influxdata/influxdb/schema"
import "pagerduty"
import "experimental"
option task = {name: "Perte equipement", every: 10m, offset: 1s}
option v = {timeRangeStart: -1m, timeRangeStop: now()}
notification = {
_notification_rule_id: "perte",
_notification_rule_name: "Perte Equipement",
_notification_endpoint_id: "",
_notification_endpoint_name: "",
}
check = {
_check_id: "0000000000000001",
_check_name: "High Packet Loss",
_type: "threshold",
tags: {},
}
messageFn = (r) =>
"Type d'alerte : Perte d'équipement
Equipement concerné : ${r.hostname}
Description : L'équipement ne répond plus au ping
Valeur: ${r.percent_packet_loss}"
Url =
from(bucket: "demo")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) => r["_measurement"] == "ping")
|> filter(fn: (r) => r["_field"] == "percent_packet_loss")
|> rename(columns: {url: "agent_host"})
|> aggregateWindow(every: 1m, fn: mean, createEmpty: false)
AgentHost =
from(bucket: "demo")
|> range(start: -1m)
|> filter(fn: (r) => r._measurement == "snmp" and r._field == "uptime")
|> map(fn: (r) => ({agent_host: r["agent_host"], hostname: r["hostname"]}))
task_data =
join(tables: {Url: Url, AgentHost: AgentHost}, on: ["agent_host"])
|> experimental.group(columns: ["hostname"], mode: "extend")
|> last()
|> aggregateWindow(every: 1m, fn: mean, createEmpty: false)
trigger = (r) => r["percent_packet_loss"] >= 80.0
task_data
|> schema["fieldsAsCols"]()
|> monitor["check"](data: check, messageFn: messageFn, crit: trigger)
|> monitor["notify"](
data: notification,
endpoint:
pagerduty["endpoint"]()(
mapFn: (r) =>
({
routingKey: "SUPERSECRETROUTINGKEY",
client: "influxdata",
clientURL: "http://influx.question.com/orgs/00000000000000000/alert-history",
class: r._check_name,
group: r["_source_measurement"],
severity: pagerduty["severityFromLevel"](level: "crit"),
eventAction: pagerduty["actionFromLevel"](level: "crit"),
source: notification["_notification_rule_name"],
summary: r["_message"],
timestamp: time(v: r["_source_timestamp"]),
}),
),
)
And I always receive the notification even though the value I’m monitoring (percent_packet_loss) doesn’t surpass the threshold.
Any help will be appreciated.
Things that I’ve tried:
- Change trigger’s value and importance.
- Change _type’s check to custom.
- Try to create the task from the notebook’s feature but because of the query the UI doesn’t let me export the alert task.