Hmm @mtr,
You can definitely write it to InfluxDB.
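For example, you could write each bucket’s upper bound and cumulative count as separate fields. Here’s a minimal sketch (the bucket name "example-bucket" and the measurement name "request_duration" are just placeholders I made up, not anything from your setup):

import "array"

array.from(rows: [
    {_time: 2022-06-17T17:52:18.681Z, _measurement: "request_duration", _field: "b0_bound", _value: 0.25},
    {_time: 2022-06-17T17:52:18.681Z, _measurement: "request_duration", _field: "b0_count", _value: 178.0},
])
    |> to(bucket: "example-bucket")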
I’m not entirely sure I understand what you mean, but I’ll give it a go.
You could maybe use stateDuration() to calculate the request duration?
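Something like this, maybe (a rough sketch only; the bucket, measurement, field, and the "pending" status value are assumptions about your schema):

from(bucket: "example-bucket")
    |> range(start: -1h)
    |> filter(fn: (r) => r._measurement == "requests" and r._field == "status")
    // duration (in ms) each request has spent in the "pending" state; -1 when it isn't in that state
    |> stateDuration(fn: (r) => r._value == "pending", column: "duration", unit: 1ms)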
Here’s my failed attempt at getting that data into the shape expected by the UI for histograms:
import "join"
import "experimental"
import "strings"
import "array"
import "regexp"
// fields written like: b0_bound=0.25,b0_count=178,b1_bound=0.5,b1_count=98
// If your data above was written as fields like this, your output would look like the
// table below after you filter for all of those fields.
data = array.from(rows: [
    {_time: 2022-06-17T17:52:18.681Z, _field: "b0_bound", _value: 0.25},
    {_time: 2022-06-17T17:52:18.681Z, _field: "b0_count", _value: 178.0},
    {_time: 2022-06-17T17:52:18.681Z, _field: "b1_bound", _value: 0.5},
    {_time: 2022-06-17T17:52:18.681Z, _field: "b1_count", _value: 98.0},
    {_time: 2022-06-18T17:52:18.681Z, _field: "b0_bound", _value: 0.25},
    {_time: 2022-06-18T17:52:18.681Z, _field: "b0_count", _value: 179.0},
    {_time: 2022-06-18T17:52:18.681Z, _field: "b1_bound", _value: 0.5},
    {_time: 2022-06-18T17:52:18.681Z, _field: "b1_count", _value: 99.0},
])
    |> group(columns: ["_field", "_time"], mode: "by")
sum = data
    // keep only the *_count fields
    |> filter(fn: (r) => r._field =~ regexp.compile(v: "count"))
    |> group(columns: ["_field"], mode: "by")
    // number the rows in each group so counts can be paired with bounds later
    |> map(fn: (r) => ({r with index: 1.0}))
    |> cumulativeSum(columns: ["index"])
    |> drop(columns: ["_time"])
    |> yield(name: "sum")
bounds = data
    // keep only the *_bound fields and relabel them as "le"
    |> filter(fn: (r) => r._field =~ regexp.compile(v: "bound"))
    |> set(key: "_field", value: "le")
    // same row numbering as above so the two streams can be joined by position
    |> map(fn: (r) => ({r with index: 1.0}))
    |> cumulativeSum(columns: ["index"])
    |> yield(name: "bounds")
// pair each bound with its count by row index
join(
    tables: {bounds: bounds, sum: sum},
    on: ["index"],
    method: "inner",
)
    |> yield(name: "join")