How to use a scalar value with a column of data

Hi, I have the following variables in one query:

import "math"

// Mean "nping-avg" latency over the dashboard time range for one host/dest,
// exposed as a single-row table whose value column is "latencyAverage_values".
// NOTE: the original had `|> first()` after `mean()`; mean() already reduces
// each table to a single row, so first() was a no-op and has been removed.
latencyAverage = from(bucket:"telegraf")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    |> filter(fn: (r) => r.host == "PRG-Vmontes-VElisa")
    |> filter(fn: (r) => r._measurement == "nping_UDP")
    |> filter(fn: (r) => r["nping-dest"] == "qt-brazil.tut.systems")
    |> filter(fn: (r) => (r._field == "nping-avg"))
    |> mean()
    // Rename so the scalar is addressable by a meaningful column name downstream.
    |> rename(columns: { "_value": "latencyAverage_values" })
    |> yield(name:"latencyAverage")

// Per-5-minute means of latency ("nping-avg") and packet loss ("nping-pl_prct"),
// filtered to "good" samples, with a derived finalResult column.
//
// BUG FIX: the original mapped r["nping-pl_prct"] / r["nping-avg"] BEFORE the
// pivot(). Field names only become row columns AFTER pivot(), so those maps
// yielded null, the threshold filter dropped every row, and the query returned
// no result. The pivot now runs first, then the per-field columns are built.
jitter = from(bucket:"telegraf")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    |> filter(fn: (r) => r.host == "PRG-Vmontes-VElisa")
    |> filter(fn: (r) => r._measurement == "nping_UDP")
    |> filter(fn: (r) => r["nping-dest"] == "qt-brazil.tut.systems")
    // onEmpty: "keep" retains empty tables so createEmpty windows line up below.
    |> filter(fn: (r) => (r._field == "nping-avg") or (r._field == "nping-pl_prct"), onEmpty: "keep")
    |> aggregateWindow(every: 5m, fn: mean, createEmpty: true)
    // Pivot first: this is what turns each _field into its own column.
    |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value")
    |> map(fn: (r) => ({r with
        latency_values: float(v: r["nping-avg"]),
        packetLoss_values: float(v: r["nping-pl_prct"]),
    }))
    // Float literals (49.0 / 1.0) to match the float columns built above.
    |> filter(fn: (r) => r.latency_values <= 49.0 or r.packetLoss_values <= 1.0)
    |> map(fn: (r) => ({r with finalResult: r["nping-avg"] / 1.0}))
    |> yield(name: "jitter")

latencyAverage returns this:

and jitter returns this:

What I cannot do is use the value from latencyAverage in the map inside the jitter equation.
I tried:

    |> map(fn: (r) => ({r with finalResult: r["nping-avg"]/latencyAverage}))

I also tried:

    |> map(fn: (r) => ({r with finalResult: r["nping-avg"]/latencyAverage.latencyAverage_values}))

When I do it, I get no result and sometimes timeouts.

What I am basically trying to ask is: how can I use that latencyAverage _value in this second variable, jitter?

I cannot use join because it loads the CPU to 200% when I try to query the whole example.

I am sharing that working code here just to show that join is something I tried, but it is not efficient.

import "math"

// Count of "good" latency samples (raw value < 120) per 30-minute window,
// collapsed into a single table and tagged "latency_column" for the later join.
tigoLatency = from(bucket:"telegraf")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    //|> filter(fn: (r) => r.host== "PRG-Vmontes-VElisa" )
    //|> filter(fn: (r) => r.host== "${ISP}" )
    |> filter(fn: (r) => r._measurement == "nping_UDP")
    |> filter(fn: (r) => r["nping-dest"] == "qt-brazil.tut.systems")
    |> filter(fn: (r) => r._field == "nping-avg" )
    // Transient float copy of _value, used only by the threshold filter below;
    // the column name is reused by rename() after aggregation.
    |> map(fn: (r) => ({r with latency_values:  float(v:r._value)}))
    |> filter(fn: (r) => r.latency_values < 120)
    |> toFloat()
    // Windowed count (not mean): how many samples survived the threshold.
    |> aggregateWindow(every: 30m, fn: count)
    |> group()
    //|> group(columns: ["host"])
    |> rename(columns: { "_value": "latency_values" })
    |> set(key: "status", value:"latency_column")
    //|> yield (name:"tigoLatency")

// Count of "good" packet-loss samples (<= 1%) per 30-minute window,
// mirroring tigoLatency's shape; tagged "packetLoss_column" for the join.
tigoPacketLoss = from(bucket:"telegraf")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    //|> filter(fn: (r) => r.host== "PRG-Vmontes-VElisa" )
    //|> filter(fn: (r) => r.host== "${ISP}" )
    |> filter(fn: (r) => r._measurement == "nping_UDP")
    |> filter(fn: (r) => r["nping-dest"] == "qt-brazil.tut.systems")
    |> filter(fn: (r) => r._field == "nping-pl_prct" )
    // Transient copy of _value for the threshold filter; renamed again below.
    |> map(fn: (r) => ({r with packetLoss_values: r._value }))
    |> filter(fn: (r) => r.packetLoss_values <= 1)  
    |> toFloat()
    |> aggregateWindow(every: 30m, fn: count)
    |> group()
    //|> group(columns: ["host"])
    |> rename(columns: { "_value": "packetLoss_values" })
    |> set(key: "status", value:"packetLoss_column")
    //|> yield (name:"tigoPacketLoss")

// Total sample count per 30-minute window (no quality threshold applied),
// used as the denominator when normalizing the final ISP-quality score.
tigoSamples = from(bucket:"telegraf")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    |> filter(fn: (r) => r._measurement == "nping_UDP")
    |> filter(fn: (r) => r["nping-dest"] == "qt-brazil.tut.systems")
    |> filter(fn: (r) => r._field == "nping-pl_prct" )
    |> aggregateWindow(every: 30m, fn: count)
    // Regroup by _time then flatten — net effect is one ungrouped table.
    |> group(columns:["_time"])
    |> group()
    // Float copy so the later division (samples * 3.0) type-checks.
    |> map(fn: (r) => ({r with samples: float(v: r._value) }))
    |> set(key: "status", value:"sampleColumn")
    //|> sort(columns: ["_time"], desc: false)
    //|> limit(n: 10000 , offset: 1)    
    //|> yield (name:"tigoSamples")

//######################////////////////// JITTER /
// Jitter input A: 5-minute mean latency series, tagged "present".
// Joined against entryB (same series shifted -5m) to diff consecutive windows.
entryA = from(bucket:"telegraf")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    //|> filter(fn: (r) => r.host== "PRG-Vmontes-VElisa" )
    //|> filter(fn: (r) => r.host== "${ISP}" )
    |> filter(fn: (r) => r["nping-dest"] == "qt-brazil.tut.systems")
    |> filter(fn: (r) => r._measurement == "nping_UDP" and r._field == "nping-avg")
    |> aggregateWindow(every: 5m, fn: mean)
    |> set(key: "status", value:"present")
    //|> yield(name: "A")

// Jitter input B: identical to entryA but time-shifted back 5 minutes, so a
// join on _time pairs each window with its successor; tagged "future".
entryB = from(bucket:"telegraf")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    //|> filter(fn: (r) => r.host== "PRG-Vmontes-VElisa" )
    //|> filter(fn: (r) => r.host== "${ISP}" )
    |> filter(fn: (r) => r["nping-dest"] == "qt-brazil.tut.systems")
    |> filter(fn: (r) => r._measurement == "nping_UDP" and r._field == "nping-avg")
    |> aggregateWindow(every: 5m, fn: mean)
    // Shift so window N lines up with window N+1 of entryA at the same _time.
    |> timeShift(duration: -5m, columns: ["_time"])
    |> set(key: "status", value:"future")
    //|> yield(name: "B")

// Jitter estimate: |mean(next window) - mean(current window)|, filtered to
// (-100, 5), then counted per 30-minute window into jitter_values.
final = join(
    tables: {present: entryA, future: entryB},
    on: ["_time", "host"],
    method: "inner"
    )
    //|> group()
    // Difference between consecutive 5m means (future minus present).
    |> map(fn: (r) => ({r with _value: float(v: r._value_future - r._value_present) }))
    // Discard large negative outliers before taking the absolute value.
    |> filter(fn: (r) => (r._value > -100 )) 
    |> map(fn: (r) => ({r with _value: math.abs(x: r._value ) }))
    // Keep only "stable" deltas; anything >= 5 is treated as noise.
    |> filter(fn: (r) => (r._value < 5 )) 
    |> aggregateWindow(every: 30m, fn: count)
    //|> group(columns: ["host"])
    //|> group()
    |> map(fn: (r) => ({r with jitter_values: float(v: r._value) }))
    //|> yield(name: "final_jitter")    

// ##################### ISP QUALITY #######################

// Combine the good-latency and good-packet-loss counts per window into a
// single latency_packetLoss_values column (simple sum of the two counts).
ispLatPl = join(
    tables: {latency_values: tigoLatency, packetLoss_values: tigoPacketLoss},
    on: ["_time", "host"],
    method: "inner"
  )
  |> map(fn: (r) => ({ r with latency_packetLoss_values: float(v: r.latency_values + r.packetLoss_values) }))
  |> set(key: "status", value:"latency_packetLoss_column")
  //|> yield(name: "latency_packetLoss_table")

// Fold the jitter count into the latency+packet-loss sum, producing the
// combined latency_packetLoss_jitter score per window.
ispLatPLJitter = join(
    tables: {latency_packetLoss_values: ispLatPl, jitter_values: final},
    on: ["_time", "host"],
    method: "inner"
    )
    |> group()
    |> map(fn: (r) => ({ r with latency_packetLoss_jitter: float(v: r.latency_packetLoss_values + r.jitter_values) }))
    |> set(key: "status", value:"latency_packetLoss_jitter_column")
    // Earlier attempt at pruning join-suffixed columns, kept for reference;
    // the final drop happens in ispQuality below.
    //|> drop(columns: ["_start_latency_values", "_stop_latency_values", "_start_packetLoss_values", "_stop_packetLoss_values",
     //   "host_latency_values", "host_packetLoss_values", "name_latency_values", "name_packetLoss_values",
       // "type_latency_values", "type_packetLoss_values", "url_latency_values", "url_packetLoss_values",
       // "status_latency_values", "status_packetLoss_values", "status_jitter_values", "status_latency_packetLoss_values",
        //"_field_latency_values", "_field_packetLoss_values", "type", "url", "_measurement_latency_values", 
       // "_measurement_packetLoss_values", "_measurement", "name" ])
    //|> yield(name: "latency_packetLoss_jitter_table")

// Ordered, size-capped view of the combined score, coerced to float and
// retagged before the final join against the sample counts.
ispQuality_1  = ispLatPLJitter   
    |> sort(columns: ["_time"], desc: false)
    // offset: 1 skips the first row; caps output at 10000 rows.
    |> limit(n: 10000 , offset: 1)    
    |> sort(columns:["host"])
    |> toFloat()
    |> set(key: "status", value:"isp_quality_1_column")
    //|> yield(name: "ispQuality_1")

// Final ISP quality score: combined score normalized by 3x the sample count
// (guarding against division by zero), with every intermediate / join-suffixed
// column dropped so only the result reaches the dashboard.
ispQuality = join(
    tables: {latency_packetLoss_jitter: ispQuality_1, samples: tigoSamples},
    on: ["_time", "host"],
    method: "inner"
    )
    // Zero samples => score 0.0 rather than a divide-by-zero error.
    |> map(fn: (r) => ({r with result: ( if r.samples == 0.0 then 0.0 else r.latency_packetLoss_jitter / (r.samples*3.0)) }))
    // Drop every carried-over column from the chain of joins above.
    |> drop(columns: ["url", "type", "name", "host_samples", "_measurement",
        "_field_samples", "_field_latency_packetLoss_jitter", "_start_latency_packetLoss_jitter", "_start_samples",
        "_stop_latency_packetLoss_jitter", "_stop_samples", "status_latency_packetLoss_jitter", "status_samples",
        "latency_values", "jitter_values", "latency_packetLoss_values", "packetLoss_values", "_value",
        "_time_latency_packetLoss_jitter", "_field", "status", "_start_future", "_start_latency_values",
        "_start_packetLoss_values", "_start_present", "_stop_future", "_stop_latency_values", "_stop_packetLoss_values",
        "_stop_present", "_field_future", "_field_latency_values", "_field_packetLoss_values", "_field_present",
        "_measurement_future", "_measurement_latency_values", "_measurement_packetLoss_values", "_measurement_present",
        "name_future", "name_latency_values", "name_packetLoss_values", "name_present", "status_latency_values",
        "status_packetLoss_values", "type_future", "type_latency_values", "type_packetLoss_values", "type_present",
        "url_future", "url_latency_values", "url_packetLoss_values", "url_present",
        "latency_packetLoss_jitter", "samples", "_value_samples", "_value_latency_packetLoss_jitter",
        "nping-dest_latency_values", "nping-dest_packetLoss_values", "nping-dest_future", "nping-dest_present",
        "nping-dest", "status"])
    |> set(key: "status", value:"isp_quality_column")
    |> group(columns: ["host"])
    // Blank column name so the graph legend shows only the host tag.
    |> rename(columns: {"result": " "})
    |> yield(name: "ispQuality")

Could anyone help me, please?

Hello @Sergio_B,
You want to use the `findRecord()` function to extract the scalar:

For example:

import "experimental/array"
 
// Build an in-memory demo table, isolate one group, then use findRecord()
// to pull a single row out as a record — x._value is then a plain scalar
// usable inside any other pipeline's map().
data = array.from(rows: [{_time: 2020-01-01T00:00:00Z, mytag: "t0", _field: "f0", _value: 1.0},
{_time: 2020-01-01T00:00:00Z, mytag: "t1", _field: "f1", _value: 1.5},
{_time: 2020-01-02T00:00:00Z, mytag: "t0", _field: "f0", _value: 2.0},
{_time: 2020-01-02T00:00:00Z, mytag: "t1", _field: "f1", _value: 2.5}])
|> group(columns: ["mytag"], mode:"by")
|> filter(fn: (r) => r.mytag == "t0")
 
 
// fn: (key) => true matches the first group key; idx: 0 takes its first row.
x = data |> findRecord(fn: (key) => true, idx: 0)
data |> group() |> limit(n:1, offset: 0) |> map(fn: (r) => ({ r with myRecord: x._value}))

Thank you very much — I was able to do the following:


// Overall mean of "avg_lat" across several destination IPs for one host,
// reduced to a single-row table that meanLatencyValue extracts a scalar from.
meanLatency = from(bucket:"tigo-bucket")
    |> range(start: v.timeRangeStart, stop: v.timeRangeStop )
    |> filter(fn: (r) =>r["_measurement"] == "ping_geo")
    |> filter(fn: (r) => r["_field"] == "avg_lat")
    |> filter(fn: (r) => r["cmts"] == "CMTS")
    |> filter(fn: (r) => r["code"] == "LPZ")
    |> filter(fn: (r) => (r["dest"] == "10.136.240.17") or ((r["dest"] == "10.136.240.8") or (r["dest"] == "10.143.240.4") or (r["dest"] == "10.143.240.8") or (r["dest"] == "10.128.11.99") or (r["dest"] == "10.136.240.9"))) 
    |> filter(fn: (r) => r["host"] == "homequaprdapp")
    //|> hourSelection(start: 23, stop: 2)
    |> mean()
    // Ensure the aggregate is a float before extraction downstream.
    |> map(fn: (r) => ({r with _value: float(v: r._value) }))
    //|> yield (name:"sergio")

// findColumn() returns the "_value" column of the first table whose group
// key matches, as an array — the scalar is then read as meanLatencyValue[0].
meanLatencyValue = meanLatency
    |> findColumn(
        fn: (key) => key._field == "avg_lat" ,
        column: "_value",
    )