Sorry, the full code is quite long, so I will only include the relevant part of the code and the error.
import streamlit as st
import pandas as pd
from PIL import Image
import pages.productOverview
import pages.productionData
from utils import * #from utils.Product import *
from influxdb_client import InfluxDBClient, Point, WriteOptions
org = "Cebi"
bucket = "POC"
token = "T3DsWbLlhb69htTLQzOcPIAyDsZH-n03tHHuQOEzQXN80-zH5nxC032FtAZSJJ-oekZXZYCeKHnaDX5ZXGwNRw=="
#query2 = 'from(bucket: "Fifo")|> range(start: -2d)|> filter(fn: (r) => (r._measurement == "Fifo_int") and (r._Produit_fini == "27500980" )'
#establish a connection
client = InfluxDBClient(url="http://localhost:8086", token=token, org=org)
#instantiate the WriteAPI and QueryAPI
query_api = client.query_api()
PAGES = {
    "Product overview": pages.productOverview,
    "Production data": pages.productionData
}
DATE_COLUMN = 'date'
DATA_URL = ('data/2020_use_2.csv')
DATA_URL_EXTERNAL_MOV = 'data/FIF_EXTERNES.csv' #External Movement in 2020 => to make it dynamic
DATA_URL_INTERNAL_MOV = 'data/FIF_INTERNES.csv' #Internal Movement in 2020 => to make it dynamic
st.markdown(
    f"""
    <style>
    .reportview-container .main .block-container{{
        max-width: 1200px;
    }}
    </style>
    """,
    unsafe_allow_html=True,
)
@st.cache(show_spinner=False)
def load_data_external_mov():
    data = pd.read_csv(DATA_URL_EXTERNAL_MOV, sep=";", header=0)#, encoding='utf-8' nrows=nrows,
    lowercase = lambda x: str(x).lower()
    data.rename(lowercase, axis='columns', inplace=True)
    #data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
    return data
@st.cache(show_spinner=False)
def load_data_internal_mov():
    data = pd.read_csv(DATA_URL_INTERNAL_MOV, sep=";", header=0)#, encoding='utf-8' nrows=nrows,
    lowercase = lambda x: str(x).lower()
    data.rename(lowercase, axis='columns', inplace=True)
    #data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
    return data
def load_data(nrows):
    data = pd.read_csv(DATA_URL, nrows=nrows, sep=";", header=0)#, encoding='utf-8'
    lowercase = lambda x: str(x).lower()
    data.rename(lowercase, axis='columns', inplace=True)
    #data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
    return data
def get_Cebi_line(ref,VW_line_code):
    lineCode = {
        '26904394': ("69742","69732"),
        '26904395': ("99999","99998")
    }
    a=lineCode.get(ref, "Invalid line code and/or reference")
    return a[int(VW_line_code)]
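# Illustrative call with hypothetical values (not from real data), based on the mapping above:
# get_Cebi_line('26904394', "1") -> "69732"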
def decodeVW(DMC):
    res=[]
    yearCode = {'0': "2017",'1': "2018",'2': "2019",'3': "2020",'4': "2021",'5': "2022",'6': "2023",'7': "2024",'8': "2025",'9': "2026",
                'A': "2027",'B': "2028",'C': "2029"
    }
    #monthCode = {
    #    '0': "January",'1': "February",'2': "March",'3': "April",'4': "May",'5': "June",'6': "July",'7': "August",'8': "September",'9': "October",
    #    'A': "November",'B': "December"
    #}
    monthCode = {
        '0': "01",'1': "02",'2': "03",'3': "04",'4': "05",'5': "06",'6': "07",'7': "08",'8': "09",'9': "10",
        'A': "11",'B': "12"
    }
    dayCode = {
        '1': "1",'2': "2",'3': "3",'4': "4",'5': "5",'6': "6",'7': "7",'8': "8",'9': "9",'A': "10",'B': "11",'C': "12",'D': "13",'E': "14",
        'F': "15",'G': "16",'H': "17",'I': "18",'J': "19",'K': "20",'L': "21",'M': "22",'N': "23",'O': "24",'P': "25",'Q': "26",'R': "27",
        'S': "28",'T': "29",'U': "30",'V': "31"
    }
    #lineCode = {
    #    '1': "1",'2': "2",'3': "3",'4': "4",'5': "5",'6': "6",'7': "7",'8': "8",'9': "9",'A': "10",'B': "11",'C': "12",'D': "13",'E': "14",
    #    'F': "15",'G': "16",'H': "17",'I': "18",'J': "19",'K': "20",'L': "21",'M': "22",'N': "23",'O': "24",'P': "25",'Q': "26",'R': "27",
    #    'S': "28",'T': "29",'U': "30",'V': "31",'W': "32",'X': "33",'Y': "34",'Z': "35"
    #}
    lineCode = {
        '0': "0",'1': "1",'2': "2",'3': "3",'4': "4",'5': "5",'6': "6",'7': "7",'8': "8",'9': "9",'A': "10",'B': "11",'C': "12",'D': "13",'E': "14",
        'F': "15",'G': "16",'H': "17",'I': "18",'J': "19",'K': "20",'L': "21",'M': "22",'N': "23",'O': "24",'P': "25",'Q': "26",'R': "27",
        'S': "28",'T': "29",'U': "30",'V': "31",'W': "32",'X': "33",'Y': "34",'Z': "35"
    }
    #yearCode.get(DMC[1], "Invalid year")
    res.append(yearCode.get(DMC[0], "Invalid year"))
    res.append(monthCode.get(DMC[1], "Invalid month"))
    res.append(dayCode.get(DMC[2], "Invalid day"))
    res.append(DMC[3:6])
    res.append(lineCode.get(DMC[6], "Invalid line"))
    return res
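# Illustrative decode with a hypothetical DMC (not a real code), following the tables above:
# decodeVW("30N123A") -> ["2020", "01", "23", "123", "10"]  (year, month, day, serial, line)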
def decode_Piece(PieceNo):
    if (PieceNo >= 1) and (PieceNo <= 9) :
        {
            '69732': ("69731", "69730"),
            '75481': ("75480"),
            '69704': ("69703", "69702", "69701", "69700")
        }
    return line.get(markingLine, "No information for this line yet!")
def get_Cebi_previous_lines(markingLine):
    line = {
        '69732': ("69731", "69730"),
        '75481': ("75480"),
        '69704': ("69703", "69702", "69701", "69700")
    }
    return line.get(markingLine, "No information for this line yet!")
def get_BOM(data, dateProd, machineNb):
    filtered_data=data[data['machine_nb']==machineNb]
    filtered_data2=filtered_data[filtered_data['date_material_use']<int(dateProd)]
    list_articlenb = filtered_data.articlenb.value_counts(dropna=True)
    #st.write(list_articlenb)
    list_date = []
    list_fifo_uniq = []
    list_hour = []
    list_machine = []
    for x in list_articlenb.index:
        #st.write(x)
        x_df = filtered_data2[filtered_data2['articlenb']==x]
        dateSorted = x_df.sort_values(by=['date_material_use','timestamp'], ascending=False)
        #st.write(dateSorted)
        last_fifos= dateSorted[dateSorted['date_material_use']==dateSorted['date_material_use'].iloc[0]]
        last_fifos_unique=last_fifos.fifo.unique()
        if (last_fifos_unique.size==1):
            #st.write("ref", x, "last use", dateSorted['date_material_use'].iloc[0], "-", dateSorted['timestamp'].iloc[0], "fifo", last_fifos_unique[0])
            list_date.append(dateSorted['date_material_use'].iloc[0])
            list_hour.append(dateSorted['timestamp'].iloc[0])
            list_machine.append(dateSorted['machine_nb'].iloc[0])
            list_fifo_uniq.append(last_fifos_unique[0])
        else:
            st.write("be careful : several fifo number for this ref - please contact the dev'")
            st.write("ref", x, "last use", dateSorted['date_material_use'].iloc[0], "-", dateSorted['timestamp'].iloc[0], "fifo", last_fifos_unique[0])
            #st.write(last_fifos_unique)
    pd_values=list_articlenb.to_frame()
    pd_values.insert(1, "last date of use", list_date, True)
    pd_values.insert(2, "last hour of use", list_hour, True)
    pd_values.insert(3, "machine_nb", list_machine, True)
    pd_values.insert(4, "last fifo of use", list_fifo_uniq, True)
    pd_values.reset_index(level=0, inplace=True)
    pd_values.rename(columns={'articlenb':'number in the data','index':'reference'}, inplace=True)
    return pd_values[['reference','last date of use','last hour of use','machine_nb','last fifo of use']]
def get_unique_numbers(numbers):
    unique = []
    for number in numbers:
        if number not in unique:
            unique.append(number)
    return unique
#--------- MAIN FUNCTION - APP STABILITY4.0 ----------#
def main():
    st.sidebar.title("Traceability 4.0")
    st.sidebar.markdown("---")
    data_load_state = st.text('Loading data...')
    data = load_data(2000000)
    data_load_state.text("")
    type = st.sidebar.radio("What information do you have ?",('Product DMC', 'Delivery Voucher (Invoice)', 'Machine number', 'Fifo number', 'Material reference', 'Product reference'))
    if type=='Machine number':
        option = st.sidebar.text_input('Which Machine number do you want to display?','Machine number')
        if option!='Machine number':
            st.write('Available information for Machine number',option)
            otp=int(option)
            fifoOK = data['machine_nb']==otp
            filtered_data=data[fifoOK]
            st.write(filtered_data)
            values = filtered_data.articlenb.value_counts(dropna=True)
            #dateSorted = sort_values(by='col1', ascending=False)
            st.write("Raw material analysis on machine",option)
            #st.write(values)
            list_date = []
            list_fifo_uniq = []
            list_hour = []
            for x in values.index:
                x_df = filtered_data[filtered_data['articlenb']==x]
                dateSorted = x_df.sort_values(by=['date_material_use','timestamp'], ascending=False)
                #st.write(dateSorted)
                last_fifos= dateSorted[dateSorted['date_material_use']==dateSorted['date_material_use'].iloc[0]]
                last_fifos_unique=last_fifos.fifo.unique()
                #st.write(last_fifos_unique)
                if (last_fifos_unique.size==1):
                    #st.write("ref", x, "last use", dateSorted['date_material_use'].iloc[0], "-", dateSorted['timestamp'].iloc[0], "fifo", last_fifos_unique[0])
                    list_date.append(dateSorted['date_material_use'].iloc[0])
                    list_hour.append(dateSorted['timestamp'].iloc[0])
                    list_fifo_uniq.append(last_fifos_unique[0])
                else:
                    st.write("be careful : several fifo number for this ref - please contact the dev'")
                    st.write("ref", x, "last use", dateSorted['date_material_use'].iloc[0], "-", dateSorted['timestamp'].iloc[0], "fifo", last_fifos_unique[0])
                    st.write(last_fifos_unique)
            pd_values=values.to_frame()
            pd_values.insert(1, "last date of use", list_date, True)
            pd_values.insert(2, "last hour of use", list_hour, True)
            pd_values.insert(3, "last fifo of use", list_fifo_uniq, True)
            pd_values.reset_index(level=0, inplace=True)
            pd_values.rename(columns={'articlenb':'number in the data','index':'raw material reference'}, inplace=True)
            st.write(pd_values)
            #d= filtered_data.date_material_use - filtered_data.date
            #st.write(d)
            #st.bar_chart(values)
            if st.checkbox('Show which reference this machine manufacture'):
                val = filtered_data.reference_nb.value_counts(dropna=True)
                st.write(val)
            if st.checkbox('Show a table with columns to be selected'):
                options = st.multiselect('What colums do you need in table', data.columns)
                st.write(filtered_data[options])
    if type=='Delivery Voucher (Invoice)':
        st.sidebar.write('Please enter the Date AND the product reference you want to track and trace.')
        option = st.sidebar.text_input('Date ?','Date')
        option2 = st.sidebar.text_input('Product reference?','ref')
        if option!='Date' and option2!='ref':
            st.write("Date:", option, " for the product reference:", option2)
            if (len(option)==7):
                st.write("Codification: VW norm")
                res=decodeVW(option)
                line=get_Cebi_line(option2, res[4])
                prevLines=get_Cebi_previous_lines(line)
                st.write("Year:", res[0], " - Month:", res[1], " - Day:", res[2], " - Serial Number:", res[3], " - Line (marking):", line, " - previous lines:", prevLines)
                st.write('Available information for Machine number',option)
                machineNb=int(line)
                dateProd=res[0]+res[1]+res[2]
                #st.write("BOM - according to AS400 (some raw material can still not be used for this specific product! assumption: last fifo is still in use and previous fifo are not in use anymore - not really the case @Cebi?!)")
                st.write("BOM - Assumption: last fifo is still in use and previous fifo are not in use anymore (not really the case @Cebi?!) + No way to check if really used for this product (can be loaded but not used in prod)")
                pd_values=get_BOM(data, dateProd, machineNb)
                st.write(pd_values)
                for k in prevLines:
                    pd_values=get_BOM(data, dateProd, int(k))
                    #st.write("BOM - according to AS400 (some raw material can still not be used for this specific product! assumption: last fifo is still in use and previous fifo are not in use anymore AND date of pre-assembly = date of marking - not really the case @Cebi?!)")
                    st.write("BOM - Assumption: last fifo is still in use and previous fifo are not in use anymore (not really the case @Cebi?!) + date of pre-assembly = date of marking (not really the case @Cebi?!) + No way to check if really used for this product (can be loaded but not used in prod)")
                    st.write(pd_values)
    if type=='Fifo number':
        option = st.sidebar.text_input('Which Fifo number do you want to display?','Fifo')
        if option!='Fifo':
            st.write('Available information for fifo number:',option)
            otp=int(option)
            fifoOK = data['fifo']==otp
            filtered_data=data[fifoOK]
            st.write(filtered_data)
            if st.checkbox('Show the number of use per machine number'):
                values = filtered_data.machine_nb.value_counts(dropna=True)
                st.write("Number of use per machine number")
                st.write(values)
            if st.checkbox('Show the evolution of the delay (in days) between exit of storage and use'):
                st.write("Evolution of the delay (in days) between exit of storage and use")
                date_use=pd.to_datetime(filtered_data['date_material_use'], format="%Y%m%d")
                date_exit=pd.to_datetime(filtered_data['date_exit_storage'], format="%Y%m%d")
                diffdate= date_use - date_exit
                #st.write(diffdate.dt.days)
                #date_test=pd.to_datetime(filtered_data['date'], format="%Y%m%d")
                #TODO: group by date, because there can be several values per day
                #finalDiff=diffdate.dt.days.groupby('date').mean()
                chart_data2 = pd.DataFrame({'date': date_use,'delay (in days)': diffdate.dt.days})
                chart_data2 = chart_data2.rename(columns={'date':'index'}).set_index('index')
                #st.write(chart_data)
                st.line_chart(chart_data2)
            if st.checkbox('Show a table with columns to be selected'):
                options = st.multiselect('What colums do you need in table', data.columns)
                st.write(filtered_data[options])
    if type=='Material reference':
        option = st.sidebar.text_input('Which Material reference do you want to display?','Material reference')
        if option!='Material reference':
            st.write('Available information for Material reference:',option)
            otp=int(option)
            fifoOK = data['articlenb']==otp
            filtered_data=data[fifoOK]
            st.write(filtered_data)
            if st.checkbox('Show the number of use per fifo'):
                values = filtered_data.fifo.value_counts(dropna=True)
                st.write("Number of use per fifo")
                st.write(values)
            if st.checkbox('Show the evolution of the price per unit'):
                st.write("Evolution of the price per unit")
                date_test=pd.to_datetime(filtered_data['date'], format="%Y%m%d")
                chart_data = pd.DataFrame({'date': date_test,'price per unit': (filtered_data.unit_price/10000)/ filtered_data.quantity})
                chart_data = chart_data.rename(columns={'date':'index'}).set_index('index')
                #st.write(chart_data)
                st.line_chart(chart_data)
            if st.checkbox('Show a table with columns to be selected'):
                options = st.multiselect('What colums do you need in table', data.columns)
                st.write(filtered_data[options])
    if type=='Product reference':
        option = st.sidebar.text_input('Which Product reference do you want to display?','Product reference')
        if option!='Product reference':
            st.write('Available information for Product reference:',option)
            try:
                otp=int(option)
            except ValueError :
                st.write("Please verify your Product reference !!")
            else:
                otp=int(option)
                col1, col2 = st.beta_columns(2)
                if otp == 27500980:
                    query_api = client.query_api()
                    query = 'from(bucket: "POC_Nomenclatures")|> range(start: -30d)|> filter(fn: (r) => r._measurement == "27500980")'
                    image = Image.open('img/2750098.jpg')
                    col1.header("Product N° : "+option)
                    col1.image(image,use_column_width=True)
                    col2.header("Production Statistic")
                    #return the table and print the result
                    result = query_api.query(query)
                    results = []
                    results_unique = []
                    #results2 = []
                    for table in result:
                        for record in table.records:
                            results.append((record.get_measurement(),record.values.get("Composant 2"),record.values.get("Qt"),record.get_field(),record.get_value()))
                    results_unique = get_unique_numbers(results)
                    #st.write(results_unique)
                else:
                    query = 'from(bucket: "POC_Nomenclatures")|> range(start: -30d)|> filter(fn: (r) => r._measurement =~ /option/)'
                    #Divide the interface into two sides
                    image = Image.open('img/2750097.jpg')
                    col1.header("Product N° : "+option)
                    col1.image(image,use_column_width=True)
                    col2.header("Production Statistic")
                    #return the table and print the result
                    result = query_api.query(query)
                    #result2 = client.query_api().query(org=org, query=query2)
                    results = []
                    results_unique = []
                    #results2 = []
                    for table in result:
                        for record in table.records:
                            results.append((record.get_measurement(),record.values.get("Composant 2"),record.values.get("Qt"),record.get_field(),record.get_value()))
                    results_unique = get_unique_numbers(results)
                    #st.write(results_unique)
                df = pd.DataFrame(results_unique,columns=("Ref","Component","Quantity","Field_key","Field_value"))
                st.header('Bill of materials')
                st.dataframe(df) # Same as st.write(df)
                st.header('Quality Controls:')
                query = 'from(bucket: "POC_Collector")|> range(start: -30d))'
                #return the table and print the result
                result = query_api.query(query)
                results = []
                results_unique = []
                for table in result_1:
                    for record in table.records:
                        results.append((record.get_measurement(),record.values.get("Tag_key"),record.get_value(),record.get_field()))
                results_unique = get_unique_numbers(results)
                df = pd.DataFrame(results_unique,columns=("1","Type","3","4"))
                st.dataframe(df) # Same as st.write(df)
                #fifoOK = data['reference_nb']==otp
                #filtered_data=data[fifoOK]
                #st.write(filtered_data)
                if st.checkbox('Show on which machines this product is manufactured'):
                    values = filtered_data.machine_nb.value_counts(dropna=True)
                    st.write("Number of use per machine number")
                    st.write(values)
                if st.checkbox('Show raw material use for this product'):
                    raw_material=filtered_data.articlenb.unique()
                    #st.write(raw_material)
                    list_date = []
                    list_fifo_uniq = []
                    list_hour = []
                    for x in raw_material:
                        x_df = filtered_data[filtered_data['articlenb']==x]
                        dateSorted = x_df.sort_values(by=['date_material_use','timestamp'], ascending=False)
                        #st.write(dateSorted)
                        last_fifos= dateSorted[dateSorted['date_material_use']==dateSorted['date_material_use'].iloc[0]]
                        last_fifos_unique=last_fifos.fifo.unique()
                        #st.write(last_fifos_unique)
                        if (last_fifos_unique.size==1):
                            #st.write("ref", x, "last use", dateSorted['date_material_use'].iloc[0], "-", dateSorted['timestamp'].iloc[0], "fifo", last_fifos_unique[0])
                            list_date.append(dateSorted['date_material_use'].iloc[0])
                            list_hour.append(dateSorted['timestamp'].iloc[0])
                            list_fifo_uniq.append(last_fifos_unique[0])
                        else:
                            st.write("be careful : several fifo number for this ref - please contact the dev'")
                            st.write("ref", x, "last use", dateSorted['date_material_use'].iloc[0], "-", dateSorted['timestamp'].iloc[0], "fifo", last_fifos_unique[0])
                            st.write(last_fifos_unique)
                    #pd_values=raw_material.to_frame()
                    pd_values=pd.DataFrame(data=raw_material[0:],columns={'Reference'}) # 1st row as the column names
                    pd_values.insert(1, "last date of use", list_date, True)
                    pd_values.insert(2, "last hour of use", list_hour, True)
                    pd_values.insert(3, "last fifo of use", list_fifo_uniq, True)
                    #pd_values.reset_index(level=0, inplace=True)
                    st.write(pd_values)
    if type=='Product DMC':
        st.sidebar.write('Please enter the Data Matrix code AND the product reference you want to track and trace.')
        option = st.sidebar.text_input('DMC code?','DMC')
        option2 = st.sidebar.text_input('Product reference?','ref')
        if option!='DMC' and option2!='ref':
            st.write("DMC:", option, " for the product reference:", option2)
            if (len(option)==7):
                st.write("Codification: VW norm")
                res=decodeVW(option)
                line=get_Cebi_line(option2, res[4])
                prevLines=get_Cebi_previous_lines(line)
                st.write("Year:", res[0], " - Month:", res[1], " - Day:", res[2], " - Serial Number:", res[3], " - Line (marking):", line, " - previous lines:", prevLines)
                st.write('Available information for Machine number',option)
                machineNb=int(line)
                dateProd=res[0]+res[1]+res[2]
                #st.write("BOM - according to AS400 (some raw material can still not be used for this specific product! assumption: last fifo is still in use and previous fifo are not in use anymore - not really the case @Cebi?!)")
                st.write("BOM - Assumption: last fifo is still in use and previous fifo are not in use anymore (not really the case @Cebi?!) + No way to check if really used for this product (can be loaded but not used in prod)")
                pd_values=get_BOM(data, dateProd, machineNb)
                st.write(pd_values)
                for k in prevLines:
                    pd_values=get_BOM(data, dateProd, int(k))
                    #st.write("BOM - according to AS400 (some raw material can still not be used for this specific product! assumption: last fifo is still in use and previous fifo are not in use anymore AND date of pre-assembly = date of marking - not really the case @Cebi?!)")
                    st.write("BOM - Assumption: last fifo is still in use and previous fifo are not in use anymore (not really the case @Cebi?!) + date of pre-assembly = date of marking (not really the case @Cebi?!) + No way to check if really used for this product (can be loaded but not used in prod)")
                    st.write(pd_values)
    #st.write(pd_values[['reference','last date of use','last hour of use','machine_nb','last fifo of use']])
    #st.sidebar.markdown('What do you look for?')
    #option = st.sidebar.text_input('Please enter the DMC?','DMC')
    #option2 = st.sidebar.text_input('Please enter the product reference?','ref')
    #selection = st.sidebar.selectbox('Select the specific information you want to access about this product.',
    #('Product overview','Production data'))
    st.sidebar.markdown("---")
    image = Image.open('img/cebi_logo.jpeg')
    st.sidebar.image(image,use_column_width=True)
    with st.spinner("Loading data ..."): # To be managed according to the real file url => date marking product ?
        data_external_mov = load_data_external_mov()
        data_internal_mov = load_data_internal_mov()
    #page = PAGES[selection]
    #with st.spinner(f"Loading {selection} ..."):
        #if option!='DMC' and option2!='ref':
            #p = Product(data_external_mov, data_internal_mov, option, option2)
            #page.write(p, data_external_mov, data_internal_mov)
if __name__ == "__main__":
    main()
    client.__del__()
Here is just the part that interests us:
otp=int(option)
col1, col2 = st.beta_columns(2)
if otp == 27500980:
    query_api = client.query_api()
    query = 'from(bucket: "POC_Nomenclatures")|> range(start: -30d)|> filter(fn: (r) => r._measurement == "27500980")'
    image = Image.open('img/2750098.jpg')
    col1.header("Product N° : "+option)
    col1.image(image,use_column_width=True)
    col2.header("Production Statistic")
    #return the table and print the result
    result = query_api.query(query)
    results = []
    results_unique = []
    #results2 = []
    for table in result:
        for record in table.records:
            results.append((record.get_measurement(),record.values.get("Composant 2"),record.values.get("Qt"),record.get_field(),record.get_value()))
    results_unique = get_unique_numbers(results)
    #st.write(results_unique)
else:
    query = 'from(bucket: "POC_Nomenclatures")|> range(start: -30d)|> filter(fn: (r) => r._measurement =~ /option/)'
    #Divide the interface into two sides
    image = Image.open('img/2750097.jpg')
    col1.header("Product N° : "+option)
    col1.image(image,use_column_width=True)
    col2.header("Production Statistic")
    #return the table and print the result
    result = query_api.query(query)
    #result2 = client.query_api().query(org=org, query=query2)
    results = []
    results_unique = []
    #results2 = []
    for table in result:
        for record in table.records:
            results.append((record.get_measurement(),record.values.get("Composant 2"),record.values.get("Qt"),record.get_field(),record.get_value()))
    results_unique = get_unique_numbers(results)
    #st.write(results_unique)
df = pd.DataFrame(results_unique,columns=("Ref","Component","Quantity","Field_key","Field_value"))
st.header('Bill of materials')
st.dataframe(df) # Same as st.write(df)
st.header('Quality Controls:')
query = 'from(bucket: "POC_Collector")|> range(start: -30d))'
#return the table and print the result
result = query_api.query(query)
results = []
results_unique = []
for table in result_1:
    for record in table.records:
        results.append((record.get_measurement(),record.values.get("Tag_key"),record.get_value(),record.get_field()))
results_unique = get_unique_numbers(results)
df = pd.DataFrame(results_unique,columns=("1","Type","3","4"))
st.dataframe(df) # Same as st.write(df)
The error message is:
ApiException: (400) Reason: Bad Request HTTP response headers: HTTPHeaderDict({'Content-Type': 'application/json; charset=utf-8', 'Vary': 'Accept-Encoding', 'X-Platform-Error-Code': 'invalid', 'Date': 'Wed, 24 Feb 2021 17:00:10 GMT', 'Transfer-Encoding': 'chunked'}) HTTP response body: b'{"code":"invalid","message":"compilation failed: error at @1:51-1:52: invalid statement: )"}'
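For reference, the columns in the Flux error are 1-based offsets into the query string, so a rough sanity check (assuming the failing query is the "Quality Controls" one) is to look at what the parser is pointing at around column 51:

# Rough sanity check (assumption: the failing query is the "Quality Controls" one).
# Flux reports 1-based columns, so @1:51 corresponds to index 50 in Python.
query = 'from(bucket: "POC_Collector")|> range(start: -30d))'
print(repr(query[50]))                      # character the parser stopped on
print(query.count('('), query.count(')'))   # quick check whether the parentheses balance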
Thank you.